Diffstat (limited to 'drivers/staging/line6/variax.c')
-rw-r--r--  drivers/staging/line6/variax.c  494
1 file changed, 5 insertions(+), 489 deletions(-)
diff --git a/drivers/staging/line6/variax.c b/drivers/staging/line6/variax.c
index f97416b1de5..ae2be99f9a9 100644
--- a/drivers/staging/line6/variax.c
+++ b/drivers/staging/line6/variax.c
@@ -12,28 +12,13 @@
#include <linux/slab.h>
#include "audio.h"
-#include "control.h"
#include "driver.h"
#include "variax.h"
-#define VARIAX_SYSEX_CODE 7
-#define VARIAX_SYSEX_PARAM 0x3b
-#define VARIAX_SYSEX_ACTIVATE 0x2a
-#define VARIAX_MODEL_HEADER_LENGTH 7
-#define VARIAX_MODEL_MESSAGE_LENGTH 199
#define VARIAX_OFFSET_ACTIVATE 7
/*
This message is sent by the device during initialization and identifies
- the connected guitar model.
-*/
-static const char variax_init_model[] = {
- 0xf0, 0x00, 0x01, 0x0c, 0x07, 0x00, 0x69, 0x02,
- 0x00
-};
-
-/*
- This message is sent by the device during initialization and identifies
the connected guitar version.
*/
static const char variax_init_version[] = {
@@ -53,43 +38,11 @@ static const char variax_activate[] = {
0xf7
};
-static const char variax_request_bank[] = {
- 0xf0, 0x00, 0x01, 0x0c, 0x07, 0x00, 0x6d, 0xf7
-};
-
-static const char variax_request_model1[] = {
- 0xf0, 0x00, 0x01, 0x0c, 0x07, 0x00, 0x3c, 0x00,
- 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x05, 0x03,
- 0x00, 0x00, 0x00, 0xf7
-};
-
-static const char variax_request_model2[] = {
- 0xf0, 0x00, 0x01, 0x0c, 0x07, 0x00, 0x3c, 0x00,
- 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x07, 0x03,
- 0x00, 0x00, 0x00, 0xf7
-};
-
/* forward declarations: */
-static int variax_create_files2(struct device *dev);
static void variax_startup2(unsigned long data);
static void variax_startup4(unsigned long data);
static void variax_startup5(unsigned long data);
-/*
- Decode data transmitted by workbench.
-*/
-static void variax_decode(const unsigned char *raw_data, unsigned char *data,
- int raw_size)
-{
- for (; raw_size > 0; raw_size -= 6) {
- data[2] = raw_data[0] | (raw_data[1] << 4);
- data[1] = raw_data[2] | (raw_data[3] << 4);
- data[0] = raw_data[4] | (raw_data[5] << 4);
- raw_data += 6;
- data += 3;
- }
-}
-
static void variax_activate_async(struct usb_line6_variax *variax, int a)
{
variax->buffer_activate[VARIAX_OFFSET_ACTIVATE] = a;
@@ -142,6 +95,7 @@ static void variax_startup3(struct usb_line6_variax *variax)
static void variax_startup4(unsigned long data)
{
struct usb_line6_variax *variax = (struct usb_line6_variax *)data;
+
CHECK_STARTUP_PROGRESS(variax->startup_progress,
VARIAX_STARTUP_ACTIVATE);
@@ -154,17 +108,7 @@ static void variax_startup4(unsigned long data)
static void variax_startup5(unsigned long data)
{
struct usb_line6_variax *variax = (struct usb_line6_variax *)data;
- CHECK_STARTUP_PROGRESS(variax->startup_progress,
- VARIAX_STARTUP_DUMPREQ);
-
- /* current model dump: */
- line6_dump_request_async(&variax->dumpreq, &variax->line6, 0,
- VARIAX_DUMP_PASS1);
- /* passes 2 and 3 are performed implicitly before entering variax_startup6 */
-}
-static void variax_startup6(struct usb_line6_variax *variax)
-{
CHECK_STARTUP_PROGRESS(variax->startup_progress,
VARIAX_STARTUP_WORKQUEUE);
@@ -172,20 +116,15 @@ static void variax_startup6(struct usb_line6_variax *variax)
schedule_work(&variax->startup_work);
}
-static void variax_startup7(struct work_struct *work)
+static void variax_startup6(struct work_struct *work)
{
struct usb_line6_variax *variax =
container_of(work, struct usb_line6_variax, startup_work);
- struct usb_line6 *line6 = &variax->line6;
CHECK_STARTUP_PROGRESS(variax->startup_progress, VARIAX_STARTUP_SETUP);
/* ALSA audio interface: */
line6_register_audio(&variax->line6);
-
- /* device files: */
- line6_variax_create_files(0, 0, line6->ifcdev);
- variax_create_files2(line6->ifcdev);
}
/*
@@ -196,377 +135,24 @@ void line6_variax_process_message(struct usb_line6_variax *variax)
const unsigned char *buf = variax->line6.buffer_message;
switch (buf[0]) {
- case LINE6_PARAM_CHANGE | LINE6_CHANNEL_HOST:
- switch (buf[1]) {
- case VARIAXMIDI_volume:
- variax->volume = buf[2];
- break;
-
- case VARIAXMIDI_tone:
- variax->tone = buf[2];
- }
-
- break;
-
- case LINE6_PROGRAM_CHANGE | LINE6_CHANNEL_DEVICE:
- case LINE6_PROGRAM_CHANGE | LINE6_CHANNEL_HOST:
- variax->model = buf[1];
- line6_dump_request_async(&variax->dumpreq, &variax->line6, 0,
- VARIAX_DUMP_PASS1);
- break;
-
case LINE6_RESET:
dev_info(variax->line6.ifcdev, "VARIAX reset\n");
break;
case LINE6_SYSEX_BEGIN:
- if (memcmp(buf + 1, variax_request_model1 + 1,
- VARIAX_MODEL_HEADER_LENGTH - 1) == 0) {
- if (variax->line6.message_length ==
- VARIAX_MODEL_MESSAGE_LENGTH) {
- switch (variax->dumpreq.in_progress) {
- case VARIAX_DUMP_PASS1:
- variax_decode(buf +
- VARIAX_MODEL_HEADER_LENGTH,
- (unsigned char *)
- &variax->model_data,
- (sizeof
- (variax->model_data.
- name) +
- sizeof(variax->
- model_data.
- control)
- / 2) * 2);
- line6_dump_request_async
- (&variax->dumpreq, &variax->line6,
- 1, VARIAX_DUMP_PASS2);
- break;
-
- case VARIAX_DUMP_PASS2:
- /* model name is transmitted twice, so skip it here: */
- variax_decode(buf +
- VARIAX_MODEL_HEADER_LENGTH,
- (unsigned char *)
- &variax->
- model_data.control +
- sizeof(variax->model_data.
- control)
- / 2,
- sizeof(variax->model_data.
- control)
- / 2 * 2);
- line6_dump_request_async
- (&variax->dumpreq, &variax->line6,
- 2, VARIAX_DUMP_PASS3);
- }
- } else {
- DEBUG_MESSAGES(dev_err
- (variax->line6.ifcdev,
- "illegal length %d of model data\n",
- variax->line6.message_length));
- line6_dump_finished(&variax->dumpreq);
- }
- } else if (memcmp(buf + 1, variax_request_bank + 1,
- sizeof(variax_request_bank) - 2) == 0) {
- memcpy(variax->bank,
- buf + sizeof(variax_request_bank) - 1,
- sizeof(variax->bank));
- line6_dump_finished(&variax->dumpreq);
- variax_startup6(variax);
- } else if (memcmp(buf + 1, variax_init_model + 1,
- sizeof(variax_init_model) - 1) == 0) {
- memcpy(variax->guitar,
- buf + sizeof(variax_init_model),
- sizeof(variax->guitar));
- } else if (memcmp(buf + 1, variax_init_version + 1,
- sizeof(variax_init_version) - 1) == 0) {
+ if (memcmp(buf + 1, variax_init_version + 1,
+ sizeof(variax_init_version) - 1) == 0) {
variax_startup3(variax);
} else if (memcmp(buf + 1, variax_init_done + 1,
sizeof(variax_init_done) - 1) == 0) {
/* notify of complete initialization: */
variax_startup4((unsigned long)variax);
}
-
break;
-
- case LINE6_SYSEX_END:
- break;
-
- default:
- DEBUG_MESSAGES(dev_err
- (variax->line6.ifcdev,
- "Variax: unknown message %02X\n", buf[0]));
- }
-}
-
-/*
- "read" request on "volume" special file.
-*/
-static ssize_t variax_get_volume(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct usb_line6_variax *variax =
- usb_get_intfdata(to_usb_interface(dev));
- return sprintf(buf, "%d\n", variax->volume);
-}
-
-/*
- "write" request on "volume" special file.
-*/
-static ssize_t variax_set_volume(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct usb_line6_variax *variax =
- usb_get_intfdata(to_usb_interface(dev));
- u8 value;
- int ret;
-
- ret = kstrtou8(buf, 10, &value);
- if (ret)
- return ret;
-
- if (line6_transmit_parameter(&variax->line6, VARIAXMIDI_volume,
- value) == 0)
- variax->volume = value;
-
- return count;
-}
-
-/*
- "read" request on "model" special file.
-*/
-static ssize_t variax_get_model(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct usb_line6_variax *variax =
- usb_get_intfdata(to_usb_interface(dev));
- return sprintf(buf, "%d\n", variax->model);
-}
-
-/*
- "write" request on "model" special file.
-*/
-static ssize_t variax_set_model(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct usb_line6_variax *variax =
- usb_get_intfdata(to_usb_interface(dev));
- u8 value;
- int ret;
-
- ret = kstrtou8(buf, 10, &value);
- if (ret)
- return ret;
-
- if (line6_send_program(&variax->line6, value) == 0)
- variax->model = value;
-
- return count;
-}
-
-/*
- "read" request on "active" special file.
-*/
-static ssize_t variax_get_active(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct usb_line6_variax *variax =
- usb_get_intfdata(to_usb_interface(dev));
- return sprintf(buf, "%d\n",
- variax->buffer_activate[VARIAX_OFFSET_ACTIVATE]);
-}
-
-/*
- "write" request on "active" special file.
-*/
-static ssize_t variax_set_active(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct usb_line6_variax *variax =
- usb_get_intfdata(to_usb_interface(dev));
- u8 value;
- int ret;
-
- ret = kstrtou8(buf, 10, &value);
- if (ret)
- return ret;
-
- variax_activate_async(variax, value ? 1 : 0);
- return count;
-}
-
-/*
- "read" request on "tone" special file.
-*/
-static ssize_t variax_get_tone(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct usb_line6_variax *variax =
- usb_get_intfdata(to_usb_interface(dev));
- return sprintf(buf, "%d\n", variax->tone);
-}
-
-/*
- "write" request on "tone" special file.
-*/
-static ssize_t variax_set_tone(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct usb_line6_variax *variax =
- usb_get_intfdata(to_usb_interface(dev));
- u8 value;
- int ret;
-
- ret = kstrtou8(buf, 10, &value);
- if (ret)
- return ret;
-
- if (line6_transmit_parameter(&variax->line6, VARIAXMIDI_tone,
- value) == 0)
- variax->tone = value;
-
- return count;
-}
-
-static ssize_t get_string(char *buf, const char *data, int length)
-{
- int i;
- memcpy(buf, data, length);
-
- for (i = length; i--;) {
- char c = buf[i];
-
- if ((c != 0) && (c != ' '))
- break;
}
-
- buf[i + 1] = '\n';
- return i + 2;
}
/*
- "read" request on "name" special file.
-*/
-static ssize_t variax_get_name(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct usb_line6_variax *variax =
- usb_get_intfdata(to_usb_interface(dev));
- line6_dump_wait_interruptible(&variax->dumpreq);
- return get_string(buf, variax->model_data.name,
- sizeof(variax->model_data.name));
-}
-
-/*
- "read" request on "bank" special file.
-*/
-static ssize_t variax_get_bank(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct usb_line6_variax *variax =
- usb_get_intfdata(to_usb_interface(dev));
- line6_dump_wait_interruptible(&variax->dumpreq);
- return get_string(buf, variax->bank, sizeof(variax->bank));
-}
-
-/*
- "read" request on "dump" special file.
-*/
-static ssize_t variax_get_dump(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct usb_line6_variax *variax =
- usb_get_intfdata(to_usb_interface(dev));
- int retval;
- retval = line6_dump_wait_interruptible(&variax->dumpreq);
- if (retval < 0)
- return retval;
- memcpy(buf, &variax->model_data.control,
- sizeof(variax->model_data.control));
- return sizeof(variax->model_data.control);
-}
-
-/*
- "read" request on "guitar" special file.
-*/
-static ssize_t variax_get_guitar(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct usb_line6_variax *variax =
- usb_get_intfdata(to_usb_interface(dev));
- return sprintf(buf, "%s\n", variax->guitar);
-}
-
-#ifdef CONFIG_LINE6_USB_RAW
-
-static char *variax_alloc_sysex_buffer(struct usb_line6_variax *variax,
- int code, int size)
-{
- return line6_alloc_sysex_buffer(&variax->line6, VARIAX_SYSEX_CODE, code,
- size);
-}
-
-/*
- "write" request on "raw" special file.
-*/
-static ssize_t variax_set_raw2(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct usb_line6_variax *variax =
- usb_get_intfdata(to_usb_interface(dev));
- int size;
- int i;
- char *sysex;
-
- count -= count % 3;
- size = count * 2;
- sysex = variax_alloc_sysex_buffer(variax, VARIAX_SYSEX_PARAM, size);
-
- if (!sysex)
- return 0;
-
- for (i = 0; i < count; i += 3) {
- const unsigned char *p1 = buf + i;
- char *p2 = sysex + SYSEX_DATA_OFS + i * 2;
- p2[0] = p1[2] & 0x0f;
- p2[1] = p1[2] >> 4;
- p2[2] = p1[1] & 0x0f;
- p2[3] = p1[1] >> 4;
- p2[4] = p1[0] & 0x0f;
- p2[5] = p1[0] >> 4;
- }
-
- line6_send_sysex_message(&variax->line6, sysex, size);
- kfree(sysex);
- return count;
-}
-
-#endif
-
-/* Variax workbench special files: */
-static DEVICE_ATTR(model, S_IWUSR | S_IRUGO, variax_get_model,
- variax_set_model);
-static DEVICE_ATTR(volume, S_IWUSR | S_IRUGO, variax_get_volume,
- variax_set_volume);
-static DEVICE_ATTR(tone, S_IWUSR | S_IRUGO, variax_get_tone, variax_set_tone);
-static DEVICE_ATTR(name, S_IRUGO, variax_get_name, line6_nop_write);
-static DEVICE_ATTR(bank, S_IRUGO, variax_get_bank, line6_nop_write);
-static DEVICE_ATTR(dump, S_IRUGO, variax_get_dump, line6_nop_write);
-static DEVICE_ATTR(active, S_IWUSR | S_IRUGO, variax_get_active,
- variax_set_active);
-static DEVICE_ATTR(guitar, S_IRUGO, variax_get_guitar, line6_nop_write);
-
-#ifdef CONFIG_LINE6_USB_RAW
-static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw);
-static DEVICE_ATTR(raw2, S_IWUSR, line6_nop_read, variax_set_raw2);
-#endif
-
-/*
Variax destructor.
*/
static void variax_destruct(struct usb_interface *interface)
@@ -581,36 +167,10 @@ static void variax_destruct(struct usb_interface *interface)
del_timer(&variax->startup_timer2);
cancel_work_sync(&variax->startup_work);
- /* free dump request data: */
- line6_dumpreq_destructbuf(&variax->dumpreq, 2);
- line6_dumpreq_destructbuf(&variax->dumpreq, 1);
- line6_dumpreq_destruct(&variax->dumpreq);
-
kfree(variax->buffer_activate);
}
/*
- Create sysfs entries.
-*/
-static int variax_create_files2(struct device *dev)
-{
- int err;
- CHECK_RETURN(device_create_file(dev, &dev_attr_model));
- CHECK_RETURN(device_create_file(dev, &dev_attr_volume));
- CHECK_RETURN(device_create_file(dev, &dev_attr_tone));
- CHECK_RETURN(device_create_file(dev, &dev_attr_name));
- CHECK_RETURN(device_create_file(dev, &dev_attr_bank));
- CHECK_RETURN(device_create_file(dev, &dev_attr_dump));
- CHECK_RETURN(device_create_file(dev, &dev_attr_active));
- CHECK_RETURN(device_create_file(dev, &dev_attr_guitar));
-#ifdef CONFIG_LINE6_USB_RAW
- CHECK_RETURN(device_create_file(dev, &dev_attr_raw));
- CHECK_RETURN(device_create_file(dev, &dev_attr_raw2));
-#endif
- return 0;
-}
-
-/*
Try to init workbench device.
*/
static int variax_try_init(struct usb_interface *interface,
@@ -620,36 +180,12 @@ static int variax_try_init(struct usb_interface *interface,
init_timer(&variax->startup_timer1);
init_timer(&variax->startup_timer2);
- INIT_WORK(&variax->startup_work, variax_startup7);
+ INIT_WORK(&variax->startup_work, variax_startup6);
if ((interface == NULL) || (variax == NULL))
return -ENODEV;
/* initialize USB buffers: */
- err = line6_dumpreq_init(&variax->dumpreq, variax_request_model1,
- sizeof(variax_request_model1));
-
- if (err < 0) {
- dev_err(&interface->dev, "Out of memory\n");
- return err;
- }
-
- err = line6_dumpreq_initbuf(&variax->dumpreq, variax_request_model2,
- sizeof(variax_request_model2), 1);
-
- if (err < 0) {
- dev_err(&interface->dev, "Out of memory\n");
- return err;
- }
-
- err = line6_dumpreq_initbuf(&variax->dumpreq, variax_request_bank,
- sizeof(variax_request_bank), 2);
-
- if (err < 0) {
- dev_err(&interface->dev, "Out of memory\n");
- return err;
- }
-
variax->buffer_activate = kmemdup(variax_activate,
sizeof(variax_activate), GFP_KERNEL);
@@ -692,28 +228,8 @@ int line6_variax_init(struct usb_interface *interface,
*/
void line6_variax_disconnect(struct usb_interface *interface)
{
- struct device *dev;
-
if (interface == NULL)
return;
- dev = &interface->dev;
-
- if (dev != NULL) {
- /* remove sysfs entries: */
- line6_variax_remove_files(0, 0, dev);
- device_remove_file(dev, &dev_attr_model);
- device_remove_file(dev, &dev_attr_volume);
- device_remove_file(dev, &dev_attr_tone);
- device_remove_file(dev, &dev_attr_name);
- device_remove_file(dev, &dev_attr_bank);
- device_remove_file(dev, &dev_attr_dump);
- device_remove_file(dev, &dev_attr_active);
- device_remove_file(dev, &dev_attr_guitar);
-#ifdef CONFIG_LINE6_USB_RAW
- device_remove_file(dev, &dev_attr_raw);
- device_remove_file(dev, &dev_attr_raw2);
-#endif
- }
variax_destruct(interface);
}
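
The bulk of what this commit deletes is the Workbench sysfs interface, including the nibble codec used for model dumps: MIDI sysex data bytes must stay below 0x80, so the driver split each 8-bit payload byte into two 4-bit wire bytes and reversed the byte order within every 3-byte group (see the removed variax_decode and variax_set_raw2 hunks above). A minimal standalone sketch of that round trip follows; the helper names variax_encode3/variax_decode3 are hypothetical, and the code assumes only what the removed loops show:

#include <assert.h>
#include <stdio.h>

/* Encode one 3-byte group into six wire bytes, mirroring the removed
 * variax_set_raw2 loop: bytes go out in reverse order, low nibble first,
 * so every wire byte stays below 0x10 (safe inside a sysex payload). */
static void variax_encode3(const unsigned char *in, unsigned char *out)
{
	out[0] = in[2] & 0x0f;
	out[1] = in[2] >> 4;
	out[2] = in[1] & 0x0f;
	out[3] = in[1] >> 4;
	out[4] = in[0] & 0x0f;
	out[5] = in[0] >> 4;
}

/* Decode six wire bytes back into three payload bytes, one iteration of
 * the removed variax_decode loop. */
static void variax_decode3(const unsigned char *raw, unsigned char *data)
{
	data[2] = raw[0] | (raw[1] << 4);
	data[1] = raw[2] | (raw[3] << 4);
	data[0] = raw[4] | (raw[5] << 4);
}

int main(void)
{
	const unsigned char src[3] = { 0xde, 0xad, 0xbf };
	unsigned char wire[6], back[3];
	int i;

	variax_encode3(src, wire);
	variax_decode3(wire, back);

	for (i = 0; i < 6; i++)
		assert(wire[i] < 0x10);		/* sysex-safe 7-bit bytes */
	for (i = 0; i < 3; i++)
		assert(back[i] == src[i]);	/* lossless round trip */

	printf("round trip OK\n");
	return 0;
}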
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc66
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h1589
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc66
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h1524
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h46
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc57
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc64
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c121
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c71
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/base.c374
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fan.c282
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c107
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c122
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/ic.c121
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c224
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c197
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c234
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c99
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c172
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/priv.h156
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/temp.c264
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/base.c94
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/gk20a.c57
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c264
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/nv04.h27
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/base.c483
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c151
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h19
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c159
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c249
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c240
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c242
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/volt/base.c198
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c96
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c56
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/Makefile11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c (renamed from drivers/gpu/drm/nouveau/nouveau_calc.c)255
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c (renamed from drivers/gpu/drm/nouveau/nv04_crtc.c)371
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/cursor.c (renamed from drivers/gpu/drm/nouveau/nv04_cursor.c)16
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c (renamed from drivers/gpu/drm/nouveau/nv04_dac.c)232
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c (renamed from drivers/gpu/drm/nouveau/nv04_dfp.c)315
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c208
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h188
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c (renamed from drivers/gpu/drm/nouveau/nouveau_hw.c)479
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.h (renamed from drivers/gpu/drm/nouveau/nouveau_hw.h)200
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/nvreg.h (renamed from drivers/gpu/drm/nouveau/nvreg.h)28
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c497
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c (renamed from drivers/gpu/drm/nouveau/nv17_tv_modes.c)63
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c (renamed from drivers/gpu/drm/nouveau/nv04_tv.c)198
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c (renamed from drivers/gpu/drm/nouveau/nv17_tv.c)357
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.h (renamed from drivers/gpu/drm/nouveau/nv17_tv.h)23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c507
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h115
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c466
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.c198
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c216
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c5230
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h225
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c1447
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h103
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c398
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c468
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c1117
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h68
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c107
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c855
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h101
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c242
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h123
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c567
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c1059
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h168
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c409
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1303
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h52
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c582
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h34
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c378
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h98
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c1081
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h45
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.c161
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c652
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.h (renamed from drivers/gpu/drm/nouveau/nouveau_i2c.h)41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c269
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioc32.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioctl.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c702
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c572
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c196
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c1294
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c104
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h232
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c346
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c850
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.c164
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.h19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c397
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.h25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c118
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.h8
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c287
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fb.c21
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c219
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c111
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c271
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c579
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c208
-rw-r--r--drivers/gpu/drm/nouveau/nv04_mc.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv04_timer.c51
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fb.c24
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c110
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.h19
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fifo.c260
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c1003
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c149
-rw-r--r--drivers/gpu/drm/nouveau/nv17_gpio.c92
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c780
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fb.c62
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c314
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c428
-rw-r--r--drivers/gpu/drm/nouveau/nv40_mc.c38
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c769
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c156
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c304
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2833
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h25
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.h113
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c232
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c123
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c494
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c389
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c509
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c309
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c276
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c262
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c90
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig26
-rw-r--r--drivers/gpu/drm/omapdrm/Makefile21
-rw-r--r--drivers/gpu/drm/omapdrm/TODO23
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c319
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c739
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c125
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_priv.h188
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c996
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.h141
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c770
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h304
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c191
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c485
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c386
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c1485
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c201
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c342
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c455
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.c703
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.h95
-rw-r--r--drivers/gpu/drm/omapdrm/tcm.h328
-rw-r--r--drivers/gpu/drm/panel/Kconfig33
-rw-r--r--drivers/gpu/drm/panel/Makefile3
-rw-r--r--drivers/gpu/drm/panel/panel-ld9040.c379
-rw-r--r--drivers/gpu/drm/panel/panel-s6e8aa0.c1070
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c702
-rw-r--r--drivers/gpu/drm/qxl/Kconfig15
-rw-r--r--drivers/gpu/drm/qxl/Makefile9
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c693
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c141
-rw-r--r--drivers/gpu/drm/qxl/qxl_dev.h879
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c1043
-rw-r--r--drivers/gpu/drm/qxl/qxl_draw.c487
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c264
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h570
-rw-r--r--drivers/gpu/drm/qxl/qxl_dumb.c86
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c715
-rw-r--r--drivers/gpu/drm/qxl/qxl_fence.c91
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c117
-rw-r--r--drivers/gpu/drm/qxl/qxl_image.c237
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c454
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c100
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c341
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c324
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h105
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c359
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c580
-rw-r--r--drivers/gpu/drm/r128/r128_cce.c65
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c57
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h128
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c8
-rw-r--r--drivers/gpu/drm/r128/r128_irq.c11
-rw-r--r--drivers/gpu/drm/r128/r128_state.c225
-rw-r--r--drivers/gpu/drm/radeon/Kconfig35
-rw-r--r--drivers/gpu/drm/radeon/Makefile68
-rw-r--r--drivers/gpu/drm/radeon/ObjectID.h894
-rw-r--r--drivers/gpu/drm/radeon/atom.c291
-rw-r--r--drivers/gpu/drm/radeon/atom.h15
-rw-r--r--drivers/gpu/drm/radeon/atombios.h9606
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c1961
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c1123
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c2713
-rw-r--r--drivers/gpu/drm/radeon/atombios_i2c.c157
-rw-r--r--drivers/gpu/drm/radeon/avivod.h2
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c2815
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.h59
-rw-r--r--drivers/gpu/drm/radeon/btcd.h185
-rw-r--r--drivers/gpu/drm/radeon/cayman_blit_shaders.c320
-rw-r--r--drivers/gpu/drm/radeon/cayman_blit_shaders.h35
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c5327
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.h332
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c280
-rw-r--r--drivers/gpu/drm/radeon/cik.c9466
-rw-r--r--drivers/gpu/drm/radeon/cik_blit_shaders.c246
-rw-r--r--drivers/gpu/drm/radeon/cik_blit_shaders.h32
-rw-r--r--drivers/gpu/drm/radeon/cik_reg.h150
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c870
-rw-r--r--drivers/gpu/drm/radeon/cikd.h2072
-rw-r--r--drivers/gpu/drm/radeon/clearstate_cayman.h1081
-rw-r--r--drivers/gpu/drm/radeon/clearstate_ci.h944
-rw-r--r--drivers/gpu/drm/radeon/clearstate_defs.h44
-rw-r--r--drivers/gpu/drm/radeon/clearstate_evergreen.h1080
-rw-r--r--drivers/gpu/drm/radeon/clearstate_si.h941
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c2162
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.h160
-rw-r--r--drivers/gpu/drm/radeon/dce3_1_afmt.c244
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c352
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c5713
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_shaders.c303
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_shaders.h35
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c3517
-rw-r--r--drivers/gpu/drm/radeon/evergreen_dma.c184
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c489
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h254
-rw-r--r--drivers/gpu/drm/radeon/evergreen_smc.h67
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h2572
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2830
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.h200
-rw-r--r--drivers/gpu/drm/radeon/kv_smc.c215
-rw-r--r--drivers/gpu/drm/radeon/mkregtable.c24
-rw-r--r--drivers/gpu/drm/radeon/ni.c2520
-rw-r--r--drivers/gpu/drm/radeon/ni_dma.c408
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c4364
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.h250
-rw-r--r--drivers/gpu/drm/radeon/ni_reg.h86
-rw-r--r--drivers/gpu/drm/radeon/nid.h1286
-rw-r--r--drivers/gpu/drm/radeon/nislands_smc.h329
-rw-r--r--drivers/gpu/drm/radeon/ppsmc.h175
-rw-r--r--drivers/gpu/drm/radeon/pptable.h682
-rw-r--r--drivers/gpu/drm/radeon/r100.c2865
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h127
-rw-r--r--drivers/gpu/drm/radeon/r100d.h179
-rw-r--r--drivers/gpu/drm/radeon/r200.c150
-rw-r--r--drivers/gpu/drm/radeon/r300.c772
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c291
-rw-r--r--drivers/gpu/drm/radeon/r300_reg.h8
-rw-r--r--drivers/gpu/drm/radeon/r300d.h61
-rw-r--r--drivers/gpu/drm/radeon/r420.c217
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h120
-rw-r--r--drivers/gpu/drm/radeon/r520.c104
-rw-r--r--drivers/gpu/drm/radeon/r600.c3118
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c296
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c168
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c813
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.c1169
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.h24
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c338
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c2407
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c500
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c1357
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.h233
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c706
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h60
-rw-r--r--drivers/gpu/drm/radeon/r600d.h1468
-rw-r--r--drivers/gpu/drm/radeon/radeon.h2659
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c790
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.h445
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c57
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c2569
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h1115
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c3207
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c583
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c235
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c317
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c186
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c1623
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c1474
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c82
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c795
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c163
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c1616
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c1603
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c512
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h58
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c1421
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h27
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c421
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c929
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c258
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c498
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c1009
-rw-r--r--drivers/gpu/drm/radeon/radeon_ioc32.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq.c23
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c419
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c748
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c271
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c654
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c51
-rw-r--r--drivers/gpu/drm/radeon/radeon_mem.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h583
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c468
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h104
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c1652
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c105
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h94
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c905
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c419
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c205
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c311
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c483
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h191
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace_points.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c732
-rw-r--r--drivers/gpu/drm/radeon/radeon_ucode.h156
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c964
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c771
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c1089
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/cayman642
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/evergreen644
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r2002
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r30019
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r420780
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r600756
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rs60087
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rv51546
-rw-r--r--drivers/gpu/drm/radeon/rs400.c195
-rw-r--r--drivers/gpu/drm/radeon/rs600.c706
-rw-r--r--drivers/gpu/drm/radeon/rs600d.h147
-rw-r--r--drivers/gpu/drm/radeon/rs690.c736
-rw-r--r--drivers/gpu/drm/radeon/rs690d.h6
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c1053
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.h109
-rw-r--r--drivers/gpu/drm/radeon/rs780d.h171
-rw-r--r--drivers/gpu/drm/radeon/rv515.c873
-rw-r--r--drivers/gpu/drm/radeon/rv515d.h57
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c2112
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.h95
-rw-r--r--drivers/gpu/drm/radeon/rv6xxd.h246
-rw-r--r--drivers/gpu/drm/radeon/rv730_dpm.c508
-rw-r--r--drivers/gpu/drm/radeon/rv730d.h165
-rw-r--r--drivers/gpu/drm/radeon/rv740_dpm.c416
-rw-r--r--drivers/gpu/drm/radeon/rv740d.h117
-rw-r--r--drivers/gpu/drm/radeon/rv770.c1586
-rw-r--r--drivers/gpu/drm/radeon/rv770_dma.c97
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c2537
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.h286
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.c631
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.h207
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h677
-rw-r--r--drivers/gpu/drm/radeon/si.c7318
-rw-r--r--drivers/gpu/drm/radeon/si_blit_shaders.c253
-rw-r--r--drivers/gpu/drm/radeon/si_blit_shaders.h32
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c243
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c6499
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.h227
-rw-r--r--drivers/gpu/drm/radeon/si_reg.h105
-rw-r--r--drivers/gpu/drm/radeon/si_smc.c298
-rw-r--r--drivers/gpu/drm/radeon/sid.h1849
-rw-r--r--drivers/gpu/drm/radeon/sislands_smc.h395
-rw-r--r--drivers/gpu/drm/radeon/smu7.h170
-rw-r--r--drivers/gpu/drm/radeon/smu7_discrete.h486
-rw-r--r--drivers/gpu/drm/radeon/smu7_fusion.h300
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c1912
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.h223
-rw-r--r--drivers/gpu/drm/radeon/sumo_smc.c221
-rw-r--r--drivers/gpu/drm/radeon/sumod.h372
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c1987
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.h134
-rw-r--r--drivers/gpu/drm/radeon/trinity_smc.c127
-rw-r--r--drivers/gpu/drm/radeon/trinityd.h228
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c444
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c165
-rw-r--r--drivers/gpu/drm/radeon/uvd_v3_1.c57
-rw-r--r--drivers/gpu/drm/radeon/uvd_v4_2.c68
-rw-r--r--drivers/gpu/drm/radeon/vce_v1_0.c187
-rw-r--r--drivers/gpu/drm/radeon/vce_v2_0.c181
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig19
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile12
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c610
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h55
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c320
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h107
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c202
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.h49
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c187
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.h50
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c293
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.h39
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c124
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h25
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c192
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h46
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c516
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.h81
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_regs.h513
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.c89
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.h23
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds_regs.h69
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c114
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c52
-rw-r--r--drivers/gpu/drm/savage/savage_drv.h7
-rw-r--r--drivers/gpu/drm/savage/savage_state.c23
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig13
-rw-r--r--drivers/gpu/drm/shmobile/Makefile7
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_backlight.c90
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_backlight.h23
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c756
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.h60
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c360
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.h47
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.c160
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.h34
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_plane.c263
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_plane.h22
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_regs.h311
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c91
-rw-r--r--drivers/gpu/drm/sis/sis_drv.h13
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c249
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c47
-rw-r--r--drivers/gpu/drm/tegra/Kconfig43
-rw-r--r--drivers/gpu/drm/tegra/Makefile18
-rw-r--r--drivers/gpu/drm/tegra/dc.c1446
-rw-r--r--drivers/gpu/drm/tegra/dc.h440
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c573
-rw-r--r--drivers/gpu/drm/tegra/dpaux.h74
-rw-r--r--drivers/gpu/drm/tegra/drm.c776
-rw-r--r--drivers/gpu/drm/tegra/drm.h297
-rw-r--r--drivers/gpu/drm/tegra/dsi.c993
-rw-r--r--drivers/gpu/drm/tegra/dsi.h130
-rw-r--r--drivers/gpu/drm/tegra/fb.c400
-rw-r--r--drivers/gpu/drm/tegra/gem.c445
-rw-r--r--drivers/gpu/drm/tegra/gem.h59
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c219
-rw-r--r--drivers/gpu/drm/tegra/gr2d.h28
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c356
-rw-r--r--drivers/gpu/drm/tegra/gr3d.h27
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c1581
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h541
-rw-r--r--drivers/gpu/drm/tegra/mipi-phy.c124
-rw-r--r--drivers/gpu/drm/tegra/mipi-phy.h51
-rw-r--r--drivers/gpu/drm/tegra/output.c331
-rw-r--r--drivers/gpu/drm/tegra/rgb.c325
-rw-r--r--drivers/gpu/drm/tegra/sor.c1466
-rw-r--r--drivers/gpu/drm/tegra/sor.h282
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig14
-rw-r--r--drivers/gpu/drm/tilcdc/Makefile13
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c685
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c642
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.h173
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c437
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.h26
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_regs.h155
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave.c406
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave.h26
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c419
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.h26
-rw-r--r--drivers/gpu/drm/ttm/Makefile5
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c117
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c1276
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c155
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c359
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c435
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c193
-rw-r--r--drivers/gpu/drm/ttm/ttm_lock.c21
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c47
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c450
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c931
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c1147
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c491
-rw-r--r--drivers/gpu/drm/udl/Kconfig14
-rw-r--r--drivers/gpu/drm/udl/Makefile6
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c164
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c117
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h144
-rw-r--r--drivers/gpu/drm/udl/udl_encoder.c80
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c663
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c292
-rw-r--r--drivers/gpu/drm/udl/udl_main.c335
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c438
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c264
-rw-r--r--drivers/gpu/drm/via/via_dma.c169
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c98
-rw-r--r--drivers/gpu/drm/via/via_dmablit.h8
-rw-r--r--drivers/gpu/drm/via/via_drv.c78
-rw-r--r--drivers/gpu/drm/via/via_drv.h33
-rw-r--r--drivers/gpu/drm/via/via_irq.c32
-rw-r--r--drivers/gpu/drm/via/via_map.c18
-rw-r--r--drivers/gpu/drm/via/via_mm.c154
-rw-r--r--drivers/gpu/drm/via/via_verifier.c52
-rw-r--r--drivers/gpu/drm/via/via_verifier.h4
-rw-r--r--drivers/gpu/drm/via/via_video.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig18
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile4
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_reg.h1014
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h912
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_escape.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_overlay.h22
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_reg.h319
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c746
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c903
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c315
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c1109
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h791
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2588
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c206
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c1155
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h120
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c290
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c252
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c172
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c375
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c203
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c1814
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h69
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c344
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_marker.c171
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c656
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c227
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_prime.c137
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c1810
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h84
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c576
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c811
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c1431
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c29
1393 files changed, 525431 insertions, 75160 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 96eddd17e05..f5120046ff8 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -6,9 +6,11 @@
#
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
- depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
+ depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
+ select HDMI
select I2C
select I2C_ALGOBIT
+ select DMA_SHARED_BUFFER
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -16,15 +18,44 @@ menuconfig DRM
These modules provide support for synchronization, security, and
DMA transfers. Please see <http://dri.sourceforge.net/> for more
details. You should also select and configure AGP
- (/dev/agpgart) support.
+ (/dev/agpgart) support if it is available for your platform.
+
+config DRM_MIPI_DSI
+ bool
+ depends on DRM
+
+config DRM_USB
+ tristate
+ depends on DRM
+ depends on USB_SUPPORT && USB_ARCH_HAS_HCD
+ select USB
config DRM_KMS_HELPER
tristate
depends on DRM
+ help
+ CRTC helpers for KMS drivers.
+
+config DRM_KMS_FB_HELPER
+ bool
+ depends on DRM_KMS_HELPER
select FB
- select FRAMEBUFFER_CONSOLE if !EMBEDDED
+ select FRAMEBUFFER_CONSOLE if !EXPERT
+ select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
+ help
+ FBDEV helpers for KMS drivers.
+
+config DRM_LOAD_EDID_FIRMWARE
+	bool "Allow specifying an EDID data set instead of probing for it"
+ depends on DRM_KMS_HELPER
help
- FB and CRTC helpers for KMS drivers.
+	  Say Y here if you want EDID data to be loaded from the
+	  /lib/firmware directory or from one of the provided built-in
+	  data sets. This may be necessary if the graphics adapter or
+	  monitor is unable to provide appropriate EDID data. Since this
+	  feature is provided as a workaround for broken hardware, the
+	  default is N. Details and instructions on how to build your
+	  own EDID data are given in Documentation/EDID/HOWTO.txt.
config DRM_TTM
tristate
@@ -34,6 +65,26 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver
uses it.
+config DRM_GEM_CMA_HELPER
+ bool
+ depends on DRM
+ help
+ Choose this if you need the GEM CMA helper functions
+
+config DRM_KMS_CMA_HELPER
+ bool
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_FB_HELPER
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ help
+ Choose this if you need the KMS CMA helper functions
+
+source "drivers/gpu/drm/i2c/Kconfig"
+
+source "drivers/gpu/drm/bridge/Kconfig"
+
config DRM_TDFX
tristate "3dfx Banshee/Voodoo3+"
depends on DRM && PCI
@@ -58,7 +109,11 @@ config DRM_RADEON
select FB_CFB_IMAGEBLIT
select FW_LOADER
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_TTM
+ select POWER_SUPPLY
+ select HWMON
+ select BACKLIGHT_CLASS_DEVICE
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
@@ -66,67 +121,24 @@ config DRM_RADEON
If M is selected, the module will be called radeon.
+source "drivers/gpu/drm/radeon/Kconfig"
+
+source "drivers/gpu/drm/nouveau/Kconfig"
+
config DRM_I810
tristate "Intel I810"
- depends on DRM && AGP && AGP_INTEL
+ # !PREEMPT because of missing ioctl locking
+ depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
help
Choose this option if you have an Intel I810 graphics card. If M is
selected, the module will be called i810. AGP support is required
for this driver to work.
-choice
- prompt "Intel 830M, 845G, 852GM, 855GM, 865G"
- depends on DRM && AGP && AGP_INTEL
- optional
-
-config DRM_I830
- tristate "i830 driver"
- help
- Choose this option if you have a system that has Intel 830M, 845G,
- 852GM, 855GM or 865G integrated graphics. If M is selected, the
- module will be called i830. AGP support is required for this driver
- to work. This driver is used by the older X releases X.org 6.7 and
- XFree86 4.3. If unsure, build this and i915 as modules and the X server
- will load the correct one.
-
-config DRM_I915
- tristate "i915 driver"
- depends on AGP_INTEL
- select SHMEM
- select DRM_KMS_HELPER
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- # i915 depends on ACPI_VIDEO when ACPI is enabled
- # but for select to work, need to select ACPI_VIDEO's dependencies, ick
- select VIDEO_OUTPUT_CONTROL if ACPI
- select BACKLIGHT_CLASS_DEVICE if ACPI
- select INPUT if ACPI
- select ACPI_VIDEO if ACPI
- select ACPI_BUTTON if ACPI
- help
- Choose this option if you have a system that has Intel 830M, 845G,
- 852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
- module will be called i915. AGP support is required for this driver
- to work. This driver is used by the Intel driver in X.org 6.8 and
- XFree86 4.4 and above. If unsure, build this and i830 as modules and
- the X server will load the correct one.
-
-config DRM_I915_KMS
- bool "Enable modesetting on intel by default"
- depends on DRM_I915
- help
- Choose this option if you want kernel modesetting enabled by default,
- and you have a new enough userspace to support this. Running old
- userspaces with this enabled will cause pain. Note that this causes
- the driver to bind to PCI devices, which precludes loading things
- like intelfb.
-
-endchoice
+source "drivers/gpu/drm/i915/Kconfig"
config DRM_MGA
tristate "Matrox g200/g400"
- depends on DRM
+ depends on DRM && PCI
select FW_LOADER
help
Choose this option if you have a Matrox G200, G400 or G450 graphics
@@ -144,14 +156,48 @@ config DRM_SIS
config DRM_VIA
tristate "Via unichrome video cards"
- depends on DRM
+ depends on DRM && PCI
help
Choose this option if you have a Via unichrome or compatible video
chipset. If M is selected the module will be called via.
config DRM_SAVAGE
tristate "Savage video cards"
- depends on DRM
+ depends on DRM && PCI
help
Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
chipset. If M is selected the module will be called savage.
+
+source "drivers/gpu/drm/exynos/Kconfig"
+
+source "drivers/gpu/drm/vmwgfx/Kconfig"
+
+source "drivers/gpu/drm/gma500/Kconfig"
+
+source "drivers/gpu/drm/udl/Kconfig"
+
+source "drivers/gpu/drm/ast/Kconfig"
+
+source "drivers/gpu/drm/mgag200/Kconfig"
+
+source "drivers/gpu/drm/cirrus/Kconfig"
+
+source "drivers/gpu/drm/armada/Kconfig"
+
+source "drivers/gpu/drm/rcar-du/Kconfig"
+
+source "drivers/gpu/drm/shmobile/Kconfig"
+
+source "drivers/gpu/drm/omapdrm/Kconfig"
+
+source "drivers/gpu/drm/tilcdc/Kconfig"
+
+source "drivers/gpu/drm/qxl/Kconfig"
+
+source "drivers/gpu/drm/bochs/Kconfig"
+
+source "drivers/gpu/drm/msm/Kconfig"
+
+source "drivers/gpu/drm/tegra/Kconfig"
+
+source "drivers/gpu/drm/panel/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 39c5aa75b8f..dd2ba426974 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -4,33 +4,65 @@
ccflags-y := -Iinclude/drm
-drm-y := drm_auth.o drm_bufs.o drm_cache.o \
- drm_context.o drm_dma.o drm_drawable.o \
+drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
+ drm_context.o drm_dma.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
- drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
- drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+ drm_lock.o drm_memory.o drm_stub.o drm_vm.o \
+ drm_agpsupport.o drm_scatter.o drm_pci.o \
+ drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
- drm_info.o drm_debugfs.o drm_encoder_slave.o
+ drm_info.o drm_debugfs.o drm_encoder_slave.o \
+ drm_trace_points.o drm_global.o drm_prime.o \
+ drm_rect.o drm_vma_manager.o drm_flip_work.o \
+ drm_modeset_lock.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
+drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+drm-$(CONFIG_PCI) += ati_pcigart.o
+drm-$(CONFIG_DRM_PANEL) += drm_panel.o
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
+drm-usb-y := drm_usb.o
+
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
+ drm_plane_helper.o
+drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
+drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
+CFLAGS_drm_trace_points.o := -I$(src)
+
obj-$(CONFIG_DRM) += drm.o
+obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
+obj-$(CONFIG_DRM_USB) += drm_usb.o
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
-obj-$(CONFIG_DRM_I830) += i830/
obj-$(CONFIG_DRM_I915) += i915/
+obj-$(CONFIG_DRM_MGAG200) += mgag200/
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
+obj-$(CONFIG_DRM_EXYNOS) +=exynos/
+obj-$(CONFIG_DRM_GMA500) += gma500/
+obj-$(CONFIG_DRM_UDL) += udl/
+obj-$(CONFIG_DRM_AST) += ast/
+obj-$(CONFIG_DRM_ARMADA) += armada/
+obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
+obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
+obj-$(CONFIG_DRM_OMAP) += omapdrm/
+obj-$(CONFIG_DRM_TILCDC) += tilcdc/
+obj-$(CONFIG_DRM_QXL) += qxl/
+obj-$(CONFIG_DRM_BOCHS) += bochs/
+obj-$(CONFIG_DRM_MSM) += msm/
+obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-y += i2c/
+obj-y += panel/
+obj-y += bridge/
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
new file mode 100644
index 00000000000..50ae88ad4d7
--- /dev/null
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -0,0 +1,25 @@
+config DRM_ARMADA
+ tristate "DRM support for Marvell Armada SoCs"
+ depends on DRM && HAVE_CLK && ARM
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ help
+	  Support the "LCD" controllers found on the Marvell Armada 510
+	  devices. There are two controllers on the device; each supports
+	  graphics and video overlays.
+
+	  This driver provides no built-in acceleration; acceleration is
+	  performed by other IP found on the SoC. It provides kernel mode
+	  setting and buffer management to userspace.
+
+config DRM_ARMADA_TDA1998X
+ bool "Support TDA1998X HDMI output"
+ depends on DRM_ARMADA != n
+ depends on I2C && DRM_I2C_NXP_TDA998X = y
+ default y
+ help
+ Support the TDA1998x HDMI output device found on the Solid-Run
+ CuBox.
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
new file mode 100644
index 00000000000..d6f43e06150
--- /dev/null
+++ b/drivers/gpu/drm/armada/Makefile
@@ -0,0 +1,7 @@
+armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
+ armada_gem.o armada_output.o armada_overlay.o \
+ armada_slave.o
+armada-y += armada_510.o
+armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
+
+obj-$(CONFIG_DRM_ARMADA) := armada.o
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
new file mode 100644
index 00000000000..59948eff609
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Armada 510 (aka Dove) variant support
+ */
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_hw.h"
+
+static int armada510_init(struct armada_private *priv, struct device *dev)
+{
+ priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1");
+
+ if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT)
+ priv->extclk[0] = ERR_PTR(-EPROBE_DEFER);
+
+ return PTR_RET(priv->extclk[0]);
+}
+
+static int armada510_crtc_init(struct armada_crtc *dcrtc)
+{
+	/* Lower the watermark to eliminate jitter at higher bandwidths */
+ armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+ return 0;
+}
+
+/*
+ * Armada510 specific SCLK register selection.
+ * This gets called with sclk = NULL to test whether the mode is
+ * supportable, and again with sclk != NULL to set the clocks up for
+ * that. The former can return an error, but the latter is expected
+ * not to.
+ *
+ * The current implementation is rudimentary: we always select
+ * EXT_REF_CLK_1 for LCD0 and return an error for LCD1. This needs
+ * improvement!
+ */
+static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
+ const struct drm_display_mode *mode, uint32_t *sclk)
+{
+ struct armada_private *priv = dcrtc->crtc.dev->dev_private;
+ struct clk *clk = priv->extclk[0];
+ int ret;
+
+ if (dcrtc->num == 1)
+ return -EINVAL;
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (dcrtc->clk != clk) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+ dcrtc->clk = clk;
+ }
+
+ if (sclk) {
+ uint32_t rate, ref, div;
+
+ rate = mode->clock * 1000;
+ ref = clk_round_rate(clk, rate);
+ div = DIV_ROUND_UP(ref, rate);
+ if (div < 1)
+ div = 1;
+
+ clk_set_rate(clk, ref);
+ *sclk = div | SCLK_510_EXTCLK1;
+ }
+
+ return 0;
+}
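To make the divider selection above concrete, here is a minimal worked sketch; the 297 MHz reference is an assumed clk_round_rate() result for a 1080p60 mode, purely for illustration:

	/* Worked example of the divider math in armada510_crtc_compute_clock().
	 * The 297 MHz reference rate is an assumption for illustration. */
	uint32_t rate = 148500 * 1000;           /* 1080p60 pixel clock: 148.5 MHz */
	uint32_t ref  = 297000000;               /* assumed clk_round_rate() result */
	uint32_t div  = DIV_ROUND_UP(ref, rate); /* = 2 */

	/* *sclk = 2 | SCLK_510_EXTCLK1: scan out at EXT_REF_CLK_1 / 2 */

clk_set_rate(clk, ref) then programs the reference clock, and the hardware divides it by div to produce the pixel clock.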
+
+const struct armada_variant armada510_ops = {
+ .has_spu_adv_reg = true,
+ .spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
+ .init = armada510_init,
+ .crtc_init = armada510_crtc_init,
+ .crtc_compute_clock = armada510_crtc_compute_clock,
+};
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
new file mode 100644
index 00000000000..81c34f949df
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -0,0 +1,1100 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+
+struct armada_frame_work {
+ struct drm_pending_vblank_event *event;
+ struct armada_regs regs[4];
+ struct drm_framebuffer *old_fb;
+};
+
+enum csc_mode {
+ CSC_AUTO = 0,
+ CSC_YUV_CCIR601 = 1,
+ CSC_YUV_CCIR709 = 2,
+ CSC_RGB_COMPUTER = 1,
+ CSC_RGB_STUDIO = 2,
+};
+
+/*
+ * A note about interlacing. Let's consider HDMI 1920x1080i.
+ * The timing parameters we have from X are:
+ * Hact HsyA HsyI Htot Vact VsyA VsyI Vtot
+ * 1920 2448 2492 2640 1080 1084 1094 1125
+ * Which get translated to:
+ * Hact HsyA HsyI Htot Vact VsyA VsyI Vtot
+ * 1920 2448 2492 2640 540 542 547 562
+ *
+ * This is how it is defined by CEA-861-D - line and pixel numbers are
+ * referenced to the rising edge of VSYNC and HSYNC. Total clocks per
+ * line: 2640. In the odd frame, the first active line is line 21; in
+ * the even frame, the first active line is line 584.
+ *
+ * LN: 560 561 562 563 567 568 569
+ * DE: ~~~|____________________________//__________________________
+ * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
+ * VSYNC: _________________________|~~~~~~//~~~~~~~~~~~~~~~|__________
+ * 22 blanking lines. VSYNC at 1320 (referenced to the HSYNC rising edge).
+ *
+ * LN: 1123 1124 1125 1 5 6 7
+ * DE: ~~~|____________________________//__________________________
+ * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
+ * VSYNC: ____________________|~~~~~~~~~~~//~~~~~~~~~~|_______________
+ * 23 blanking lines
+ *
+ * The Armada LCD Controller line and pixel numbers are, like X timings,
+ * referenced to the top left of the active frame.
+ *
+ * So, translating these to our LCD controller:
+ * Odd frame, 563 total lines, VSYNC at line 543-548, pixel 1128.
+ * Even frame, 562 total lines, VSYNC at line 542-547, pixel 2448.
+ * Note: Vsync front porch remains constant!
+ *
+ * if (odd_frame) {
+ * vtotal = mode->crtc_vtotal + 1;
+ * vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay + 1;
+ * vhorizpos = mode->crtc_hsync_start - mode->crtc_htotal / 2
+ * } else {
+ * vtotal = mode->crtc_vtotal;
+ * vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay;
+ * vhorizpos = mode->crtc_hsync_start;
+ * }
+ * vfrontporch = mode->crtc_vtotal - mode->crtc_vsync_end;
+ *
+ * So, we need to reprogram these registers on each vsync event:
+ * LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
+ *
+ * Note: we do not use the frame done interrupts because these appear
+ * to happen too early, and lead to jitter on the display (presumably
+ * they occur at the end of the last active line, before the vsync back
+ * porch, which we're reprogramming.)
+ */
+
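The pseudo-code at the end of the comment above translates directly into C. A minimal sketch, with a struct invented here purely for illustration:

	/* Per-field vertical timing selection, following the comment's
	 * pseudo-code verbatim. struct field_timings is hypothetical. */
	struct field_timings {
		unsigned vtotal, vbackporch, vhorizpos, vfrontporch;
	};

	static struct field_timings calc_field(const struct drm_display_mode *mode,
					       bool odd_frame)
	{
		struct field_timings t;

		if (odd_frame) {
			t.vtotal = mode->crtc_vtotal + 1;
			t.vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay + 1;
			t.vhorizpos = mode->crtc_hsync_start - mode->crtc_htotal / 2;
		} else {
			t.vtotal = mode->crtc_vtotal;
			t.vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay;
			t.vhorizpos = mode->crtc_hsync_start;
		}
		t.vfrontporch = mode->crtc_vtotal - mode->crtc_vsync_end;
		return t;
	}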
+void
+armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs)
+{
+ while (regs->offset != ~0) {
+ void __iomem *reg = dcrtc->base + regs->offset;
+ uint32_t val;
+
+ val = regs->mask;
+ if (val != 0)
+ val &= readl_relaxed(reg);
+ writel_relaxed(val | regs->val, reg);
+ ++regs;
+ }
+}
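For reference, a hand-built update list for the loop above might look like the sketch below; the offset/mask/val field layout is inferred from the loop, and the driver normally fills such lists with the armada_reg_queue_*() helpers used further down:

	/* Sketch: a plain write plus a read-modify-write, terminated by a
	 * sentinel entry. The field layout is an assumption from the loop. */
	static void example_update(struct armada_crtc *dcrtc,
				   uint32_t addr, uint32_t pitch)
	{
		struct armada_regs regs[3];

		regs[0].offset = LCD_CFG_GRA_START_ADDR0;
		regs[0].mask = 0;		/* mask == 0: plain write of val */
		regs[0].val = addr;

		regs[1].offset = LCD_CFG_GRA_PITCH;
		regs[1].mask = ~0xffffU;	/* preserve bits [31:16] */
		regs[1].val = pitch;		/* replace bits [15:0] */

		regs[2].offset = ~0;		/* sentinel: ends the list */

		armada_drm_crtc_update_regs(dcrtc, regs);
	}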
+
+#define dpms_blanked(dpms) ((dpms) != DRM_MODE_DPMS_ON)
+
+static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
+{
+ uint32_t dumb_ctrl;
+
+ dumb_ctrl = dcrtc->cfg_dumb_ctrl;
+
+ if (!dpms_blanked(dcrtc->dpms))
+ dumb_ctrl |= CFG_DUMB_ENA;
+
+ /*
+ * When the dumb interface isn't in DUMB24_RGB888_0 mode, it might
+ * be using SPI or GPIO. If we set this to DUMB_BLANK, we will
+ * force LCD_D[23:0] to output blank color, overriding the GPIO or
+ * SPI usage. So leave it as-is unless in DUMB24_RGB888_0 mode.
+ */
+ if (dpms_blanked(dcrtc->dpms) &&
+ (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
+ dumb_ctrl &= ~DUMB_MASK;
+ dumb_ctrl |= DUMB_BLANK;
+ }
+
+ /*
+ * The documentation doesn't indicate what the normal state of
+ * the sync signals are. Sebastian Hesselbart kindly probed
+ * these signals on his board to determine their state.
+ *
+ * The non-inverted state of the sync signals is active high.
+ * Setting these bits makes the appropriate signal active low.
+ */
+ if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NCSYNC)
+ dumb_ctrl |= CFG_INV_CSYNC;
+ if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NHSYNC)
+ dumb_ctrl |= CFG_INV_HSYNC;
+ if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NVSYNC)
+ dumb_ctrl |= CFG_INV_VSYNC;
+
+ if (dcrtc->dumb_ctrl != dumb_ctrl) {
+ dcrtc->dumb_ctrl = dumb_ctrl;
+ writel_relaxed(dumb_ctrl, dcrtc->base + LCD_SPU_DUMB_CTRL);
+ }
+}
+
+static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
+ int x, int y, struct armada_regs *regs, bool interlaced)
+{
+ struct armada_gem_object *obj = drm_fb_obj(fb);
+ unsigned pitch = fb->pitches[0];
+ unsigned offset = y * pitch + x * fb->bits_per_pixel / 8;
+ uint32_t addr_odd, addr_even;
+ unsigned i = 0;
+
+ DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
+ pitch, x, y, fb->bits_per_pixel);
+
+ addr_odd = addr_even = obj->dev_addr + offset;
+
+ if (interlaced) {
+ addr_even += pitch;
+ pitch *= 2;
+ }
+
+ /* write offset, base, and pitch */
+ armada_reg_queue_set(regs, i, addr_odd, LCD_CFG_GRA_START_ADDR0);
+ armada_reg_queue_set(regs, i, addr_even, LCD_CFG_GRA_START_ADDR1);
+ armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH);
+
+ return i;
+}
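Plugging in numbers makes the interlaced case above clearer. A sketch with assumed values (obj_dev_addr stands in for obj->dev_addr):

	/* 1920x1080 XRGB8888: pitches[0] = 1920 * 4 = 7680, x = y = 0 */
	uint32_t obj_dev_addr = 0x1f000000;		/* hypothetical DMA address */
	unsigned pitch = 7680;
	uint32_t addr_odd  = obj_dev_addr;		/* odd field: starts at line 0 */
	uint32_t addr_even = obj_dev_addr + pitch;	/* even field: starts at line 1 */

	pitch *= 2;	/* each field steps two framebuffer lines per scanline */

Each field thus scans out alternate lines of the same buffer.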
+
+static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc,
+ struct armada_frame_work *work)
+{
+ struct drm_device *dev = dcrtc->crtc.dev;
+ unsigned long flags;
+ int ret;
+
+ ret = drm_vblank_get(dev, dcrtc->num);
+ if (ret) {
+ DRM_ERROR("failed to acquire vblank counter\n");
+ return ret;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (!dcrtc->frame_work)
+ dcrtc->frame_work = work;
+ else
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (ret)
+ drm_vblank_put(dev, dcrtc->num);
+
+ return ret;
+}
+
+static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc)
+{
+ struct drm_device *dev = dcrtc->crtc.dev;
+ struct armada_frame_work *work = dcrtc->frame_work;
+
+ dcrtc->frame_work = NULL;
+
+ armada_drm_crtc_update_regs(dcrtc, work->regs);
+
+ if (work->event)
+ drm_send_vblank_event(dev, dcrtc->num, work->event);
+
+ drm_vblank_put(dev, dcrtc->num);
+
+	/* Finally, queue the process-context half of the cleanup. */
+ __armada_drm_queue_unref_work(dcrtc->crtc.dev, work->old_fb);
+ kfree(work);
+}
+
+static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
+ struct drm_framebuffer *fb, bool force)
+{
+ struct armada_frame_work *work;
+
+ if (!fb)
+ return;
+
+ if (force) {
+ /* Display is disabled, so just drop the old fb */
+ drm_framebuffer_unreference(fb);
+ return;
+ }
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (work) {
+ int i = 0;
+ work->event = NULL;
+ work->old_fb = fb;
+ armada_reg_queue_end(work->regs, i);
+
+ if (armada_drm_crtc_queue_frame_work(dcrtc, work) == 0)
+ return;
+
+ kfree(work);
+ }
+
+ /*
+ * Oops - just drop the reference immediately and hope for
+ * the best. The worst that will happen is the buffer gets
+ * reused before it has finished being displayed.
+ */
+ drm_framebuffer_unreference(fb);
+}
+
+static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
+{
+ struct drm_device *dev = dcrtc->crtc.dev;
+
+ /*
+ * Tell the DRM core that vblank IRQs aren't going to happen for
+ * a while. This cleans up any pending vblank events for us.
+ */
+ drm_vblank_off(dev, dcrtc->num);
+
+ /* Handle any pending flip event. */
+ spin_lock_irq(&dev->event_lock);
+ if (dcrtc->frame_work)
+ armada_drm_crtc_complete_frame_work(dcrtc);
+ spin_unlock_irq(&dev->event_lock);
+}
+
+void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
+ int idx)
+{
+}
+
+void armada_drm_crtc_gamma_get(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ int idx)
+{
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+ if (dcrtc->dpms != dpms) {
+ dcrtc->dpms = dpms;
+ armada_drm_crtc_update(dcrtc);
+ if (dpms_blanked(dpms))
+ armada_drm_vblank_off(dcrtc);
+ }
+}
+
+/*
+ * Prepare for a mode set. Turn off the overlay to ensure that we don't
+ * end up with the overlay size being bigger than the active screen size.
+ * We rely upon X refreshing this state after the mode set has completed.
+ *
+ * The mode_config.mutex will be held for this call
+ */
+static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct drm_plane *plane;
+
+ /*
+ * If we have an overlay plane associated with this CRTC, disable
+ * it before the modeset to avoid its coordinates being outside
+ * the new mode parameters. DRM doesn't provide help with this.
+ */
+ plane = dcrtc->plane;
+ if (plane) {
+ struct drm_framebuffer *fb = plane->fb;
+
+ plane->funcs->disable_plane(plane);
+ plane->fb = NULL;
+ plane->crtc = NULL;
+ drm_framebuffer_unreference(fb);
+ }
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_commit(struct drm_crtc *crtc)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+ if (dcrtc->dpms != DRM_MODE_DPMS_ON) {
+ dcrtc->dpms = DRM_MODE_DPMS_ON;
+ armada_drm_crtc_update(dcrtc);
+ }
+}
+
+/* The mode_config.mutex will be held for this call */
+static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode, struct drm_display_mode *adj)
+{
+ struct armada_private *priv = crtc->dev->dev_private;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ int ret;
+
+ /* We can't do interlaced modes if we don't have the SPU_ADV_REG */
+ if (!priv->variant->has_spu_adv_reg &&
+ adj->flags & DRM_MODE_FLAG_INTERLACE)
+ return false;
+
+ /* Check whether the display mode is possible */
+ ret = priv->variant->crtc_compute_clock(dcrtc, adj, NULL);
+ if (ret)
+ return false;
+
+ return true;
+}
+
+void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
+{
+ struct armada_vbl_event *e, *n;
+ void __iomem *base = dcrtc->base;
+
+ if (stat & DMA_FF_UNDERFLOW)
+ DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
+ if (stat & GRA_FF_UNDERFLOW)
+ DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);
+
+ if (stat & VSYNC_IRQ)
+ drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);
+
+ spin_lock(&dcrtc->irq_lock);
+
+ list_for_each_entry_safe(e, n, &dcrtc->vbl_list, node) {
+ list_del_init(&e->node);
+ drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ e->fn(dcrtc, e->data);
+ }
+
+ if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
+ int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
+ uint32_t val;
+
+ writel_relaxed(dcrtc->v[i].spu_v_porch, base + LCD_SPU_V_PORCH);
+ writel_relaxed(dcrtc->v[i].spu_v_h_total,
+ base + LCD_SPUT_V_H_TOTAL);
+
+ val = readl_relaxed(base + LCD_SPU_ADV_REG);
+ val &= ~(ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF | ADV_VSYNCOFFEN);
+ val |= dcrtc->v[i].spu_adv_reg;
+ writel_relaxed(val, base + LCD_SPU_ADV_REG);
+ }
+
+ if (stat & DUMB_FRAMEDONE && dcrtc->cursor_update) {
+ writel_relaxed(dcrtc->cursor_hw_pos,
+ base + LCD_SPU_HWC_OVSA_HPXL_VLN);
+ writel_relaxed(dcrtc->cursor_hw_sz,
+ base + LCD_SPU_HWC_HPXL_VLN);
+ armada_updatel(CFG_HWC_ENA,
+ CFG_HWC_ENA | CFG_HWC_1BITMOD | CFG_HWC_1BITENA,
+ base + LCD_SPU_DMA_CTRL0);
+ dcrtc->cursor_update = false;
+ armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+ }
+
+ spin_unlock(&dcrtc->irq_lock);
+
+ if (stat & GRA_FRAME_IRQ) {
+ struct drm_device *dev = dcrtc->crtc.dev;
+
+ spin_lock(&dev->event_lock);
+ if (dcrtc->frame_work)
+ armada_drm_crtc_complete_frame_work(dcrtc);
+ spin_unlock(&dev->event_lock);
+
+ wake_up(&dcrtc->frame_wait);
+ }
+}
+
+/* These are locked by dev->vbl_lock */
+void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
+{
+ if (dcrtc->irq_ena & mask) {
+ dcrtc->irq_ena &= ~mask;
+ writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+ }
+}
+
+void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
+{
+ if ((dcrtc->irq_ena & mask) != mask) {
+ dcrtc->irq_ena |= mask;
+ writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+ if (readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR) & mask)
+ writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+ }
+}
+
+static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
+{
+ struct drm_display_mode *adj = &dcrtc->crtc.mode;
+ uint32_t val = 0;
+
+ if (dcrtc->csc_yuv_mode == CSC_YUV_CCIR709)
+ val |= CFG_CSC_YUV_CCIR709;
+ if (dcrtc->csc_rgb_mode == CSC_RGB_STUDIO)
+ val |= CFG_CSC_RGB_STUDIO;
+
+ /*
+ * In auto mode, set the colorimetry based upon the HDMI spec.
+ * 1280x720p, 1920x1080p and 1920x1080i use ITU709, others use
+ * ITU601. It may be more appropriate to set this depending on
+ * the source - but what if the graphic frame is YUV and the
+ * video frame is RGB?
+ */
+ if ((adj->hdisplay == 1280 && adj->vdisplay == 720 &&
+ !(adj->flags & DRM_MODE_FLAG_INTERLACE)) ||
+ (adj->hdisplay == 1920 && adj->vdisplay == 1080)) {
+ if (dcrtc->csc_yuv_mode == CSC_AUTO)
+ val |= CFG_CSC_YUV_CCIR709;
+ }
+
+ /*
+ * We assume we're connected to a TV-like device, so the YUV->RGB
+ * conversion should produce a limited range. We should set this
+ * depending on the connectors attached to this CRTC, and what
+ * kind of device they report is connected.
+ */
+ if (dcrtc->csc_rgb_mode == CSC_AUTO)
+ val |= CFG_CSC_RGB_STUDIO;
+
+ return val;
+}
+
+/* The mode_config.mutex will be held for this call */
+static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode, struct drm_display_mode *adj,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct armada_private *priv = crtc->dev->dev_private;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_regs regs[17];
+ uint32_t lm, rm, tm, bm, val, sclk;
+ unsigned long flags;
+ unsigned i;
+ bool interlaced;
+
+ drm_framebuffer_reference(crtc->primary->fb);
+
+ interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
+
+ i = armada_drm_crtc_calc_fb(dcrtc->crtc.primary->fb,
+ x, y, regs, interlaced);
+
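+ /*
+ * Derive the margin (porch) values from the mode: the right and
+ * bottom margins are the front porches (sync start minus active),
+ * the left and top margins the back porches (total minus sync end).
+ */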
+ rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
+ lm = adj->crtc_htotal - adj->crtc_hsync_end;
+ bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
+ tm = adj->crtc_vtotal - adj->crtc_vsync_end;
+
+ DRM_DEBUG_DRIVER("H: %d %d %d %d lm %d rm %d\n",
+ adj->crtc_hdisplay,
+ adj->crtc_hsync_start,
+ adj->crtc_hsync_end,
+ adj->crtc_htotal, lm, rm);
+ DRM_DEBUG_DRIVER("V: %d %d %d %d tm %d bm %d\n",
+ adj->crtc_vdisplay,
+ adj->crtc_vsync_start,
+ adj->crtc_vsync_end,
+ adj->crtc_vtotal, tm, bm);
+
+ /* Wait for pending flips to complete */
+ wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
+
+ drm_vblank_pre_modeset(crtc->dev, dcrtc->num);
+
+ crtc->mode = *adj;
+
+ val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
+ if (val != dcrtc->dumb_ctrl) {
+ dcrtc->dumb_ctrl = val;
+ writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
+ }
+
+ /* Now compute the divider for real */
+ priv->variant->crtc_compute_clock(dcrtc, adj, &sclk);
+
+ /* Ensure graphic fifo is enabled */
+ armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
+ armada_reg_queue_set(regs, i, sclk, LCD_CFG_SCLK_DIV);
+
+ if (interlaced ^ dcrtc->interlaced) {
+ if (adj->flags & DRM_MODE_FLAG_INTERLACE)
+ drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+ else
+ drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ dcrtc->interlaced = interlaced;
+ }
+
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
+
+ /* Even interlaced/progressive frame */
+ dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
+ adj->crtc_htotal;
+ dcrtc->v[1].spu_v_porch = tm << 16 | bm;
+ val = adj->crtc_hsync_start;
+ dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
+ priv->variant->spu_adv_reg;
+
+ if (interlaced) {
+ /* Odd interlaced frame */
+ dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total +
+ (1 << 16);
+ dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
+ val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
+ dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
+ priv->variant->spu_adv_reg;
+ } else {
+ dcrtc->v[0] = dcrtc->v[1];
+ }
+
+ val = adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
+
+ armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE);
+ armada_reg_queue_set(regs, i, val, LCD_SPU_GRA_HPXL_VLN);
+ armada_reg_queue_set(regs, i, val, LCD_SPU_GZM_HPXL_VLN);
+ armada_reg_queue_set(regs, i, (lm << 16) | rm, LCD_SPU_H_PORCH);
+ armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_porch, LCD_SPU_V_PORCH);
+ armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
+ LCD_SPUT_V_H_TOTAL);
+
+ if (priv->variant->has_spu_adv_reg) {
+ armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
+ ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
+ ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
+ }
+
+ val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
+ val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
+ val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
+
+ if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420)
+ val |= CFG_PALETTE_ENA;
+
+ if (interlaced)
+ val |= CFG_GRA_FTOGGLE;
+
+ armada_reg_queue_mod(regs, i, val, CFG_GRAFORMAT |
+ CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+ CFG_SWAPYU | CFG_YUV2RGB) |
+ CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
+ LCD_SPU_DMA_CTRL0);
+
+ val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
+ armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
+
+ val = dcrtc->spu_iopad_ctrl | armada_drm_crtc_calculate_csc(dcrtc);
+ armada_reg_queue_set(regs, i, val, LCD_SPU_IOPAD_CONTROL);
+ armada_reg_queue_end(regs, i);
+
+ armada_drm_crtc_update_regs(dcrtc, regs);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+
+ armada_drm_crtc_update(dcrtc);
+
+ drm_vblank_post_modeset(crtc->dev, dcrtc->num);
+ armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+
+ return 0;
+}
+
+/* The mode_config.mutex will be held for this call */
+static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_regs regs[4];
+ unsigned i;
+
+ i = armada_drm_crtc_calc_fb(crtc->primary->fb, crtc->x, crtc->y, regs,
+ dcrtc->interlaced);
+ armada_reg_queue_end(regs, i);
+
+ /* Wait for pending flips to complete */
+ wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
+
+ /* Take a reference to the new fb as we're using it */
+ drm_framebuffer_reference(crtc->primary->fb);
+
+ /* Update the base in the CRTC */
+ armada_drm_crtc_update_regs(dcrtc, regs);
+
+ /* Drop our previously held reference */
+ armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+
+ return 0;
+}
+
+static void armada_drm_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_disable(struct drm_crtc *crtc)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+ armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ armada_drm_crtc_finish_fb(dcrtc, crtc->primary->fb, true);
+
+ /* Power down most RAMs and FIFOs */
+ writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+ CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
+ CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
+}
+
+static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
+ .dpms = armada_drm_crtc_dpms,
+ .prepare = armada_drm_crtc_prepare,
+ .commit = armada_drm_crtc_commit,
+ .mode_fixup = armada_drm_crtc_mode_fixup,
+ .mode_set = armada_drm_crtc_mode_set,
+ .mode_set_base = armada_drm_crtc_mode_set_base,
+ .load_lut = armada_drm_crtc_load_lut,
+ .disable = armada_drm_crtc_disable,
+};
+
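+/*
+ * Load an ARGB cursor image into the hardware cursor SRAM, swapping
+ * the R and B channels (ARGB -> ABGR) on the way in. The address
+ * arithmetic steps through the cursor SRAM banks (SRAM_HWC32_RAM1/2)
+ * as each block fills up.
+ */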
+static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
+ unsigned stride, unsigned width, unsigned height)
+{
+ uint32_t addr;
+ unsigned y;
+
+ addr = SRAM_HWC32_RAM1;
+ for (y = 0; y < height; y++) {
+ uint32_t *p = &pix[y * stride];
+ unsigned x;
+
+ for (x = 0; x < width; x++, p++) {
+ uint32_t val = *p;
+
+ val = (val & 0xff00ff00) |
+ (val & 0x000000ff) << 16 |
+ (val & 0x00ff0000) >> 16;
+
+ writel_relaxed(val,
+ base + LCD_SPU_SRAM_WRDAT);
+ writel_relaxed(addr | SRAM_WRITE,
+ base + LCD_SPU_SRAM_CTRL);
+ readl_relaxed(base + LCD_SPU_HWC_OVSA_HPXL_VLN);
+ addr += 1;
+ if ((addr & 0x00ff) == 0)
+ addr += 0xf00;
+ if ((addr & 0x30ff) == 0)
+ addr = SRAM_HWC32_RAM2;
+ }
+ }
+}
+
+static void armada_drm_crtc_cursor_tran(void __iomem *base)
+{
+ unsigned addr;
+
+ for (addr = 0; addr < 256; addr++) {
+ /* write the default value */
+ writel_relaxed(0x55555555, base + LCD_SPU_SRAM_WRDAT);
+ writel_relaxed(addr | SRAM_WRITE | SRAM_HWC32_TRAN,
+ base + LCD_SPU_SRAM_CTRL);
+ }
+}
+
+static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
+{
+ uint32_t xoff, xscr, w = dcrtc->cursor_w, s;
+ uint32_t yoff, yscr, h = dcrtc->cursor_h;
+ uint32_t para1;
+
+ /*
+ * Calculate the cursor's visible width and height, its position
+ * on screen, and the corresponding offset into the cursor bitmap.
+ */
+ if (dcrtc->cursor_x < 0) {
+ xoff = -dcrtc->cursor_x;
+ xscr = 0;
+ w -= min(xoff, w);
+ } else if (dcrtc->cursor_x + w > dcrtc->crtc.mode.hdisplay) {
+ xoff = 0;
+ xscr = dcrtc->cursor_x;
+ w = max_t(int, dcrtc->crtc.mode.hdisplay - dcrtc->cursor_x, 0);
+ } else {
+ xoff = 0;
+ xscr = dcrtc->cursor_x;
+ }
+
+ if (dcrtc->cursor_y < 0) {
+ yoff = -dcrtc->cursor_y;
+ yscr = 0;
+ h -= min(yoff, h);
+ } else if (dcrtc->cursor_y + h > dcrtc->crtc.mode.vdisplay) {
+ yoff = 0;
+ yscr = dcrtc->cursor_y;
+ h = max_t(int, dcrtc->crtc.mode.vdisplay - dcrtc->cursor_y, 0);
+ } else {
+ yoff = 0;
+ yscr = dcrtc->cursor_y;
+ }
+
+ /*
+ * On interlaced modes the vertical cursor size must be halved,
+ * and the doubled stride skips alternate bitmap lines per field.
+ */
+ s = dcrtc->cursor_w;
+ if (dcrtc->interlaced) {
+ s *= 2;
+ yscr /= 2;
+ h /= 2;
+ }
+
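+ /*
+ * If there is no cursor image, or the cursor is entirely
+ * off-screen, disable it and cancel any pending update.
+ */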
+ if (!dcrtc->cursor_obj || !h || !w) {
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+ dcrtc->cursor_update = false;
+ armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+ spin_unlock_irq(&dcrtc->irq_lock);
+ return 0;
+ }
+
+ para1 = readl_relaxed(dcrtc->base + LCD_SPU_SRAM_PARA1);
+ armada_updatel(CFG_CSB_256x32, CFG_CSB_256x32 | CFG_PDWN256x32,
+ dcrtc->base + LCD_SPU_SRAM_PARA1);
+
+ /*
+ * Initialize the transparency if the SRAM was powered down.
+ * In that case, the cursor data must also be reloaded.
+ */
+ if (!(para1 & CFG_CSB_256x32)) {
+ armada_drm_crtc_cursor_tran(dcrtc->base);
+ reload = true;
+ }
+
+ if (dcrtc->cursor_hw_sz != (h << 16 | w)) {
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+ dcrtc->cursor_update = false;
+ armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+ spin_unlock_irq(&dcrtc->irq_lock);
+ reload = true;
+ }
+ if (reload) {
+ struct armada_gem_object *obj = dcrtc->cursor_obj;
+ uint32_t *pix;
+ /* Set the top-left corner of the cursor image */
+ pix = obj->addr;
+ pix += yoff * s + xoff;
+ armada_load_cursor_argb(dcrtc->base, pix, s, w, h);
+ }
+
+ /* Reload the cursor position, size and enable in the IRQ handler */
+ spin_lock_irq(&dcrtc->irq_lock);
+ dcrtc->cursor_hw_pos = yscr << 16 | xscr;
+ dcrtc->cursor_hw_sz = h << 16 | w;
+ dcrtc->cursor_update = true;
+ armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+ spin_unlock_irq(&dcrtc->irq_lock);
+
+ return 0;
+}
+
+static void cursor_update(void *data)
+{
+ armada_drm_crtc_cursor_update(data, true);
+}
+
+static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file, uint32_t handle, uint32_t w, uint32_t h)
+{
+ struct drm_device *dev = crtc->dev;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_private *priv = crtc->dev->dev_private;
+ struct armada_gem_object *obj = NULL;
+ int ret;
+
+ /* If no cursor support, replicate drm's return value */
+ if (!priv->variant->has_spu_adv_reg)
+ return -ENXIO;
+
+ if (handle && w > 0 && h > 0) {
+ /* maximum size is 64x32 or 32x64 */
+ if (w > 64 || h > 64 || (w > 32 && h > 32))
+ return -ENOMEM;
+
+ obj = armada_gem_object_lookup(dev, file, handle);
+ if (!obj)
+ return -ENOENT;
+
+ /* Must be a kernel-mapped object */
+ if (!obj->addr) {
+ drm_gem_object_unreference_unlocked(&obj->obj);
+ return -EINVAL;
+ }
+
+ if (obj->obj.size < w * h * 4) {
+ DRM_ERROR("buffer is too small\n");
+ drm_gem_object_unreference_unlocked(&obj->obj);
+ return -ENOMEM;
+ }
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ if (dcrtc->cursor_obj) {
+ dcrtc->cursor_obj->update = NULL;
+ dcrtc->cursor_obj->update_data = NULL;
+ drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
+ }
+ dcrtc->cursor_obj = obj;
+ dcrtc->cursor_w = w;
+ dcrtc->cursor_h = h;
+ ret = armada_drm_crtc_cursor_update(dcrtc, true);
+ if (obj) {
+ obj->update_data = dcrtc;
+ obj->update = cursor_update;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_private *priv = crtc->dev->dev_private;
+ int ret;
+
+ /* If no cursor support, replicate drm's return value */
+ if (!priv->variant->has_spu_adv_reg)
+ return -EFAULT;
+
+ mutex_lock(&dev->struct_mutex);
+ dcrtc->cursor_x = x;
+ dcrtc->cursor_y = y;
+ ret = armada_drm_crtc_cursor_update(dcrtc, false);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_private *priv = crtc->dev->dev_private;
+
+ if (dcrtc->cursor_obj)
+ drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
+
+ priv->dcrtc[dcrtc->num] = NULL;
+ drm_crtc_cleanup(&dcrtc->crtc);
+
+ if (!IS_ERR(dcrtc->clk))
+ clk_disable_unprepare(dcrtc->clk);
+
+ kfree(dcrtc);
+}
+
+/*
+ * The mode_config lock is held here, to prevent races between this
+ * and a mode_set.
+ */
+static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_frame_work *work;
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+ unsigned i;
+ int ret;
+
+ /* We don't support changing the pixel format */
+ if (fb->pixel_format != crtc->primary->fb->pixel_format)
+ return -EINVAL;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ work->event = event;
+ work->old_fb = dcrtc->crtc.primary->fb;
+
+ i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs,
+ dcrtc->interlaced);
+ armada_reg_queue_end(work->regs, i);
+
+ /*
+ * Hold the old framebuffer for the work - DRM appears to drop our
+ * reference to the old framebuffer in drm_mode_page_flip_ioctl().
+ */
+ drm_framebuffer_reference(work->old_fb);
+
+ ret = armada_drm_crtc_queue_frame_work(dcrtc, work);
+ if (ret) {
+ /*
+ * Undo our reference above; DRM does not drop the reference
+ * to this object on error, so that's okay.
+ */
+ drm_framebuffer_unreference(work->old_fb);
+ kfree(work);
+ return ret;
+ }
+
+ /*
+ * Don't take a reference on the new framebuffer;
+ * drm_mode_page_flip_ioctl() has already grabbed a reference and
+ * will _not_ drop that reference on successful return from this
+ * function. Simply mark this new framebuffer as the current one.
+ */
+ dcrtc->crtc.primary->fb = fb;
+
+ /*
+ * Finally, if the display is blanked, we won't receive an
+ * interrupt, so complete it now.
+ */
+ if (dpms_blanked(dcrtc->dpms)) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (dcrtc->frame_work)
+ armada_drm_crtc_complete_frame_work(dcrtc);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ return 0;
+}
+
+static int
+armada_drm_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val)
+{
+ struct armada_private *priv = crtc->dev->dev_private;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ bool update_csc = false;
+
+ if (property == priv->csc_yuv_prop) {
+ dcrtc->csc_yuv_mode = val;
+ update_csc = true;
+ } else if (property == priv->csc_rgb_prop) {
+ dcrtc->csc_rgb_mode = val;
+ update_csc = true;
+ }
+
+ if (update_csc) {
+ uint32_t val;
+
+ val = dcrtc->spu_iopad_ctrl |
+ armada_drm_crtc_calculate_csc(dcrtc);
+ writel_relaxed(val, dcrtc->base + LCD_SPU_IOPAD_CONTROL);
+ }
+
+ return 0;
+}
+
+static struct drm_crtc_funcs armada_crtc_funcs = {
+ .cursor_set = armada_drm_crtc_cursor_set,
+ .cursor_move = armada_drm_crtc_cursor_move,
+ .destroy = armada_drm_crtc_destroy,
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = armada_drm_crtc_page_flip,
+ .set_property = armada_drm_crtc_set_property,
+};
+
+static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
+ { CSC_AUTO, "Auto" },
+ { CSC_YUV_CCIR601, "CCIR601" },
+ { CSC_YUV_CCIR709, "CCIR709" },
+};
+
+static struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
+ { CSC_AUTO, "Auto" },
+ { CSC_RGB_COMPUTER, "Computer system" },
+ { CSC_RGB_STUDIO, "Studio" },
+};
+
+static int armada_drm_crtc_create_properties(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ if (priv->csc_yuv_prop)
+ return 0;
+
+ priv->csc_yuv_prop = drm_property_create_enum(dev, 0,
+ "CSC_YUV", armada_drm_csc_yuv_enum_list,
+ ARRAY_SIZE(armada_drm_csc_yuv_enum_list));
+ priv->csc_rgb_prop = drm_property_create_enum(dev, 0,
+ "CSC_RGB", armada_drm_csc_rgb_enum_list,
+ ARRAY_SIZE(armada_drm_csc_rgb_enum_list));
+
+ if (!priv->csc_yuv_prop || !priv->csc_rgb_prop)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
+ struct resource *res)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct armada_crtc *dcrtc;
+ void __iomem *base;
+ int ret;
+
+ ret = armada_drm_crtc_create_properties(dev);
+ if (ret)
+ return ret;
+
+ base = devm_request_and_ioremap(dev->dev, res);
+ if (!base) {
+ DRM_ERROR("failed to ioremap register\n");
+ return -ENOMEM;
+ }
+
+ dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL);
+ if (!dcrtc) {
+ DRM_ERROR("failed to allocate Armada crtc\n");
+ return -ENOMEM;
+ }
+
+ dcrtc->base = base;
+ dcrtc->num = num;
+ dcrtc->clk = ERR_PTR(-EINVAL);
+ dcrtc->csc_yuv_mode = CSC_AUTO;
+ dcrtc->csc_rgb_mode = CSC_AUTO;
+ dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0;
+ dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
+ spin_lock_init(&dcrtc->irq_lock);
+ dcrtc->irq_ena = CLEAN_SPU_IRQ_ISR;
+ INIT_LIST_HEAD(&dcrtc->vbl_list);
+ init_waitqueue_head(&dcrtc->frame_wait);
+
+ /* Initialize some registers which we don't otherwise set */
+ writel_relaxed(0x00000001, dcrtc->base + LCD_CFG_SCLK_DIV);
+ writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_BLANKCOLOR);
+ writel_relaxed(dcrtc->spu_iopad_ctrl,
+ dcrtc->base + LCD_SPU_IOPAD_CONTROL);
+ writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_SRAM_PARA0);
+ writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+ CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
+ CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
+ writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
+ writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
+
+ if (priv->variant->crtc_init) {
+ ret = priv->variant->crtc_init(dcrtc);
+ if (ret) {
+ kfree(dcrtc);
+ return ret;
+ }
+ }
+
+ /* Ensure AXI pipeline is enabled */
+ armada_updatel(CFG_ARBFAST_ENA, 0, dcrtc->base + LCD_SPU_DMA_CTRL0);
+
+ priv->dcrtc[dcrtc->num] = dcrtc;
+
+ drm_crtc_init(dev, &dcrtc->crtc, &armada_crtc_funcs);
+ drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
+
+ drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
+ dcrtc->csc_yuv_mode);
+ drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
+ dcrtc->csc_rgb_mode);
+
+ return armada_overlay_plane_create(dev, 1 << dcrtc->num);
+}
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
new file mode 100644
index 00000000000..9c10a07e749
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_CRTC_H
+#define ARMADA_CRTC_H
+
+struct armada_gem_object;
+
+struct armada_regs {
+ uint32_t offset;
+ uint32_t mask;
+ uint32_t val;
+};
+
+#define armada_reg_queue_mod(_r, _i, _v, _m, _o) \
+ do { \
+ struct armada_regs *__reg = _r; \
+ __reg[_i].offset = _o; \
+ __reg[_i].mask = ~(_m); \
+ __reg[_i].val = _v; \
+ _i++; \
+ } while (0)
+
+#define armada_reg_queue_set(_r, _i, _v, _o) \
+ armada_reg_queue_mod(_r, _i, _v, ~0, _o)
+
+#define armada_reg_queue_end(_r, _i) \
+ armada_reg_queue_mod(_r, _i, 0, 0, ~0)
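+
+/*
+ * Usage sketch (illustrative): callers build a small array of register
+ * updates and terminate it with armada_reg_queue_end(), which stores
+ * the ~0 offset sentinel the update loop is expected to stop at:
+ *
+ * struct armada_regs regs[4];
+ * unsigned i = 0;
+ *
+ * armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE);
+ * armada_reg_queue_end(regs, i);
+ * armada_drm_crtc_update_regs(dcrtc, regs);
+ *
+ * armada_reg_queue_mod() updates only the bits in _m; _set() writes
+ * the whole register.
+ */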
+
+struct armada_frame_work;
+
+struct armada_crtc {
+ struct drm_crtc crtc;
+ unsigned num;
+ void __iomem *base;
+ struct clk *clk;
+ struct {
+ uint32_t spu_v_h_total;
+ uint32_t spu_v_porch;
+ uint32_t spu_adv_reg;
+ } v[2];
+ bool interlaced;
+ bool cursor_update;
+ uint8_t csc_yuv_mode;
+ uint8_t csc_rgb_mode;
+
+ struct drm_plane *plane;
+
+ struct armada_gem_object *cursor_obj;
+ int cursor_x;
+ int cursor_y;
+ uint32_t cursor_hw_pos;
+ uint32_t cursor_hw_sz;
+ uint32_t cursor_w;
+ uint32_t cursor_h;
+
+ int dpms;
+ uint32_t cfg_dumb_ctrl;
+ uint32_t dumb_ctrl;
+ uint32_t spu_iopad_ctrl;
+
+ wait_queue_head_t frame_wait;
+ struct armada_frame_work *frame_work;
+
+ spinlock_t irq_lock;
+ uint32_t irq_ena;
+ struct list_head vbl_list;
+};
+#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
+
+int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *);
+void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
+void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
+void armada_drm_crtc_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
new file mode 100644
index 00000000000..471e45627f1
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+
+static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct armada_private *priv = dev->dev_private;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_mm_dump_table(m, &priv->linear);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+static int armada_debugfs_reg_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+ struct armada_private *priv = dev->dev_private;
+ int n, i;
+
+ if (priv) {
+ for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+ struct armada_crtc *dcrtc = priv->dcrtc[n];
+ if (!dcrtc)
+ continue;
+
+ for (i = 0x84; i <= 0x1c4; i += 4) {
+ uint32_t v = readl_relaxed(dcrtc->base + i);
+ seq_printf(m, "%u: 0x%04x: 0x%08x\n", n, i, v);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int armada_debugfs_reg_r_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, armada_debugfs_reg_show, inode->i_private);
+}
+
+static const struct file_operations fops_reg_r = {
+ .owner = THIS_MODULE,
+ .open = armada_debugfs_reg_r_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
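+/*
+ * Accept writes of the form "<offset> <value>" (both hex) and poke the
+ * value into the given LCD register of the first CRTC, for example
+ * (path and values illustrative):
+ *
+ * echo '1c0 80000000' > /sys/kernel/debug/dri/0/reg_wr
+ *
+ * Only offsets in the 0x84..0x1c4 register window are accepted.
+ */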
+static int armada_debugfs_write(struct file *file, const char __user *ptr,
+ size_t len, loff_t *off)
+{
+ struct drm_device *dev = file->private_data;
+ struct armada_private *priv = dev->dev_private;
+ struct armada_crtc *dcrtc = priv->dcrtc[0];
+ char buf[32], *p;
+ uint32_t reg, val;
+ int ret;
+
+ if (*off != 0)
+ return 0;
+
+ if (len > sizeof(buf) - 1)
+ len = sizeof(buf) - 1;
+
+ ret = strncpy_from_user(buf, ptr, len);
+ if (ret < 0)
+ return ret;
+ buf[len] = '\0';
+
+ reg = simple_strtoul(buf, &p, 16);
+ if (!isspace(*p))
+ return -EINVAL;
+ val = simple_strtoul(p + 1, NULL, 16);
+
+ if (reg >= 0x84 && reg <= 0x1c4)
+ writel(val, dcrtc->base + reg);
+
+ return len;
+}
+
+static const struct file_operations fops_reg_w = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = armada_debugfs_write,
+ .llseek = noop_llseek,
+};
+
+static struct drm_info_list armada_debugfs_list[] = {
+ { "gem_linear", armada_debugfs_gem_linear_show, 0 },
+};
+#define ARMADA_DEBUGFS_ENTRIES ARRAY_SIZE(armada_debugfs_list)
+
+static int drm_add_fake_info_node(struct drm_minor *minor, struct dentry *ent,
+ const void *key)
+{
+ struct drm_info_node *node;
+
+ node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+ if (node == NULL) {
+ debugfs_remove(ent);
+ return -ENOMEM;
+ }
+
+ node->minor = minor;
+ node->dent = ent;
+ node->info_ent = (void *) key;
+
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&node->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
+
+ return 0;
+}
+
+static int armada_debugfs_create(struct dentry *root, struct drm_minor *minor,
+ const char *name, umode_t mode, const struct file_operations *fops)
+{
+ struct dentry *de;
+
+ de = debugfs_create_file(name, mode, root, minor->dev, fops);
+
+ return drm_add_fake_info_node(minor, de, fops);
+}
+
+int armada_drm_debugfs_init(struct drm_minor *minor)
+{
+ int ret;
+
+ ret = drm_debugfs_create_files(armada_debugfs_list,
+ ARMADA_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
+ if (ret)
+ return ret;
+
+ ret = armada_debugfs_create(minor->debugfs_root, minor,
+ "reg", S_IFREG | S_IRUSR, &fops_reg_r);
+ if (ret)
+ goto err_1;
+
+ ret = armada_debugfs_create(minor->debugfs_root, minor,
+ "reg_wr", S_IFREG | S_IWUSR, &fops_reg_w);
+ if (ret)
+ goto err_2;
+ return ret;
+
+ err_2:
+ drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
+ err_1:
+ drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
+ minor);
+ return ret;
+}
+
+void armada_drm_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_w, 1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
+ drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
+ minor);
+}
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
new file mode 100644
index 00000000000..a72cae03b99
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_DRM_H
+#define ARMADA_DRM_H
+
+#include <linux/kfifo.h>
+#include <linux/io.h>
+#include <linux/workqueue.h>
+#include <drm/drmP.h>
+
+struct armada_crtc;
+struct armada_gem_object;
+struct clk;
+struct drm_fb_helper;
+
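+/*
+ * Read-modify-write helper: replace the bits covered by @mask with
+ * @val, skipping the register write entirely if the value is
+ * unchanged.
+ */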
+static inline void
+armada_updatel(uint32_t val, uint32_t mask, void __iomem *ptr)
+{
+ uint32_t ov, v;
+
+ ov = v = readl_relaxed(ptr);
+ v = (v & ~mask) | val;
+ if (ov != v)
+ writel_relaxed(v, ptr);
+}
+
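+/*
+ * Bytes per scanline for a given width and bpp; 4bpp packs two pixels
+ * per byte. For example, 1366 pixels at 32bpp is 5464 bytes, which is
+ * aligned up to a 5504 byte pitch.
+ */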
+static inline uint32_t armada_pitch(uint32_t width, uint32_t bpp)
+{
+ uint32_t pitch = bpp != 4 ? width * ((bpp + 7) / 8) : width / 2;
+
+ /* 88AP510 spec recommends pitch be a multiple of 128 */
+ return ALIGN(pitch, 128);
+}
+
+struct armada_vbl_event {
+ struct list_head node;
+ void *data;
+ void (*fn)(struct armada_crtc *, void *);
+};
+void armada_drm_vbl_event_add(struct armada_crtc *,
+ struct armada_vbl_event *);
+void armada_drm_vbl_event_remove(struct armada_crtc *,
+ struct armada_vbl_event *);
+void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *,
+ struct armada_vbl_event *);
+#define armada_drm_vbl_event_init(_e, _f, _d) do { \
+ struct armada_vbl_event *__e = _e; \
+ INIT_LIST_HEAD(&__e->node); \
+ __e->data = _d; \
+ __e->fn = _f; \
+} while (0)
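+
+/*
+ * The fn callback of a queued vblank event runs from the CRTC
+ * interrupt handler with irq_lock held, so it must not sleep.
+ */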
+
+struct armada_private;
+
+struct armada_variant {
+ bool has_spu_adv_reg;
+ uint32_t spu_adv_reg;
+ int (*init)(struct armada_private *, struct device *);
+ int (*crtc_init)(struct armada_crtc *);
+ int (*crtc_compute_clock)(struct armada_crtc *,
+ const struct drm_display_mode *,
+ uint32_t *);
+};
+
+/* Variant ops */
+extern const struct armada_variant armada510_ops;
+
+struct armada_private {
+ const struct armada_variant *variant;
+ struct work_struct fb_unref_work;
+ DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
+ struct drm_fb_helper *fbdev;
+ struct armada_crtc *dcrtc[2];
+ struct drm_mm linear;
+ struct clk *extclk[2];
+ struct drm_property *csc_yuv_prop;
+ struct drm_property *csc_rgb_prop;
+ struct drm_property *colorkey_prop;
+ struct drm_property *colorkey_min_prop;
+ struct drm_property *colorkey_max_prop;
+ struct drm_property *colorkey_val_prop;
+ struct drm_property *colorkey_alpha_prop;
+ struct drm_property *colorkey_mode_prop;
+ struct drm_property *brightness_prop;
+ struct drm_property *contrast_prop;
+ struct drm_property *saturation_prop;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *de;
+#endif
+};
+
+void __armada_drm_queue_unref_work(struct drm_device *,
+ struct drm_framebuffer *);
+void armada_drm_queue_unref_work(struct drm_device *,
+ struct drm_framebuffer *);
+
+extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
+
+int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_lastclose(struct drm_device *);
+void armada_fbdev_fini(struct drm_device *);
+
+int armada_overlay_plane_create(struct drm_device *, unsigned long);
+
+int armada_drm_debugfs_init(struct drm_minor *);
+void armada_drm_debugfs_cleanup(struct drm_minor *);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
new file mode 100644
index 00000000000..8ab3cd1a8cd
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+#ifdef CONFIG_DRM_ARMADA_TDA1998X
+#include <drm/i2c/tda998x.h>
+#include "armada_slave.h"
+
+static struct tda998x_encoder_params params = {
+ /* With 0x24, there is no translation between vp_out and int_vp:
+ *
+ *   FB       LCD out  Pins    VIP    Int Vp
+ *   R:23:16  R:7:0    VPC7:0  7:0    7:0[R]
+ *   G:15:8   G:15:8   VPB7:0  23:16  23:16[G]
+ *   B:7:0    B:23:16  VPA7:0  15:8   15:8[B]
+ */
+ .swap_a = 2,
+ .swap_b = 3,
+ .swap_c = 4,
+ .swap_d = 5,
+ .swap_e = 0,
+ .swap_f = 1,
+ .audio_cfg = BIT(2),
+ .audio_frame[1] = 1,
+ .audio_format = AFMT_SPDIF,
+ .audio_sample_rate = 44100,
+};
+
+static const struct armada_drm_slave_config tda19988_config = {
+ .i2c_adapter_id = 0,
+ .crtcs = 1 << 0, /* Only LCD0 at the moment */
+ .polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT,
+ .interlace_allowed = true,
+ .info = {
+ .type = "tda998x",
+ .addr = 0x70,
+ .platform_data = &params,
+ },
+};
+#endif
+
+static void armada_drm_unref_work(struct work_struct *work)
+{
+ struct armada_private *priv =
+ container_of(work, struct armada_private, fb_unref_work);
+ struct drm_framebuffer *fb;
+
+ while (kfifo_get(&priv->fb_unref, &fb))
+ drm_framebuffer_unreference(fb);
+}
+
+/* Must be called with dev->event_lock held */
+void __armada_drm_queue_unref_work(struct drm_device *dev,
+ struct drm_framebuffer *fb)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ WARN_ON(!kfifo_put(&priv->fb_unref, fb));
+ schedule_work(&priv->fb_unref_work);
+}
+
+void armada_drm_queue_unref_work(struct drm_device *dev,
+ struct drm_framebuffer *fb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ __armada_drm_queue_unref_work(dev, fb);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int armada_drm_load(struct drm_device *dev, unsigned long flags)
+{
+ const struct platform_device_id *id;
+ struct armada_private *priv;
+ struct resource *res[ARRAY_SIZE(priv->dcrtc)];
+ struct resource *mem = NULL;
+ int ret, n, i;
+
+ memset(res, 0, sizeof(res));
+
+ for (n = i = 0; ; n++) {
+ struct resource *r = platform_get_resource(dev->platformdev,
+ IORESOURCE_MEM, n);
+ if (!r)
+ break;
+
+ /* Resources above 64K are graphics memory */
+ if (resource_size(r) > SZ_64K)
+ mem = r;
+ else if (i < ARRAY_SIZE(priv->dcrtc))
+ res[i++] = r;
+ else
+ return -EINVAL;
+ }
+
+ if (!res[0] || !mem)
+ return -ENXIO;
+
+ if (!devm_request_mem_region(dev->dev, mem->start,
+ resource_size(mem), "armada-drm"))
+ return -EBUSY;
+
+ priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ DRM_ERROR("failed to allocate private\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(dev->platformdev, dev);
+ dev->dev_private = priv;
+
+ /* Get the implementation specific driver data. */
+ id = platform_get_device_id(dev->platformdev);
+ if (!id)
+ return -ENXIO;
+
+ priv->variant = (struct armada_variant *)id->driver_data;
+
+ ret = priv->variant->init(priv, dev->dev);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
+ INIT_KFIFO(priv->fb_unref);
+
+ /* Mode setting support */
+ drm_mode_config_init(dev);
+ dev->mode_config.min_width = 320;
+ dev->mode_config.min_height = 200;
+
+ /*
+ * With vscale enabled, the maximum width is 1920, limited by the
+ * 1920-by-3-line RAM.
+ */
+ dev->mode_config.max_width = 1920;
+ dev->mode_config.max_height = 2048;
+
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.funcs = &armada_drm_mode_config_funcs;
+ drm_mm_init(&priv->linear, mem->start, resource_size(mem));
+
+ /* Create all LCD controllers */
+ for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+ if (!res[n])
+ break;
+
+ ret = armada_drm_crtc_create(dev, n, res[n]);
+ if (ret)
+ goto err_kms;
+ }
+
+#ifdef CONFIG_DRM_ARMADA_TDA1998X
+ ret = armada_drm_connector_slave_create(dev, &tda19988_config);
+ if (ret)
+ goto err_kms;
+#endif
+
+ ret = drm_vblank_init(dev, n);
+ if (ret)
+ goto err_kms;
+
+ ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
+ if (ret)
+ goto err_kms;
+
+ dev->vblank_disable_allowed = 1;
+
+ ret = armada_fbdev_init(dev);
+ if (ret)
+ goto err_irq;
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+
+ err_irq:
+ drm_irq_uninstall(dev);
+ err_kms:
+ drm_mode_config_cleanup(dev);
+ drm_mm_takedown(&priv->linear);
+ flush_work(&priv->fb_unref_work);
+
+ return ret;
+}
+
+static int armada_drm_unload(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ drm_kms_helper_poll_fini(dev);
+ armada_fbdev_fini(dev);
+ drm_irq_uninstall(dev);
+ drm_mode_config_cleanup(dev);
+ drm_mm_takedown(&priv->linear);
+ flush_work(&priv->fb_unref_work);
+ dev->dev_private = NULL;
+
+ return 0;
+}
+
+void armada_drm_vbl_event_add(struct armada_crtc *dcrtc,
+ struct armada_vbl_event *evt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
+ if (list_empty(&evt->node)) {
+ list_add_tail(&evt->node, &dcrtc->vbl_list);
+
+ drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+ }
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
+
+void armada_drm_vbl_event_remove(struct armada_crtc *dcrtc,
+ struct armada_vbl_event *evt)
+{
+ if (!list_empty(&evt->node)) {
+ list_del_init(&evt->node);
+ drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ }
+}
+
+void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *dcrtc,
+ struct armada_vbl_event *evt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
+ armada_drm_vbl_event_remove(dcrtc, evt);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
+
+/* These are called under the vbl_lock. */
+static int armada_drm_enable_vblank(struct drm_device *dev, int crtc)
+{
+ struct armada_private *priv = dev->dev_private;
+ armada_drm_crtc_enable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
+ return 0;
+}
+
+static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
+{
+ struct armada_private *priv = dev->dev_private;
+ armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
+}
+
+static irqreturn_t armada_drm_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct armada_private *priv = dev->dev_private;
+ struct armada_crtc *dcrtc = priv->dcrtc[0];
+ uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
+ irqreturn_t handled = IRQ_NONE;
+
+ /*
+ * This is ridiculous - rather than writing bits to clear, we
+ * have to set the actual status register value. This is racy.
+ */
+ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+ /* Mask out those interrupts we haven't enabled */
+ v = stat & dcrtc->irq_ena;
+
+ if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
+ armada_drm_crtc_irq(dcrtc, stat);
+ handled = IRQ_HANDLED;
+ }
+
+ return handled;
+}
+
+static int armada_drm_irq_postinstall(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct armada_crtc *dcrtc = priv->dcrtc[0];
+
+ spin_lock_irq(&dev->vbl_lock);
+ writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+ writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+ spin_unlock_irq(&dev->vbl_lock);
+
+ return 0;
+}
+
+static void armada_drm_irq_uninstall(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct armada_crtc *dcrtc = priv->dcrtc[0];
+
+ writel(0, dcrtc->base + LCD_SPU_IRQ_ENA);
+}
+
+static struct drm_ioctl_desc armada_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
+ DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl,
+ DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl,
+ DRM_UNLOCKED),
+};
+
+static void armada_drm_lastclose(struct drm_device *dev)
+{
+ armada_fbdev_lastclose(dev);
+}
+
+static const struct file_operations armada_drm_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = drm_read,
+ .poll = drm_poll,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_mmap,
+ .open = drm_open,
+ .release = drm_release,
+};
+
+static struct drm_driver armada_drm_driver = {
+ .load = armada_drm_load,
+ .open = NULL,
+ .preclose = NULL,
+ .postclose = NULL,
+ .lastclose = armada_drm_lastclose,
+ .unload = armada_drm_unload,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = armada_drm_enable_vblank,
+ .disable_vblank = armada_drm_disable_vblank,
+ .irq_handler = armada_drm_irq_handler,
+ .irq_postinstall = armada_drm_irq_postinstall,
+ .irq_uninstall = armada_drm_irq_uninstall,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = armada_drm_debugfs_init,
+ .debugfs_cleanup = armada_drm_debugfs_cleanup,
+#endif
+ .gem_free_object = armada_gem_free_object,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = armada_gem_prime_export,
+ .gem_prime_import = armada_gem_prime_import,
+ .dumb_create = armada_gem_dumb_create,
+ .dumb_map_offset = armada_gem_dumb_map_offset,
+ .dumb_destroy = armada_gem_dumb_destroy,
+ .gem_vm_ops = &armada_gem_vm_ops,
+ .major = 1,
+ .minor = 0,
+ .name = "armada-drm",
+ .desc = "Armada SoC DRM",
+ .date = "20120730",
+ .driver_features = DRIVER_GEM | DRIVER_MODESET |
+ DRIVER_HAVE_IRQ | DRIVER_PRIME,
+ .ioctls = armada_ioctls,
+ .fops = &armada_drm_fops,
+};
+
+static int armada_drm_probe(struct platform_device *pdev)
+{
+ return drm_platform_init(&armada_drm_driver, pdev);
+}
+
+static int armada_drm_remove(struct platform_device *pdev)
+{
+ drm_put_dev(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static const struct platform_device_id armada_drm_platform_ids[] = {
+ {
+ .name = "armada-drm",
+ .driver_data = (unsigned long)&armada510_ops,
+ }, {
+ .name = "armada-510-drm",
+ .driver_data = (unsigned long)&armada510_ops,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
+
+static struct platform_driver armada_drm_platform_driver = {
+ .probe = armada_drm_probe,
+ .remove = armada_drm_remove,
+ .driver = {
+ .name = "armada-drm",
+ .owner = THIS_MODULE,
+ },
+ .id_table = armada_drm_platform_ids,
+};
+
+static int __init armada_drm_init(void)
+{
+ armada_drm_driver.num_ioctls = ARRAY_SIZE(armada_ioctls);
+ return platform_driver_register(&armada_drm_platform_driver);
+}
+module_init(armada_drm_init);
+
+static void __exit armada_drm_exit(void)
+{
+ platform_driver_unregister(&armada_drm_platform_driver);
+}
+module_exit(armada_drm_exit);
+
+MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_DESCRIPTION("Armada DRM Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:armada-drm");
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
new file mode 100644
index 00000000000..1c90969def3
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+
+static void armada_fb_destroy(struct drm_framebuffer *fb)
+{
+ struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
+
+ drm_framebuffer_cleanup(&dfb->fb);
+ drm_gem_object_unreference_unlocked(&dfb->obj->obj);
+ kfree(dfb);
+}
+
+static int armada_fb_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *dfile, unsigned int *handle)
+{
+ struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
+ return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
+}
+
+static const struct drm_framebuffer_funcs armada_fb_funcs = {
+ .destroy = armada_fb_destroy,
+ .create_handle = armada_fb_create_handle,
+};
+
+struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
+{
+ struct armada_framebuffer *dfb;
+ uint8_t format, config;
+ int ret;
+
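+ /*
+ * Map the DRM fourcc onto the hardware frame format and the
+ * channel-swap/YUV-conversion configuration bits.
+ */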
+ switch (mode->pixel_format) {
+#define FMT(drm, fmt, mod) \
+ case DRM_FORMAT_##drm: \
+ format = CFG_##fmt; \
+ config = mod; \
+ break
+ FMT(RGB565, 565, CFG_SWAPRB);
+ FMT(BGR565, 565, 0);
+ FMT(ARGB1555, 1555, CFG_SWAPRB);
+ FMT(ABGR1555, 1555, 0);
+ FMT(RGB888, 888PACK, CFG_SWAPRB);
+ FMT(BGR888, 888PACK, 0);
+ FMT(XRGB8888, X888, CFG_SWAPRB);
+ FMT(XBGR8888, X888, 0);
+ FMT(ARGB8888, 8888, CFG_SWAPRB);
+ FMT(ABGR8888, 8888, 0);
+ FMT(YUYV, 422PACK, CFG_YUV2RGB | CFG_SWAPYU | CFG_SWAPUV);
+ FMT(UYVY, 422PACK, CFG_YUV2RGB);
+ FMT(VYUY, 422PACK, CFG_YUV2RGB | CFG_SWAPUV);
+ FMT(YVYU, 422PACK, CFG_YUV2RGB | CFG_SWAPYU);
+ FMT(YUV422, 422, CFG_YUV2RGB);
+ FMT(YVU422, 422, CFG_YUV2RGB | CFG_SWAPUV);
+ FMT(YUV420, 420, CFG_YUV2RGB);
+ FMT(YVU420, 420, CFG_YUV2RGB | CFG_SWAPUV);
+ FMT(C8, PSEUDO8, 0);
+#undef FMT
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ dfb = kzalloc(sizeof(*dfb), GFP_KERNEL);
+ if (!dfb) {
+ DRM_ERROR("failed to allocate Armada fb object\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dfb->fmt = format;
+ dfb->mod = config;
+ dfb->obj = obj;
+
+ drm_helper_mode_fill_fb_struct(&dfb->fb, mode);
+
+ ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
+ if (ret) {
+ kfree(dfb);
+ return ERR_PTR(ret);
+ }
+
+ /*
+ * Take a reference on our object as we're successful - the
+ * caller already holds a reference, which keeps us safe for
+ * the above call, but the caller will drop their reference
+ * to it. Hence we need to take our own reference.
+ */
+ drm_gem_object_reference(&obj->obj);
+
+ return dfb;
+}
+
+static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+ struct drm_file *dfile, struct drm_mode_fb_cmd2 *mode)
+{
+ struct armada_gem_object *obj;
+ struct armada_framebuffer *dfb;
+ int ret;
+
+ DRM_DEBUG_DRIVER("w%u h%u pf%08x f%u p%u,%u,%u\n",
+ mode->width, mode->height, mode->pixel_format,
+ mode->flags, mode->pitches[0], mode->pitches[1],
+ mode->pitches[2]);
+
+ /* We can only handle a single plane at the moment */
+ if (drm_format_num_planes(mode->pixel_format) > 1 &&
+ (mode->handles[0] != mode->handles[1] ||
+ mode->handles[0] != mode->handles[2])) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ obj = armada_gem_object_lookup(dev, dfile, mode->handles[0]);
+ if (!obj) {
+ ret = -ENOENT;
+ goto err;
+ }
+
+ if (obj->obj.import_attach && !obj->sgt) {
+ ret = armada_gem_map_import(obj);
+ if (ret)
+ goto err_unref;
+ }
+
+ /* Framebuffer objects must have a valid device address for scanout */
+ if (obj->dev_addr == DMA_ERROR_CODE) {
+ ret = -EINVAL;
+ goto err_unref;
+ }
+
+ dfb = armada_framebuffer_create(dev, mode, obj);
+ if (IS_ERR(dfb)) {
+ ret = PTR_ERR(dfb);
+ goto err;
+ }
+
+ drm_gem_object_unreference_unlocked(&obj->obj);
+
+ return &dfb->fb;
+
+ err_unref:
+ drm_gem_object_unreference_unlocked(&obj->obj);
+ err:
+ DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
+ return ERR_PTR(ret);
+}
+
+static void armada_output_poll_changed(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct drm_fb_helper *fbh = priv->fbdev;
+
+ if (fbh)
+ drm_fb_helper_hotplug_event(fbh);
+}
+
+const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
+ .fb_create = armada_fb_create,
+ .output_poll_changed = armada_output_poll_changed,
+};
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
new file mode 100644
index 00000000000..ce3f12ebfc5
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_FB_H
+#define ARMADA_FB_H
+
+struct armada_framebuffer {
+ struct drm_framebuffer fb;
+ struct armada_gem_object *obj;
+ uint8_t fmt;
+ uint8_t mod;
+};
+#define drm_fb_to_armada_fb(dfb) \
+ container_of(dfb, struct armada_framebuffer, fb)
+#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
+
+struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
+ struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
new file mode 100644
index 00000000000..fd166f532ab
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Written from the i915 driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+
+static /*const*/ struct fb_ops armada_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int armada_fb_create(struct drm_fb_helper *fbh,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = fbh->dev;
+ struct drm_mode_fb_cmd2 mode;
+ struct armada_framebuffer *dfb;
+ struct armada_gem_object *obj;
+ struct fb_info *info;
+ int size, ret;
+ void *ptr;
+
+ memset(&mode, 0, sizeof(mode));
+ mode.width = sizes->surface_width;
+ mode.height = sizes->surface_height;
+ mode.pitches[0] = armada_pitch(mode.width, sizes->surface_bpp);
+ mode.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode.pitches[0] * mode.height;
+ obj = armada_gem_alloc_private_object(dev, size);
+ if (!obj) {
+ DRM_ERROR("failed to allocate fb memory\n");
+ return -ENOMEM;
+ }
+
+ ret = armada_gem_linear_back(dev, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(&obj->obj);
+ return ret;
+ }
+
+ ptr = armada_gem_map_object(dev, obj);
+ if (!ptr) {
+ drm_gem_object_unreference_unlocked(&obj->obj);
+ return -ENOMEM;
+ }
+
+ dfb = armada_framebuffer_create(dev, &mode, obj);
+
+ /*
+ * A reference is now held by the framebuffer object if
+ * successful, otherwise this drops the ref for the error path.
+ */
+ drm_gem_object_unreference_unlocked(&obj->obj);
+
+ if (IS_ERR(dfb))
+ return PTR_ERR(dfb);
+
+ info = framebuffer_alloc(0, dev->dev);
+ if (!info) {
+ ret = -ENOMEM;
+ goto err_fballoc;
+ }
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto err_fbcmap;
+ }
+
+ strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
+ info->par = fbh;
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &armada_fb_ops;
+ info->fix.smem_start = obj->phys_addr;
+ info->fix.smem_len = obj->obj.size;
+ info->screen_size = obj->obj.size;
+ info->screen_base = ptr;
+ fbh->fb = &dfb->fb;
+ fbh->fbdev = info;
+ drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
+ drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
+
+ DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
+ dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
+ (unsigned long long)obj->phys_addr);
+
+ return 0;
+
+ err_fbcmap:
+ framebuffer_release(info);
+ err_fballoc:
+ dfb->fb.funcs->destroy(&dfb->fb);
+ return ret;
+}
+
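+/*
+ * The fb helper calls this to (re)create the framebuffer; returning 1
+ * indicates that a new framebuffer was registered, 0 that the existing
+ * one was reused.
+ */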
+static int armada_fb_probe(struct drm_fb_helper *fbh,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ int ret = 0;
+
+ if (!fbh->fb) {
+ ret = armada_fb_create(fbh, sizes);
+ if (ret == 0)
+ ret = 1;
+ }
+ return ret;
+}
+
+static struct drm_fb_helper_funcs armada_fb_helper_funcs = {
+ .gamma_set = armada_drm_crtc_gamma_set,
+ .gamma_get = armada_drm_crtc_gamma_get,
+ .fb_probe = armada_fb_probe,
+};
+
+int armada_fbdev_init(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct drm_fb_helper *fbh;
+ int ret;
+
+ fbh = devm_kzalloc(dev->dev, sizeof(*fbh), GFP_KERNEL);
+ if (!fbh)
+ return -ENOMEM;
+
+ priv->fbdev = fbh;
+
+ fbh->funcs = &armada_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, fbh, 1, 1);
+ if (ret) {
+ DRM_ERROR("failed to initialize drm fb helper\n");
+ goto err_fb_helper;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(fbh);
+ if (ret) {
+ DRM_ERROR("failed to add fb connectors\n");
+ goto err_fb_setup;
+ }
+
+ ret = drm_fb_helper_initial_config(fbh, 32);
+ if (ret) {
+ DRM_ERROR("failed to set initial config\n");
+ goto err_fb_setup;
+ }
+
+ return 0;
+ err_fb_setup:
+ drm_fb_helper_fini(fbh);
+ err_fb_helper:
+ priv->fbdev = NULL;
+ return ret;
+}
+
+void armada_fbdev_lastclose(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ if (priv->fbdev)
+ drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
+}
+
+void armada_fbdev_fini(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct drm_fb_helper *fbh = priv->fbdev;
+
+ if (fbh) {
+ struct fb_info *info = fbh->fbdev;
+
+ if (info) {
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ drm_fb_helper_fini(fbh);
+
+ if (fbh->fb)
+ fbh->fb->funcs->destroy(fbh->fb);
+
+ priv->fbdev = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
new file mode 100644
index 00000000000..bb9b642d848
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -0,0 +1,610 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/shmem_fs.h>
+#include <drm/drmP.h>
+#include "armada_drm.h"
+#include "armada_gem.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
+ unsigned long addr = (unsigned long)vmf->virtual_address;
+ unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
+ int ret;
+
+ pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
+ ret = vm_insert_pfn(vma, addr, pfn);
+
+ switch (ret) {
+ case 0:
+ case -EBUSY:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+const struct vm_operations_struct armada_gem_vm_ops = {
+ .fault = armada_gem_vm_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static size_t roundup_gem_size(size_t size)
+{
+ return roundup(size, PAGE_SIZE);
+}
+
+/* dev->struct_mutex is held here */
+void armada_gem_free_object(struct drm_gem_object *obj)
+{
+ struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+
+ DRM_DEBUG_DRIVER("release obj %p\n", dobj);
+
+ drm_gem_free_mmap_offset(&dobj->obj);
+
+ if (dobj->page) {
+ /* page backed memory */
+ unsigned int order = get_order(dobj->obj.size);
+ __free_pages(dobj->page, order);
+ } else if (dobj->linear) {
+ /* linear backed memory */
+ drm_mm_remove_node(dobj->linear);
+ kfree(dobj->linear);
+ if (dobj->addr)
+ iounmap(dobj->addr);
+ }
+
+ if (dobj->obj.import_attach) {
+ /* We only ever display imported data */
+ dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
+ DMA_TO_DEVICE);
+ drm_prime_gem_destroy(&dobj->obj, NULL);
+ }
+
+ drm_gem_object_release(&dobj->obj);
+
+ kfree(dobj);
+}
+
+int
+armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
+{
+ struct armada_private *priv = dev->dev_private;
+ size_t size = obj->obj.size;
+
+ if (obj->page || obj->linear)
+ return 0;
+
+ /*
+ * If it is a small allocation (typically cursor, which will
+ * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
+ * Framebuffers will never be this small (our minimum size for
+ * framebuffers is larger than this anyway.) Such objects are
+ * only accessed by the CPU so we don't need any special handling
+ * here.
+ */
+ if (size <= 8192) {
+ unsigned int order = get_order(size);
+ struct page *p = alloc_pages(GFP_KERNEL, order);
+
+ if (p) {
+ obj->addr = page_address(p);
+ obj->phys_addr = page_to_phys(p);
+ obj->page = p;
+
+ memset(obj->addr, 0, PAGE_ALIGN(size));
+ }
+ }
+
+ /*
+ * We could grab something from CMA if it's enabled, but that
+ * involves building in a problem:
+ *
+ * CMA's interface uses dma_alloc_coherent(), which provides us
+ * with a CPU virtual address and a device address.
+ *
+ * The CPU virtual address may be either an address in the kernel
+ * direct mapped region (for example, as it would be on x86) or
+ * it may be remapped into another part of kernel memory space
+ * (eg, as it would be on ARM.) This means virt_to_phys() on the
+ * returned virtual address may be invalid, depending on the
+ * architecture implementation.
+ *
+ * The device address may also not be a physical address; it may
+ * be that there is some kind of remapping between the device and
+ * system RAM, which makes the use of the device address also
+ * unsafe to re-use as a physical address.
+ *
+ * This makes DRM usage of dma_alloc_coherent() in a generic way
+ * at best very questionable and unsafe.
+ */
+
+ /* Otherwise, grab it from our linear allocation */
+ if (!obj->page) {
+ struct drm_mm_node *node;
+ unsigned align = min_t(unsigned, size, SZ_2M);
+ void __iomem *ptr;
+ int ret;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+			return -ENOMEM;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_mm_insert_node(&priv->linear, node, size, align,
+ DRM_MM_SEARCH_DEFAULT);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ kfree(node);
+ return ret;
+ }
+
+ obj->linear = node;
+
+ /* Ensure that the memory we're returning is cleared. */
+ ptr = ioremap_wc(obj->linear->start, size);
+ if (!ptr) {
+ mutex_lock(&dev->struct_mutex);
+ drm_mm_remove_node(obj->linear);
+ mutex_unlock(&dev->struct_mutex);
+ kfree(obj->linear);
+ obj->linear = NULL;
+ return -ENOMEM;
+ }
+
+ memset_io(ptr, 0, size);
+ iounmap(ptr);
+
+ obj->phys_addr = obj->linear->start;
+ obj->dev_addr = obj->linear->start;
+ }
+
+ DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
+ (unsigned long long)obj->phys_addr,
+ (unsigned long long)obj->dev_addr);
+
+ return 0;
+}
+
+void *
+armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
+{
+ /* only linear objects need to be ioremap'd */
+ if (!dobj->addr && dobj->linear)
+ dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
+ return dobj->addr;
+}
+
+struct armada_gem_object *
+armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
+{
+ struct armada_gem_object *obj;
+
+ size = roundup_gem_size(size);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ drm_gem_private_object_init(dev, &obj->obj, size);
+ obj->dev_addr = DMA_ERROR_CODE;
+
+ DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
+
+ return obj;
+}
+
+struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
+ size_t size)
+{
+ struct armada_gem_object *obj;
+ struct address_space *mapping;
+
+ size = roundup_gem_size(size);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ if (drm_gem_object_init(dev, &obj->obj, size)) {
+ kfree(obj);
+ return NULL;
+ }
+
+ obj->dev_addr = DMA_ERROR_CODE;
+
+ mapping = obj->obj.filp->f_path.dentry->d_inode->i_mapping;
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
+ DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
+
+ return obj;
+}
+
+/* Dumb alloc support */
+int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct armada_gem_object *dobj;
+ u32 handle;
+ size_t size;
+ int ret;
+
+ args->pitch = armada_pitch(args->width, args->bpp);
+ args->size = size = args->pitch * args->height;
+
+ dobj = armada_gem_alloc_private_object(dev, size);
+ if (dobj == NULL)
+ return -ENOMEM;
+
+ ret = armada_gem_linear_back(dev, dobj);
+ if (ret)
+ goto err;
+
+ ret = drm_gem_handle_create(file, &dobj->obj, &handle);
+ if (ret)
+ goto err;
+
+ args->handle = handle;
+
+ /* drop reference from allocate - handle holds it now */
+ DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
+ err:
+ drm_gem_object_unreference_unlocked(&dobj->obj);
+ return ret;
+}
+
+int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct armada_gem_object *obj;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = armada_gem_object_lookup(dev, file, handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* Don't allow imported objects to be mapped */
+ if (obj->obj.import_attach) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = drm_gem_create_mmap_offset(&obj->obj);
+ if (ret == 0) {
+ *offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
+ DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
+ }
+
+ drm_gem_object_unreference(&obj->obj);
+ err_unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
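+
+/*
+ * A minimal userspace sketch of the dumb-buffer path above, using the
+ * generic DRM UAPI (device path and sizes are assumptions; error
+ * checking omitted):
+ *
+ *	int fd = open("/dev/dri/card0", O_RDWR);
+ *	struct drm_mode_create_dumb creq = {
+ *		.width = 64, .height = 64, .bpp = 32,
+ *	};
+ *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
+ *
+ *	struct drm_mode_map_dumb mreq = { .handle = creq.handle };
+ *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
+ *	void *map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
+ *			 MAP_SHARED, fd, mreq.offset);
+ */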
+
+/* Private driver gem ioctls */
+int armada_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_armada_gem_create *args = data;
+ struct armada_gem_object *dobj;
+ size_t size;
+ u32 handle;
+ int ret;
+
+ if (args->size == 0)
+		return -EINVAL;
+
+ size = args->size;
+
+ dobj = armada_gem_alloc_object(dev, size);
+ if (dobj == NULL)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file, &dobj->obj, &handle);
+ if (ret)
+ goto err;
+
+ args->handle = handle;
+
+ /* drop reference from allocate - handle holds it now */
+ DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
+ err:
+ drm_gem_object_unreference_unlocked(&dobj->obj);
+ return ret;
+}
+
+/* Map a shmem-backed object into process memory space */
+int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_armada_gem_mmap *args = data;
+ struct armada_gem_object *dobj;
+ unsigned long addr;
+
+ dobj = armada_gem_object_lookup(dev, file, args->handle);
+ if (dobj == NULL)
+ return -ENOENT;
+
+ if (!dobj->obj.filp) {
+ drm_gem_object_unreference(&dobj->obj);
+ return -EINVAL;
+ }
+
+ addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, args->offset);
+ drm_gem_object_unreference(&dobj->obj);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
+ args->addr = addr;
+
+ return 0;
+}
+
+int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_armada_gem_pwrite *args = data;
+ struct armada_gem_object *dobj;
+ char __user *ptr;
+ int ret;
+
+ DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
+ args->handle, args->offset, args->size, args->ptr);
+
+ if (args->size == 0)
+ return 0;
+
+ ptr = (char __user *)(uintptr_t)args->ptr;
+
+ if (!access_ok(VERIFY_READ, ptr, args->size))
+ return -EFAULT;
+
+ ret = fault_in_multipages_readable(ptr, args->size);
+ if (ret)
+ return ret;
+
+ dobj = armada_gem_object_lookup(dev, file, args->handle);
+ if (dobj == NULL)
+ return -ENOENT;
+
+ /* Must be a kernel-mapped object */
+	if (!dobj->addr) {
+		ret = -EINVAL;
+		goto unref;
+	}
+
+ if (args->offset > dobj->obj.size ||
+ args->size > dobj->obj.size - args->offset) {
+		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
+ ret = -EINVAL;
+ goto unref;
+ }
+
+ if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
+ ret = -EFAULT;
+ } else if (dobj->update) {
+ dobj->update(dobj->update_data);
+ ret = 0;
+ }
+
+ unref:
+ drm_gem_object_unreference_unlocked(&dobj->obj);
+ return ret;
+}
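+
+/*
+ * Userspace sketch of the private ioctls above.  GEM_CREATE returns a
+ * shmem-backed handle, GEM_MMAP maps it into the caller via vm_mmap(),
+ * and GEM_PWRITE only succeeds on objects the kernel already maps
+ * (dobj->addr set, e.g. the small page-backed cursor allocations).
+ * The request macros and struct layouts are assumed from the UAPI
+ * header <drm/armada_drm.h>:
+ *
+ *	struct drm_armada_gem_create creq = { .size = 4096 };
+ *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_CREATE, &creq);
+ *
+ *	struct drm_armada_gem_mmap mreq = {
+ *		.handle = creq.handle, .size = 4096,
+ *	};
+ *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_MMAP, &mreq);	/* fills .addr */
+ *	memset((void *)(uintptr_t)mreq.addr, 0, 4096);
+ */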
+
+/* Prime support */
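+/*
+ * Three backings are handled when exporting: shmem objects (collected
+ * page by page), a single contiguous page allocation, and linear
+ * regions that have no struct page at all, for which only the DMA
+ * address and length can be filled in.
+ */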
+struct sg_table *
+armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+ struct scatterlist *sg;
+ struct sg_table *sgt;
+ int i, num;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return NULL;
+
+ if (dobj->obj.filp) {
+ struct address_space *mapping;
+ int count;
+
+ count = dobj->obj.size / PAGE_SIZE;
+ if (sg_alloc_table(sgt, count, GFP_KERNEL))
+ goto free_sgt;
+
+ mapping = file_inode(dobj->obj.filp)->i_mapping;
+
+ for_each_sg(sgt->sgl, sg, count, i) {
+ struct page *page;
+
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page)) {
+ num = i;
+ goto release;
+ }
+
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ }
+
+ if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
+ num = sgt->nents;
+ goto release;
+ }
+ } else if (dobj->page) {
+ /* Single contiguous page */
+ if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+ goto free_sgt;
+
+ sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
+
+ if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ goto free_table;
+ } else if (dobj->linear) {
+ /* Single contiguous physical region - no struct page */
+ if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+ goto free_sgt;
+ sg_dma_address(sgt->sgl) = dobj->dev_addr;
+ sg_dma_len(sgt->sgl) = dobj->obj.size;
+ } else {
+ goto free_sgt;
+ }
+ return sgt;
+
+ release:
+ for_each_sg(sgt->sgl, sg, num, i)
+ page_cache_release(sg_page(sg));
+ free_table:
+ sg_free_table(sgt);
+ free_sgt:
+ kfree(sgt);
+ return NULL;
+}
+
+static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+ int i;
+
+ if (!dobj->linear)
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
+ if (dobj->obj.filp) {
+ struct scatterlist *sg;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ page_cache_release(sg_page(sg));
+ }
+
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
+{
+ return NULL;
+}
+
+static void
+armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
+{
+}
+
+static int
+armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
+ .map_dma_buf = armada_gem_prime_map_dma_buf,
+ .unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .kmap_atomic = armada_gem_dmabuf_no_kmap,
+ .kunmap_atomic = armada_gem_dmabuf_no_kunmap,
+ .kmap = armada_gem_dmabuf_no_kmap,
+ .kunmap = armada_gem_dmabuf_no_kunmap,
+ .mmap = armada_gem_dmabuf_mmap,
+};
+
+struct dma_buf *
+armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
+ int flags)
+{
+ return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
+ O_RDWR);
+}
+
+struct drm_gem_object *
+armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
+{
+ struct dma_buf_attachment *attach;
+ struct armada_gem_object *dobj;
+
+ if (buf->ops == &armada_gem_prime_dmabuf_ops) {
+ struct drm_gem_object *obj = buf->priv;
+ if (obj->dev == dev) {
+ /*
+ * Importing our own dmabuf(s) increases the
+ * refcount on the gem object itself.
+ */
+ drm_gem_object_reference(obj);
+ return obj;
+ }
+ }
+
+ attach = dma_buf_attach(buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ dobj = armada_gem_alloc_private_object(dev, buf->size);
+ if (!dobj) {
+ dma_buf_detach(buf, attach);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dobj->obj.import_attach = attach;
+ get_dma_buf(buf);
+
+ /*
+ * Don't call dma_buf_map_attachment() here - it maps the
+ * scatterlist immediately for DMA, and this is not always
+ * an appropriate thing to do.
+ */
+ return &dobj->obj;
+}
+
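+/*
+ * Note that armada_gem_prime_import() above only attaches; the mapping
+ * is deferred to this helper, so the importer sequence is
+ * dma_buf_attach() -> (later) armada_gem_map_import() -> use the
+ * single-entry scatterlist -> dma_buf_unmap_attachment() at free time.
+ */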
+int armada_gem_map_import(struct armada_gem_object *dobj)
+{
+ int ret;
+
+ dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
+ DMA_TO_DEVICE);
+ if (!dobj->sgt) {
+ DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
+ return -EINVAL;
+ }
+ if (IS_ERR(dobj->sgt)) {
+ ret = PTR_ERR(dobj->sgt);
+ dobj->sgt = NULL;
+ DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
+ return ret;
+ }
+ if (dobj->sgt->nents > 1) {
+		DRM_ERROR("dma_buf_map_attachment() returned an unsupported scatterlist (more than one entry)\n");
+ return -EINVAL;
+ }
+ if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
+		DRM_ERROR("dma_buf_map_attachment() returned a buffer smaller than the object\n");
+ return -EINVAL;
+ }
+ dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
+ return 0;
+}
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
new file mode 100644
index 00000000000..00b6cd461a0
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_GEM_H
+#define ARMADA_GEM_H
+
+/* GEM */
+struct armada_gem_object {
+ struct drm_gem_object obj;
+ void *addr;
+ phys_addr_t phys_addr;
+ resource_size_t dev_addr;
+ struct drm_mm_node *linear; /* for linear backed */
+ struct page *page; /* for page backed */
+ struct sg_table *sgt; /* for imported */
+ void (*update)(void *);
+ void *update_data;
+};
+
+extern const struct vm_operations_struct armada_gem_vm_ops;
+
+#define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj)
+
+void armada_gem_free_object(struct drm_gem_object *);
+int armada_gem_linear_back(struct drm_device *, struct armada_gem_object *);
+void *armada_gem_map_object(struct drm_device *, struct armada_gem_object *);
+struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *,
+ size_t);
+int armada_gem_dumb_create(struct drm_file *, struct drm_device *,
+ struct drm_mode_create_dumb *);
+int armada_gem_dumb_map_offset(struct drm_file *, struct drm_device *,
+ uint32_t, uint64_t *);
+int armada_gem_dumb_destroy(struct drm_file *, struct drm_device *,
+ uint32_t);
+struct dma_buf *armada_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
+ struct dma_buf *);
+int armada_gem_map_import(struct armada_gem_object *);
+
+static inline struct armada_gem_object *armada_gem_object_lookup(
+ struct drm_device *dev, struct drm_file *dfile, unsigned handle)
+{
+ struct drm_gem_object *obj = drm_gem_object_lookup(dev, dfile, handle);
+
+ return obj ? drm_to_armada_gem(obj) : NULL;
+}
+#endif
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
new file mode 100644
index 00000000000..27319a8335e
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_hw.h
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_HW_H
+#define ARMADA_HW_H
+
+/*
+ * Note: the following registers are written from IRQ context:
+ * LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
+ * LCD_SPU_DMA_START_ADDR_[YUV][01], LCD_SPU_DMA_PITCH_YC,
+ * LCD_SPU_DMA_PITCH_UV, LCD_SPU_DMA_OVSA_HPXL_VLN,
+ * LCD_SPU_DMA_HPXL_VLN, LCD_SPU_DZM_HPXL_VLN, LCD_SPU_DMA_CTRL0
+ */
+enum {
+ LCD_SPU_ADV_REG = 0x0084, /* Armada 510 */
+ LCD_SPU_DMA_START_ADDR_Y0 = 0x00c0,
+ LCD_SPU_DMA_START_ADDR_U0 = 0x00c4,
+ LCD_SPU_DMA_START_ADDR_V0 = 0x00c8,
+ LCD_CFG_DMA_START_ADDR_0 = 0x00cc,
+ LCD_SPU_DMA_START_ADDR_Y1 = 0x00d0,
+ LCD_SPU_DMA_START_ADDR_U1 = 0x00d4,
+ LCD_SPU_DMA_START_ADDR_V1 = 0x00d8,
+ LCD_CFG_DMA_START_ADDR_1 = 0x00dc,
+ LCD_SPU_DMA_PITCH_YC = 0x00e0,
+ LCD_SPU_DMA_PITCH_UV = 0x00e4,
+ LCD_SPU_DMA_OVSA_HPXL_VLN = 0x00e8,
+ LCD_SPU_DMA_HPXL_VLN = 0x00ec,
+ LCD_SPU_DZM_HPXL_VLN = 0x00f0,
+ LCD_CFG_GRA_START_ADDR0 = 0x00f4,
+ LCD_CFG_GRA_START_ADDR1 = 0x00f8,
+ LCD_CFG_GRA_PITCH = 0x00fc,
+ LCD_SPU_GRA_OVSA_HPXL_VLN = 0x0100,
+ LCD_SPU_GRA_HPXL_VLN = 0x0104,
+ LCD_SPU_GZM_HPXL_VLN = 0x0108,
+ LCD_SPU_HWC_OVSA_HPXL_VLN = 0x010c,
+ LCD_SPU_HWC_HPXL_VLN = 0x0110,
+ LCD_SPUT_V_H_TOTAL = 0x0114,
+ LCD_SPU_V_H_ACTIVE = 0x0118,
+ LCD_SPU_H_PORCH = 0x011c,
+ LCD_SPU_V_PORCH = 0x0120,
+ LCD_SPU_BLANKCOLOR = 0x0124,
+ LCD_SPU_ALPHA_COLOR1 = 0x0128,
+ LCD_SPU_ALPHA_COLOR2 = 0x012c,
+ LCD_SPU_COLORKEY_Y = 0x0130,
+ LCD_SPU_COLORKEY_U = 0x0134,
+ LCD_SPU_COLORKEY_V = 0x0138,
+ LCD_CFG_RDREG4F = 0x013c, /* Armada 510 */
+ LCD_SPU_SPI_RXDATA = 0x0140,
+ LCD_SPU_ISA_RXDATA = 0x0144,
+ LCD_SPU_HWC_RDDAT = 0x0158,
+ LCD_SPU_GAMMA_RDDAT = 0x015c,
+ LCD_SPU_PALETTE_RDDAT = 0x0160,
+ LCD_SPU_IOPAD_IN = 0x0178,
+ LCD_CFG_RDREG5F = 0x017c,
+ LCD_SPU_SPI_CTRL = 0x0180,
+ LCD_SPU_SPI_TXDATA = 0x0184,
+ LCD_SPU_SMPN_CTRL = 0x0188,
+ LCD_SPU_DMA_CTRL0 = 0x0190,
+ LCD_SPU_DMA_CTRL1 = 0x0194,
+ LCD_SPU_SRAM_CTRL = 0x0198,
+ LCD_SPU_SRAM_WRDAT = 0x019c,
+ LCD_SPU_SRAM_PARA0 = 0x01a0, /* Armada 510 */
+ LCD_SPU_SRAM_PARA1 = 0x01a4,
+ LCD_CFG_SCLK_DIV = 0x01a8,
+ LCD_SPU_CONTRAST = 0x01ac,
+ LCD_SPU_SATURATION = 0x01b0,
+ LCD_SPU_CBSH_HUE = 0x01b4,
+ LCD_SPU_DUMB_CTRL = 0x01b8,
+ LCD_SPU_IOPAD_CONTROL = 0x01bc,
+ LCD_SPU_IRQ_ENA = 0x01c0,
+ LCD_SPU_IRQ_ISR = 0x01c4,
+};
+
+/* For LCD_SPU_ADV_REG */
+enum {
+ ADV_VSYNC_L_OFF = 0xfff << 20,
+ ADV_GRACOLORKEY = 1 << 19,
+ ADV_VIDCOLORKEY = 1 << 18,
+ ADV_HWC32BLEND = 1 << 15,
+ ADV_HWC32ARGB = 1 << 14,
+ ADV_HWC32ENABLE = 1 << 13,
+ ADV_VSYNCOFFEN = 1 << 12,
+ ADV_VSYNC_H_OFF = 0xfff << 0,
+};
+
+enum {
+ CFG_565 = 0,
+ CFG_1555 = 1,
+ CFG_888PACK = 2,
+ CFG_X888 = 3,
+ CFG_8888 = 4,
+ CFG_422PACK = 5,
+ CFG_422 = 6,
+ CFG_420 = 7,
+ CFG_PSEUDO4 = 9,
+ CFG_PSEUDO8 = 10,
+ CFG_SWAPRB = 1 << 4,
+ CFG_SWAPUV = 1 << 3,
+ CFG_SWAPYU = 1 << 2,
+ CFG_YUV2RGB = 1 << 1,
+};
+
+/* For LCD_SPU_DMA_CTRL0 */
+enum {
+ CFG_NOBLENDING = 1 << 31,
+ CFG_GAMMA_ENA = 1 << 30,
+ CFG_CBSH_ENA = 1 << 29,
+ CFG_PALETTE_ENA = 1 << 28,
+ CFG_ARBFAST_ENA = 1 << 27,
+ CFG_HWC_1BITMOD = 1 << 26,
+ CFG_HWC_1BITENA = 1 << 25,
+ CFG_HWC_ENA = 1 << 24,
+ CFG_DMAFORMAT = 0xf << 20,
+#define CFG_DMA_FMT(x) ((x) << 20)
+ CFG_GRAFORMAT = 0xf << 16,
+#define CFG_GRA_FMT(x) ((x) << 16)
+#define CFG_GRA_MOD(x) ((x) << 8)
+ CFG_GRA_FTOGGLE = 1 << 15,
+ CFG_GRA_HSMOOTH = 1 << 14,
+ CFG_GRA_TSTMODE = 1 << 13,
+ CFG_GRA_ENA = 1 << 8,
+#define CFG_DMA_MOD(x) ((x) << 0)
+ CFG_DMA_FTOGGLE = 1 << 7,
+ CFG_DMA_HSMOOTH = 1 << 6,
+ CFG_DMA_TSTMODE = 1 << 5,
+ CFG_DMA_ENA = 1 << 0,
+};
+
+enum {
+ CKMODE_DISABLE = 0,
+ CKMODE_Y = 1,
+ CKMODE_U = 2,
+ CKMODE_RGB = 3,
+ CKMODE_V = 4,
+ CKMODE_R = 5,
+ CKMODE_G = 6,
+ CKMODE_B = 7,
+};
+
+/* For LCD_SPU_DMA_CTRL1 */
+enum {
+ CFG_FRAME_TRIG = 1 << 31,
+ CFG_VSYNC_INV = 1 << 27,
+ CFG_CKMODE_MASK = 0x7 << 24,
+#define CFG_CKMODE(x) ((x) << 24)
+ CFG_CARRY = 1 << 23,
+ CFG_GATED_CLK = 1 << 21,
+ CFG_PWRDN_ENA = 1 << 20,
+ CFG_DSCALE_MASK = 0x3 << 18,
+ CFG_DSCALE_NONE = 0x0 << 18,
+ CFG_DSCALE_HALF = 0x1 << 18,
+ CFG_DSCALE_QUAR = 0x2 << 18,
+ CFG_ALPHAM_MASK = 0x3 << 16,
+ CFG_ALPHAM_VIDEO = 0x0 << 16,
+ CFG_ALPHAM_GRA = 0x1 << 16,
+ CFG_ALPHAM_CFG = 0x2 << 16,
+ CFG_ALPHA_MASK = 0xff << 8,
+ CFG_PIXCMD_MASK = 0xff,
+};
+
+/* For LCD_SPU_SRAM_CTRL */
+enum {
+ SRAM_READ = 0 << 14,
+ SRAM_WRITE = 2 << 14,
+ SRAM_INIT = 3 << 14,
+ SRAM_HWC32_RAM1 = 0xc << 8,
+ SRAM_HWC32_RAM2 = 0xd << 8,
+ SRAM_HWC32_RAMR = SRAM_HWC32_RAM1,
+ SRAM_HWC32_RAMG = SRAM_HWC32_RAM2,
+ SRAM_HWC32_RAMB = 0xe << 8,
+ SRAM_HWC32_TRAN = 0xf << 8,
+ SRAM_HWC = 0xf << 8,
+};
+
+/* For LCD_SPU_SRAM_PARA1 */
+enum {
+ CFG_CSB_256x32 = 1 << 15, /* cursor */
+ CFG_CSB_256x24 = 1 << 14, /* palette */
+ CFG_CSB_256x8 = 1 << 13, /* gamma */
+ CFG_PDWN1920x32 = 1 << 8, /* Armada 510: power down vscale ram */
+ CFG_PDWN256x32 = 1 << 7, /* power down cursor */
+ CFG_PDWN256x24 = 1 << 6, /* power down palette */
+ CFG_PDWN256x8 = 1 << 5, /* power down gamma */
+ CFG_PDWNHWC = 1 << 4, /* Armada 510: power down all hwc ram */
+ CFG_PDWN32x32 = 1 << 3, /* power down slave->smart ram */
+ CFG_PDWN16x66 = 1 << 2, /* power down UV fifo */
+ CFG_PDWN32x66 = 1 << 1, /* power down Y fifo */
+ CFG_PDWN64x66 = 1 << 0, /* power down graphic fifo */
+};
+
+/* For LCD_CFG_SCLK_DIV */
+enum {
+ /* Armada 510 */
+ SCLK_510_AXI = 0x0 << 30,
+ SCLK_510_EXTCLK0 = 0x1 << 30,
+ SCLK_510_PLL = 0x2 << 30,
+ SCLK_510_EXTCLK1 = 0x3 << 30,
+ SCLK_510_DIV_CHANGE = 1 << 29,
+ SCLK_510_FRAC_DIV_MASK = 0xfff << 16,
+ SCLK_510_INT_DIV_MASK = 0xffff << 0,
+
+ /* Armada 16x */
+ SCLK_16X_AHB = 0x0 << 28,
+ SCLK_16X_PCLK = 0x1 << 28,
+ SCLK_16X_AXI = 0x4 << 28,
+ SCLK_16X_PLL = 0x8 << 28,
+ SCLK_16X_FRAC_DIV_MASK = 0xfff << 16,
+ SCLK_16X_INT_DIV_MASK = 0xffff << 0,
+};
+
+/* For LCD_SPU_DUMB_CTRL */
+enum {
+ DUMB16_RGB565_0 = 0x0 << 28,
+ DUMB16_RGB565_1 = 0x1 << 28,
+ DUMB18_RGB666_0 = 0x2 << 28,
+ DUMB18_RGB666_1 = 0x3 << 28,
+ DUMB12_RGB444_0 = 0x4 << 28,
+ DUMB12_RGB444_1 = 0x5 << 28,
+ DUMB24_RGB888_0 = 0x6 << 28,
+ DUMB_BLANK = 0x7 << 28,
+ DUMB_MASK = 0xf << 28,
+ CFG_BIAS_OUT = 1 << 8,
+ CFG_REV_RGB = 1 << 7,
+ CFG_INV_CBLANK = 1 << 6,
+ CFG_INV_CSYNC = 1 << 5, /* Normally active high */
+ CFG_INV_HENA = 1 << 4,
+ CFG_INV_VSYNC = 1 << 3, /* Normally active high */
+ CFG_INV_HSYNC = 1 << 2, /* Normally active high */
+ CFG_INV_PCLK = 1 << 1,
+ CFG_DUMB_ENA = 1 << 0,
+};
+
+/* For LCD_SPU_IOPAD_CONTROL */
+enum {
+ CFG_VSCALE_LN_EN = 3 << 18,
+ CFG_GRA_VM_ENA = 1 << 15,
+ CFG_DMA_VM_ENA = 1 << 13,
+ CFG_CMD_VM_ENA = 1 << 11,
+ CFG_CSC_MASK = 3 << 8,
+ CFG_CSC_YUV_CCIR709 = 1 << 9,
+ CFG_CSC_YUV_CCIR601 = 0 << 9,
+ CFG_CSC_RGB_STUDIO = 1 << 8,
+ CFG_CSC_RGB_COMPUTER = 0 << 8,
+ CFG_IOPAD_MASK = 0xf << 0,
+ CFG_IOPAD_DUMB24 = 0x0 << 0,
+ CFG_IOPAD_DUMB18SPI = 0x1 << 0,
+ CFG_IOPAD_DUMB18GPIO = 0x2 << 0,
+ CFG_IOPAD_DUMB16SPI = 0x3 << 0,
+ CFG_IOPAD_DUMB16GPIO = 0x4 << 0,
+ CFG_IOPAD_DUMB12GPIO = 0x5 << 0,
+ CFG_IOPAD_SMART18 = 0x6 << 0,
+ CFG_IOPAD_SMART16 = 0x7 << 0,
+ CFG_IOPAD_SMART8 = 0x8 << 0,
+};
+
+#define IOPAD_DUMB24 0x0
+
+/* For LCD_SPU_IRQ_ENA */
+enum {
+ DMA_FRAME_IRQ0_ENA = 1 << 31,
+ DMA_FRAME_IRQ1_ENA = 1 << 30,
+ DMA_FRAME_IRQ_ENA = DMA_FRAME_IRQ0_ENA | DMA_FRAME_IRQ1_ENA,
+ DMA_FF_UNDERFLOW_ENA = 1 << 29,
+ GRA_FRAME_IRQ0_ENA = 1 << 27,
+ GRA_FRAME_IRQ1_ENA = 1 << 26,
+ GRA_FRAME_IRQ_ENA = GRA_FRAME_IRQ0_ENA | GRA_FRAME_IRQ1_ENA,
+ GRA_FF_UNDERFLOW_ENA = 1 << 25,
+ VSYNC_IRQ_ENA = 1 << 23,
+ DUMB_FRAMEDONE_ENA = 1 << 22,
+ TWC_FRAMEDONE_ENA = 1 << 21,
+ HWC_FRAMEDONE_ENA = 1 << 20,
+ SLV_IRQ_ENA = 1 << 19,
+ SPI_IRQ_ENA = 1 << 18,
+ PWRDN_IRQ_ENA = 1 << 17,
+ ERR_IRQ_ENA = 1 << 16,
+ CLEAN_SPU_IRQ_ISR = 0xffff,
+};
+
+/* For LCD_SPU_IRQ_ISR */
+enum {
+ DMA_FRAME_IRQ0 = 1 << 31,
+ DMA_FRAME_IRQ1 = 1 << 30,
+ DMA_FRAME_IRQ = DMA_FRAME_IRQ0 | DMA_FRAME_IRQ1,
+ DMA_FF_UNDERFLOW = 1 << 29,
+ GRA_FRAME_IRQ0 = 1 << 27,
+ GRA_FRAME_IRQ1 = 1 << 26,
+ GRA_FRAME_IRQ = GRA_FRAME_IRQ0 | GRA_FRAME_IRQ1,
+ GRA_FF_UNDERFLOW = 1 << 25,
+ VSYNC_IRQ = 1 << 23,
+ DUMB_FRAMEDONE = 1 << 22,
+ TWC_FRAMEDONE = 1 << 21,
+ HWC_FRAMEDONE = 1 << 20,
+ SLV_IRQ = 1 << 19,
+ SPI_IRQ = 1 << 18,
+ PWRDN_IRQ = 1 << 17,
+ ERR_IRQ = 1 << 16,
+ DMA_FRAME_IRQ0_LEVEL = 1 << 15,
+ DMA_FRAME_IRQ1_LEVEL = 1 << 14,
+ DMA_FRAME_CNT_ISR = 3 << 12,
+ GRA_FRAME_IRQ0_LEVEL = 1 << 11,
+ GRA_FRAME_IRQ1_LEVEL = 1 << 10,
+ GRA_FRAME_CNT_ISR = 3 << 8,
+ VSYNC_IRQ_LEVEL = 1 << 7,
+ DUMB_FRAMEDONE_LEVEL = 1 << 6,
+ TWC_FRAMEDONE_LEVEL = 1 << 5,
+ HWC_FRAMEDONE_LEVEL = 1 << 4,
+ SLV_FF_EMPTY = 1 << 3,
+ DMA_FF_ALLEMPTY = 1 << 2,
+ GRA_FF_ALLEMPTY = 1 << 1,
+ PWRDN_IRQ_LEVEL = 1 << 0,
+};
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_ioctlP.h b/drivers/gpu/drm/armada/armada_ioctlP.h
new file mode 100644
index 00000000000..bd8c4562066
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_ioctlP.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_IOCTLP_H
+#define ARMADA_IOCTLP_H
+
+#define ARMADA_IOCTL_PROTO(name)\
+extern int armada_##name##_ioctl(struct drm_device *, void *, struct drm_file *)
+
+ARMADA_IOCTL_PROTO(gem_create);
+ARMADA_IOCTL_PROTO(gem_mmap);
+ARMADA_IOCTL_PROTO(gem_pwrite);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_output.c b/drivers/gpu/drm/armada/armada_output.c
new file mode 100644
index 00000000000..d685a542148
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_output.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+#include "armada_output.h"
+#include "armada_drm.h"
+
+struct armada_connector {
+ struct drm_connector conn;
+ const struct armada_output_type *type;
+};
+
+#define drm_to_armada_conn(c) container_of(c, struct armada_connector, conn)
+
+struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn)
+{
+ struct drm_encoder *enc = conn->encoder;
+
+ return enc ? enc : drm_encoder_find(conn->dev, conn->encoder_ids[0]);
+}
+
+static enum drm_connector_status armada_drm_connector_detect(
+ struct drm_connector *conn, bool force)
+{
+ struct armada_connector *dconn = drm_to_armada_conn(conn);
+ enum drm_connector_status status = connector_status_disconnected;
+
+ if (dconn->type->detect) {
+ status = dconn->type->detect(conn, force);
+ } else {
+ struct drm_encoder *enc = armada_drm_connector_encoder(conn);
+
+ if (enc)
+ status = encoder_helper_funcs(enc)->detect(enc, conn);
+ }
+
+ return status;
+}
+
+static void armada_drm_connector_destroy(struct drm_connector *conn)
+{
+ struct armada_connector *dconn = drm_to_armada_conn(conn);
+
+ drm_sysfs_connector_remove(conn);
+ drm_connector_cleanup(conn);
+ kfree(dconn);
+}
+
+static int armada_drm_connector_set_property(struct drm_connector *conn,
+ struct drm_property *property, uint64_t value)
+{
+ struct armada_connector *dconn = drm_to_armada_conn(conn);
+
+ if (!dconn->type->set_property)
+ return -EINVAL;
+
+ return dconn->type->set_property(conn, property, value);
+}
+
+static const struct drm_connector_funcs armada_drm_conn_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = armada_drm_connector_detect,
+ .destroy = armada_drm_connector_destroy,
+ .set_property = armada_drm_connector_set_property,
+};
+
+void armada_drm_encoder_prepare(struct drm_encoder *encoder)
+{
+ encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void armada_drm_encoder_commit(struct drm_encoder *encoder)
+{
+ encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode, struct drm_display_mode *adjusted)
+{
+ return true;
+}
+
+/* Shouldn't this be a generic helper function? */
+int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
+ struct drm_display_mode *mode)
+{
+ struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
+ int valid = MODE_BAD;
+
+ if (encoder) {
+ struct drm_encoder_slave *slave = to_encoder_slave(encoder);
+
+ valid = slave->slave_funcs->mode_valid(encoder, mode);
+ }
+ return valid;
+}
+
+int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
+ struct drm_property *property, uint64_t value)
+{
+ struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
+ int rc = -EINVAL;
+
+ if (encoder) {
+ struct drm_encoder_slave *slave = to_encoder_slave(encoder);
+
+ rc = slave->slave_funcs->set_property(encoder, conn, property,
+ value);
+ }
+ return rc;
+}
+
+int armada_output_create(struct drm_device *dev,
+ const struct armada_output_type *type, const void *data)
+{
+ struct armada_connector *dconn;
+ int ret;
+
+ dconn = kzalloc(sizeof(*dconn), GFP_KERNEL);
+ if (!dconn)
+ return -ENOMEM;
+
+ dconn->type = type;
+
+ ret = drm_connector_init(dev, &dconn->conn, &armada_drm_conn_funcs,
+ type->connector_type);
+ if (ret) {
+ DRM_ERROR("unable to init connector\n");
+ goto err_destroy_dconn;
+ }
+
+ ret = type->create(&dconn->conn, data);
+ if (ret)
+ goto err_conn;
+
+ ret = drm_sysfs_connector_add(&dconn->conn);
+ if (ret)
+ goto err_sysfs;
+
+ return 0;
+
+ err_sysfs:
+ if (dconn->conn.encoder)
+ dconn->conn.encoder->funcs->destroy(dconn->conn.encoder);
+ err_conn:
+ drm_connector_cleanup(&dconn->conn);
+ err_destroy_dconn:
+ kfree(dconn);
+ return ret;
+}
diff --git a/drivers/gpu/drm/armada/armada_output.h b/drivers/gpu/drm/armada/armada_output.h
new file mode 100644
index 00000000000..4126d43b505
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_output.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_CONNECTOR_H
+#define ARMADA_CONNECTOR_H
+
+#define encoder_helper_funcs(encoder) \
+ ((struct drm_encoder_helper_funcs *)encoder->helper_private)
+
+struct armada_output_type {
+ int connector_type;
+ enum drm_connector_status (*detect)(struct drm_connector *, bool);
+ int (*create)(struct drm_connector *, const void *);
+ int (*set_property)(struct drm_connector *, struct drm_property *,
+ uint64_t);
+};
+
+struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn);
+
+void armada_drm_encoder_prepare(struct drm_encoder *encoder);
+void armada_drm_encoder_commit(struct drm_encoder *encoder);
+
+bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode, struct drm_display_mode *adj);
+
+int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
+ struct drm_display_mode *mode);
+
+int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
+ struct drm_property *property, uint64_t value);
+
+int armada_output_create(struct drm_device *dev,
+ const struct armada_output_type *type, const void *data);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
new file mode 100644
index 00000000000..c5b06fdb459
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+struct armada_plane_properties {
+ uint32_t colorkey_yr;
+ uint32_t colorkey_ug;
+ uint32_t colorkey_vb;
+#define K2R(val) (((val) >> 0) & 0xff)
+#define K2G(val) (((val) >> 8) & 0xff)
+#define K2B(val) (((val) >> 16) & 0xff)
+ int16_t brightness;
+ uint16_t contrast;
+ uint16_t saturation;
+ uint32_t colorkey_mode;
+};
+
+struct armada_plane {
+ struct drm_plane base;
+ spinlock_t lock;
+ struct drm_framebuffer *old_fb;
+ uint32_t src_hw;
+ uint32_t dst_hw;
+ uint32_t dst_yx;
+ uint32_t ctrl0;
+ struct {
+ struct armada_vbl_event update;
+ struct armada_regs regs[13];
+ wait_queue_head_t wait;
+ } vbl;
+ struct armada_plane_properties prop;
+};
+#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
+
+
+static void
+armada_ovl_update_attr(struct armada_plane_properties *prop,
+ struct armada_crtc *dcrtc)
+{
+ writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
+ writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U);
+ writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V);
+
+ writel_relaxed(prop->brightness << 16 | prop->contrast,
+ dcrtc->base + LCD_SPU_CONTRAST);
+ /* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */
+ writel_relaxed(prop->saturation << 16,
+ dcrtc->base + LCD_SPU_SATURATION);
+ writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
+
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
+ CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+ dcrtc->base + LCD_SPU_DMA_CTRL1);
+
+ armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
+ spin_unlock_irq(&dcrtc->irq_lock);
+}
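+
+/*
+ * armada_updatel() comes from armada_drm.h (not in this hunk); it is
+ * roughly a relaxed read-modify-write, along the lines of:
+ *
+ *	v = readl_relaxed(ptr);
+ *	v = (v & ~mask) | val;
+ *	writel_relaxed(v, ptr);
+ */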
+
+/* === Plane support === */
+static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
+{
+ struct armada_plane *dplane = data;
+ struct drm_framebuffer *fb;
+
+ armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
+
+ spin_lock(&dplane->lock);
+ fb = dplane->old_fb;
+ dplane->old_fb = NULL;
+ spin_unlock(&dplane->lock);
+
+ if (fb)
+ armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
+}
+
+static unsigned armada_limit(int start, unsigned size, unsigned max)
+{
+ int end = start + size;
+ if (end < 0)
+ return 0;
+ if (start < 0)
+ start = 0;
+ return (unsigned)end > max ? max - start : end - start;
+}
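+
+/*
+ * Worked examples of the clamp above, with max = 100:
+ *	start = -10, size =  20: end = 10, start clamped to 0, returns 10
+ *	start =  50, size = 100: end = 150 > max, returns max - start = 50
+ *	start = -30, size =  20: end = -10 < 0, returns 0
+ */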
+
+static int
+armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h)
+{
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ uint32_t val, ctrl0;
+ unsigned idx = 0;
+ int ret;
+
+ crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
+ crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
+ ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
+ CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
+ CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
+
+ /* Does the position/size result in nothing to display? */
+	if (crtc_w == 0 || crtc_h == 0)
+		ctrl0 &= ~CFG_DMA_ENA;
+
+ /*
+ * FIXME: if the starting point is off screen, we need to
+ * adjust src_x, src_y, src_w, src_h appropriately, and
+ * according to the scale.
+ */
+
+ if (!dcrtc->plane) {
+ dcrtc->plane = plane;
+ armada_ovl_update_attr(&dplane->prop, dcrtc);
+ }
+
+ /* FIXME: overlay on an interlaced display */
+ /* Just updating the position/size? */
+ if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
+ val = (src_h & 0xffff0000) | src_w >> 16;
+ dplane->src_hw = val;
+ writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
+ val = crtc_h << 16 | crtc_w;
+ dplane->dst_hw = val;
+ writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
+ val = crtc_y << 16 | crtc_x;
+ dplane->dst_yx = val;
+ writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
+ return 0;
+ } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
+ /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
+ armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
+ dcrtc->base + LCD_SPU_SRAM_PARA1);
+ }
+
+ ret = wait_event_timeout(dplane->vbl.wait,
+ list_empty(&dplane->vbl.update.node),
+ HZ/25);
+ if (ret < 0)
+ return ret;
+
+ if (plane->fb != fb) {
+ struct armada_gem_object *obj = drm_fb_obj(fb);
+ uint32_t sy, su, sv;
+
+ /*
+ * Take a reference on the new framebuffer - we want to
+ * hold on to it while the hardware is displaying it.
+ */
+ drm_framebuffer_reference(fb);
+
+ if (plane->fb) {
+ struct drm_framebuffer *older_fb;
+
+ spin_lock_irq(&dplane->lock);
+ older_fb = dplane->old_fb;
+ dplane->old_fb = plane->fb;
+ spin_unlock_irq(&dplane->lock);
+ if (older_fb)
+ armada_drm_queue_unref_work(dcrtc->crtc.dev,
+ older_fb);
+ }
+
+ src_y >>= 16;
+ src_x >>= 16;
+ sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
+ src_x * fb->bits_per_pixel / 8;
+ su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
+ src_x;
+ sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
+ src_x;
+
+ armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+ LCD_SPU_DMA_START_ADDR_Y0);
+ armada_reg_queue_set(dplane->vbl.regs, idx, su,
+ LCD_SPU_DMA_START_ADDR_U0);
+ armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+ LCD_SPU_DMA_START_ADDR_V0);
+ armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+ LCD_SPU_DMA_START_ADDR_Y1);
+ armada_reg_queue_set(dplane->vbl.regs, idx, su,
+ LCD_SPU_DMA_START_ADDR_U1);
+ armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+ LCD_SPU_DMA_START_ADDR_V1);
+
+ val = fb->pitches[0] << 16 | fb->pitches[0];
+ armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ LCD_SPU_DMA_PITCH_YC);
+ val = fb->pitches[1] << 16 | fb->pitches[2];
+ armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ LCD_SPU_DMA_PITCH_UV);
+ }
+
+ val = (src_h & 0xffff0000) | src_w >> 16;
+ if (dplane->src_hw != val) {
+ dplane->src_hw = val;
+ armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ LCD_SPU_DMA_HPXL_VLN);
+ }
+ val = crtc_h << 16 | crtc_w;
+ if (dplane->dst_hw != val) {
+ dplane->dst_hw = val;
+ armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ LCD_SPU_DZM_HPXL_VLN);
+ }
+ val = crtc_y << 16 | crtc_x;
+ if (dplane->dst_yx != val) {
+ dplane->dst_yx = val;
+ armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ LCD_SPU_DMA_OVSA_HPXL_VLN);
+ }
+ if (dplane->ctrl0 != ctrl0) {
+ dplane->ctrl0 = ctrl0;
+ armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
+ CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
+ CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
+ CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
+ CFG_YUV2RGB) | CFG_DMA_ENA,
+ LCD_SPU_DMA_CTRL0);
+ }
+ if (idx) {
+ armada_reg_queue_end(dplane->vbl.regs, idx);
+ armada_drm_vbl_event_add(dcrtc, &dplane->vbl.update);
+ }
+ return 0;
+}
+
+static int armada_plane_disable(struct drm_plane *plane)
+{
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ struct drm_framebuffer *fb;
+ struct armada_crtc *dcrtc;
+
+ if (!dplane->base.crtc)
+ return 0;
+
+ dcrtc = drm_to_armada_crtc(dplane->base.crtc);
+ dcrtc->plane = NULL;
+
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_vbl_event_remove(dcrtc, &dplane->vbl.update);
+ armada_updatel(0, CFG_DMA_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+ dplane->ctrl0 = 0;
+ spin_unlock_irq(&dcrtc->irq_lock);
+
+ /* Power down the Y/U/V FIFOs */
+ armada_updatel(CFG_PDWN16x66 | CFG_PDWN32x66, 0,
+ dcrtc->base + LCD_SPU_SRAM_PARA1);
+
+ if (plane->fb)
+ drm_framebuffer_unreference(plane->fb);
+
+ spin_lock_irq(&dplane->lock);
+ fb = dplane->old_fb;
+ dplane->old_fb = NULL;
+ spin_unlock_irq(&dplane->lock);
+ if (fb)
+ drm_framebuffer_unreference(fb);
+
+ return 0;
+}
+
+static void armada_plane_destroy(struct drm_plane *plane)
+{
+ kfree(plane);
+}
+
+static int armada_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val)
+{
+ struct armada_private *priv = plane->dev->dev_private;
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ bool update_attr = false;
+
+ if (property == priv->colorkey_prop) {
+#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
+ dplane->prop.colorkey_yr = CCC(K2R(val));
+ dplane->prop.colorkey_ug = CCC(K2G(val));
+ dplane->prop.colorkey_vb = CCC(K2B(val));
+#undef CCC
+ update_attr = true;
+ } else if (property == priv->colorkey_min_prop) {
+ dplane->prop.colorkey_yr &= ~0x00ff0000;
+ dplane->prop.colorkey_yr |= K2R(val) << 16;
+ dplane->prop.colorkey_ug &= ~0x00ff0000;
+ dplane->prop.colorkey_ug |= K2G(val) << 16;
+ dplane->prop.colorkey_vb &= ~0x00ff0000;
+ dplane->prop.colorkey_vb |= K2B(val) << 16;
+ update_attr = true;
+ } else if (property == priv->colorkey_max_prop) {
+ dplane->prop.colorkey_yr &= ~0xff000000;
+ dplane->prop.colorkey_yr |= K2R(val) << 24;
+ dplane->prop.colorkey_ug &= ~0xff000000;
+ dplane->prop.colorkey_ug |= K2G(val) << 24;
+ dplane->prop.colorkey_vb &= ~0xff000000;
+ dplane->prop.colorkey_vb |= K2B(val) << 24;
+ update_attr = true;
+ } else if (property == priv->colorkey_val_prop) {
+ dplane->prop.colorkey_yr &= ~0x0000ff00;
+ dplane->prop.colorkey_yr |= K2R(val) << 8;
+ dplane->prop.colorkey_ug &= ~0x0000ff00;
+ dplane->prop.colorkey_ug |= K2G(val) << 8;
+ dplane->prop.colorkey_vb &= ~0x0000ff00;
+ dplane->prop.colorkey_vb |= K2B(val) << 8;
+ update_attr = true;
+ } else if (property == priv->colorkey_alpha_prop) {
+ dplane->prop.colorkey_yr &= ~0x000000ff;
+ dplane->prop.colorkey_yr |= K2R(val);
+ dplane->prop.colorkey_ug &= ~0x000000ff;
+ dplane->prop.colorkey_ug |= K2G(val);
+ dplane->prop.colorkey_vb &= ~0x000000ff;
+ dplane->prop.colorkey_vb |= K2B(val);
+ update_attr = true;
+ } else if (property == priv->colorkey_mode_prop) {
+ dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
+ dplane->prop.colorkey_mode |= CFG_CKMODE(val);
+ update_attr = true;
+ } else if (property == priv->brightness_prop) {
+ dplane->prop.brightness = val - 256;
+ update_attr = true;
+ } else if (property == priv->contrast_prop) {
+ dplane->prop.contrast = val;
+ update_attr = true;
+ } else if (property == priv->saturation_prop) {
+ dplane->prop.saturation = val;
+ update_attr = true;
+ }
+
+ if (update_attr && dplane->base.crtc)
+ armada_ovl_update_attr(&dplane->prop,
+ drm_to_armada_crtc(dplane->base.crtc));
+
+ return 0;
+}
+
+static const struct drm_plane_funcs armada_plane_funcs = {
+ .update_plane = armada_plane_update,
+ .disable_plane = armada_plane_disable,
+ .destroy = armada_plane_destroy,
+ .set_property = armada_plane_set_property,
+};
+
+static const uint32_t armada_formats[] = {
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+};
+
+static struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
+ { CKMODE_DISABLE, "disabled" },
+ { CKMODE_Y, "Y component" },
+ { CKMODE_U, "U component" },
+ { CKMODE_V, "V component" },
+ { CKMODE_RGB, "RGB" },
+ { CKMODE_R, "R component" },
+ { CKMODE_G, "G component" },
+ { CKMODE_B, "B component" },
+};
+
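+/*
+ * The properties below are ordinary DRM object properties, so they can
+ * be driven from userspace with libdrm; a sketch (fd and plane_id are
+ * assumptions, property id discovered by name):
+ *
+ *	drmModeObjectPropertiesPtr props =
+ *		drmModeObjectGetProperties(fd, plane_id,
+ *					   DRM_MODE_OBJECT_PLANE);
+ *	for (uint32_t i = 0; i < props->count_props; i++) {
+ *		drmModePropertyPtr p =
+ *			drmModeGetProperty(fd, props->props[i]);
+ *		if (!strcmp(p->name, "colorkey_mode"))
+ *			drmModeObjectSetProperty(fd, plane_id,
+ *						 DRM_MODE_OBJECT_PLANE,
+ *						 p->prop_id, CKMODE_RGB);
+ *		drmModeFreeProperty(p);
+ *	}
+ *	drmModeFreeObjectProperties(props);
+ */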
+static int armada_overlay_create_properties(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ if (priv->colorkey_prop)
+ return 0;
+
+ priv->colorkey_prop = drm_property_create_range(dev, 0,
+ "colorkey", 0, 0xffffff);
+ priv->colorkey_min_prop = drm_property_create_range(dev, 0,
+ "colorkey_min", 0, 0xffffff);
+ priv->colorkey_max_prop = drm_property_create_range(dev, 0,
+ "colorkey_max", 0, 0xffffff);
+ priv->colorkey_val_prop = drm_property_create_range(dev, 0,
+ "colorkey_val", 0, 0xffffff);
+ priv->colorkey_alpha_prop = drm_property_create_range(dev, 0,
+ "colorkey_alpha", 0, 0xffffff);
+ priv->colorkey_mode_prop = drm_property_create_enum(dev, 0,
+ "colorkey_mode",
+ armada_drm_colorkey_enum_list,
+ ARRAY_SIZE(armada_drm_colorkey_enum_list));
+ priv->brightness_prop = drm_property_create_range(dev, 0,
+ "brightness", 0, 256 + 255);
+ priv->contrast_prop = drm_property_create_range(dev, 0,
+ "contrast", 0, 0x7fff);
+ priv->saturation_prop = drm_property_create_range(dev, 0,
+ "saturation", 0, 0x7fff);
+
+ if (!priv->colorkey_prop)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
+{
+ struct armada_private *priv = dev->dev_private;
+ struct drm_mode_object *mobj;
+ struct armada_plane *dplane;
+ int ret;
+
+ ret = armada_overlay_create_properties(dev);
+ if (ret)
+ return ret;
+
+ dplane = kzalloc(sizeof(*dplane), GFP_KERNEL);
+ if (!dplane)
+ return -ENOMEM;
+
+ spin_lock_init(&dplane->lock);
+ init_waitqueue_head(&dplane->vbl.wait);
+ armada_drm_vbl_event_init(&dplane->vbl.update, armada_plane_vbl,
+ dplane);
+
+ drm_plane_init(dev, &dplane->base, crtcs, &armada_plane_funcs,
+ armada_formats, ARRAY_SIZE(armada_formats), false);
+
+ dplane->prop.colorkey_yr = 0xfefefe00;
+ dplane->prop.colorkey_ug = 0x01010100;
+ dplane->prop.colorkey_vb = 0x01010100;
+ dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
+ dplane->prop.brightness = 0;
+ dplane->prop.contrast = 0x4000;
+ dplane->prop.saturation = 0x4000;
+
+ mobj = &dplane->base.base;
+ drm_object_attach_property(mobj, priv->colorkey_prop,
+ 0x0101fe);
+ drm_object_attach_property(mobj, priv->colorkey_min_prop,
+ 0x0101fe);
+ drm_object_attach_property(mobj, priv->colorkey_max_prop,
+ 0x0101fe);
+ drm_object_attach_property(mobj, priv->colorkey_val_prop,
+ 0x0101fe);
+ drm_object_attach_property(mobj, priv->colorkey_alpha_prop,
+ 0x000000);
+ drm_object_attach_property(mobj, priv->colorkey_mode_prop,
+ CKMODE_RGB);
+ drm_object_attach_property(mobj, priv->brightness_prop, 256);
+ drm_object_attach_property(mobj, priv->contrast_prop,
+ dplane->prop.contrast);
+ drm_object_attach_property(mobj, priv->saturation_prop,
+ dplane->prop.saturation);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/armada/armada_slave.c b/drivers/gpu/drm/armada/armada_slave.c
new file mode 100644
index 00000000000..00d0facb42f
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_slave.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+#include "armada_drm.h"
+#include "armada_output.h"
+#include "armada_slave.h"
+
+static int armada_drm_slave_get_modes(struct drm_connector *conn)
+{
+ struct drm_encoder *enc = armada_drm_connector_encoder(conn);
+ int count = 0;
+
+ if (enc) {
+ struct drm_encoder_slave *slave = to_encoder_slave(enc);
+
+ count = slave->slave_funcs->get_modes(enc, conn);
+ }
+
+ return count;
+}
+
+static void armada_drm_slave_destroy(struct drm_encoder *enc)
+{
+ struct drm_encoder_slave *slave = to_encoder_slave(enc);
+ struct i2c_client *client = drm_i2c_encoder_get_client(enc);
+
+ if (slave->slave_funcs)
+ slave->slave_funcs->destroy(enc);
+ if (client)
+ i2c_put_adapter(client->adapter);
+
+ drm_encoder_cleanup(&slave->base);
+ kfree(slave);
+}
+
+static const struct drm_encoder_funcs armada_drm_slave_encoder_funcs = {
+ .destroy = armada_drm_slave_destroy,
+};
+
+static const struct drm_connector_helper_funcs armada_drm_slave_helper_funcs = {
+ .get_modes = armada_drm_slave_get_modes,
+ .mode_valid = armada_drm_slave_encoder_mode_valid,
+ .best_encoder = armada_drm_connector_encoder,
+};
+
+static const struct drm_encoder_helper_funcs drm_slave_encoder_helpers = {
+ .dpms = drm_i2c_encoder_dpms,
+ .save = drm_i2c_encoder_save,
+ .restore = drm_i2c_encoder_restore,
+ .mode_fixup = drm_i2c_encoder_mode_fixup,
+ .prepare = drm_i2c_encoder_prepare,
+ .commit = drm_i2c_encoder_commit,
+ .mode_set = drm_i2c_encoder_mode_set,
+ .detect = drm_i2c_encoder_detect,
+};
+
+static int
+armada_drm_conn_slave_create(struct drm_connector *conn, const void *data)
+{
+ const struct armada_drm_slave_config *config = data;
+ struct drm_encoder_slave *slave;
+ struct i2c_adapter *adap;
+ int ret;
+
+ conn->interlace_allowed = config->interlace_allowed;
+ conn->doublescan_allowed = config->doublescan_allowed;
+ conn->polled = config->polled;
+
+ drm_connector_helper_add(conn, &armada_drm_slave_helper_funcs);
+
+ slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+ if (!slave)
+ return -ENOMEM;
+
+ slave->base.possible_crtcs = config->crtcs;
+
+ adap = i2c_get_adapter(config->i2c_adapter_id);
+ if (!adap) {
+ kfree(slave);
+ return -EPROBE_DEFER;
+ }
+
+ ret = drm_encoder_init(conn->dev, &slave->base,
+ &armada_drm_slave_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret) {
+ DRM_ERROR("unable to init encoder\n");
+ i2c_put_adapter(adap);
+ kfree(slave);
+ return ret;
+ }
+
+ ret = drm_i2c_encoder_init(conn->dev, slave, adap, &config->info);
+ i2c_put_adapter(adap);
+ if (ret) {
+ DRM_ERROR("unable to init encoder slave\n");
+ armada_drm_slave_destroy(&slave->base);
+ return ret;
+ }
+
+ drm_encoder_helper_add(&slave->base, &drm_slave_encoder_helpers);
+
+ ret = slave->slave_funcs->create_resources(&slave->base, conn);
+ if (ret) {
+ armada_drm_slave_destroy(&slave->base);
+ return ret;
+ }
+
+ ret = drm_mode_connector_attach_encoder(conn, &slave->base);
+ if (ret) {
+ armada_drm_slave_destroy(&slave->base);
+ return ret;
+ }
+
+ conn->encoder = &slave->base;
+
+ return ret;
+}
+
+static const struct armada_output_type armada_drm_conn_slave = {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+ .create = armada_drm_conn_slave_create,
+ .set_property = armada_drm_slave_encoder_set_property,
+};
+
+int armada_drm_connector_slave_create(struct drm_device *dev,
+ const struct armada_drm_slave_config *config)
+{
+ return armada_output_create(dev, &armada_drm_conn_slave, config);
+}
diff --git a/drivers/gpu/drm/armada/armada_slave.h b/drivers/gpu/drm/armada/armada_slave.h
new file mode 100644
index 00000000000..bf2374c96fc
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_slave.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_SLAVE_H
+#define ARMADA_SLAVE_H
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+struct armada_drm_slave_config {
+ int i2c_adapter_id;
+ uint32_t crtcs;
+ uint8_t polled;
+ bool interlace_allowed;
+ bool doublescan_allowed;
+ struct i2c_board_info info;
+};
+
+int armada_drm_connector_slave_create(struct drm_device *dev,
+ const struct armada_drm_slave_config *);
+
+#endif
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
new file mode 100644
index 00000000000..8a784c460c8
--- /dev/null
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -0,0 +1,17 @@
+config DRM_AST
+ tristate "AST server chips"
+ depends on DRM && PCI
+ select DRM_TTM
+ select FB_SYS_COPYAREA
+ select FB_SYS_FILLRECT
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_TTM
+ help
+	 Say yes for the experimental AST GPU driver.  Do not enable
+	 this driver without a working -modesetting userspace driver,
+	 and a version of the AST X.org driver that knows to fail if
+	 KMS is bound to the driver.  These GPUs are commonly found in
+	 server chipsets.
+
diff --git a/drivers/gpu/drm/i830/Makefile b/drivers/gpu/drm/ast/Makefile
index c642ee0b238..171aa0622b6 100644
--- a/drivers/gpu/drm/i830/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -3,6 +3,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
-i830-y := i830_drv.o i830_dma.o i830_irq.o
-obj-$(CONFIG_DRM_I830) += i830.o
+ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o ast_dp501.o
+
+obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
new file mode 100644
index 00000000000..5da4b62285f
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -0,0 +1,410 @@
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "ast_drv.h"
+MODULE_FIRMWARE("ast_dp501_fw.bin");
+
+int ast_load_dp501_microcode(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+	static const char *fw_name = "ast_dp501_fw.bin";
+	int err;
+
+	err = request_firmware(&ast->dp501_fw, fw_name, dev->dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void send_ack(struct ast_private *ast)
+{
+ u8 sendack;
+ sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
+ sendack |= 0x80;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
+}
+
+static void send_nack(struct ast_private *ast)
+{
+ u8 sendack;
+ sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
+ sendack &= ~0x80;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
+}
+
+static bool wait_ack(struct ast_private *ast)
+{
+ u8 waitack;
+ u32 retry = 0;
+ do {
+ waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
+ waitack &= 0x80;
+ udelay(100);
+ } while ((!waitack) && (retry++ < 1000));
+
+	return retry < 1000;
+}
+
+static bool wait_nack(struct ast_private *ast)
+{
+ u8 waitack;
+ u32 retry = 0;
+ do {
+ waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
+ waitack &= 0x80;
+ udelay(100);
+ } while ((waitack) && (retry++ < 1000));
+
+	return retry < 1000;
+}
+
+static void set_cmd_trigger(struct ast_private *ast)
+{
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x40);
+}
+
+static void clear_cmd_trigger(struct ast_private *ast)
+{
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x00);
+}
+
+#if 0
+static bool wait_fw_ready(struct ast_private *ast)
+{
+ u8 waitready;
+ u32 retry = 0;
+ do {
+ waitready = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
+ waitready &= 0x40;
+ udelay(100);
+ } while ((!waitready) && (retry++ < 1000));
+
+	return retry < 1000;
+}
+#endif
+
+static bool ast_write_cmd(struct drm_device *dev, u8 data)
+{
+ struct ast_private *ast = dev->dev_private;
+ int retry = 0;
+ if (wait_nack(ast)) {
+ send_nack(ast);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data);
+ send_ack(ast);
+ set_cmd_trigger(ast);
+ do {
+ if (wait_ack(ast)) {
+ clear_cmd_trigger(ast);
+ send_nack(ast);
+ return true;
+ }
+ } while (retry++ < 100);
+ }
+ clear_cmd_trigger(ast);
+ send_nack(ast);
+ return false;
+}
+
+static bool ast_write_data(struct drm_device *dev, u8 data)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ if (wait_nack(ast)) {
+ send_nack(ast);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data);
+ send_ack(ast);
+ if (wait_ack(ast)) {
+ send_nack(ast);
+ return true;
+ }
+ }
+ send_nack(ast);
+ return false;
+}
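+
+/*
+ * Handshake summary: each byte is written to indexed register 0x9a, the
+ * host's ack bit (0x9b[7]) is raised, and the DP501 firmware mirrors it
+ * in 0xd2[7]; command bytes additionally pulse the trigger bit 0x9b[6].
+ * ast_set_dp501_video_output() below shows the minimal cmd-then-data
+ * sequence.
+ */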
+
+#if 0
+static bool ast_read_data(struct drm_device *dev, u8 *data)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 tmp;
+
+ *data = 0;
+
+ if (wait_ack(ast) == false)
+ return false;
+ tmp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd3, 0xff);
+ *data = tmp;
+ if (wait_nack(ast) == false) {
+ send_nack(ast);
+ return false;
+ }
+ send_nack(ast);
+ return true;
+}
+
+static void clear_cmd(struct ast_private *ast)
+{
+ send_nack(ast);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, 0x00);
+}
+#endif
+
+void ast_set_dp501_video_output(struct drm_device *dev, u8 mode)
+{
+ ast_write_cmd(dev, 0x40);
+ ast_write_data(dev, mode);
+
+ msleep(10);
+}
+
+static u32 get_fw_base(struct ast_private *ast)
+{
+ return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff;
+}
+
+bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
+{
+ struct ast_private *ast = dev->dev_private;
+ u32 i, data;
+ u32 boot_address;
+
+ data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
+ if (data) {
+ boot_address = get_fw_base(ast);
+ for (i = 0; i < size; i += 4)
+ *(u32 *)(addr + i) = ast_mindwm(ast, boot_address + i);
+ return true;
+ }
+ return false;
+}
+
+bool ast_launch_m68k(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u32 i, data, len = 0;
+ u32 boot_address;
+ u8 *fw_addr = NULL;
+ u8 jreg;
+
+ data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
+ if (!data) {
+
+ if (ast->dp501_fw_addr) {
+ fw_addr = ast->dp501_fw_addr;
+ len = 32*1024;
+ } else if (ast->dp501_fw) {
+ fw_addr = (u8 *)ast->dp501_fw->data;
+ len = ast->dp501_fw->size;
+ }
+ /* Get BootAddress */
+ ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8);
+ data = ast_mindwm(ast, 0x1e6e0004);
+ switch (data & 0x03) {
+ case 0:
+ boot_address = 0x44000000;
+ break;
+ default:
+ case 1:
+ boot_address = 0x48000000;
+ break;
+ case 2:
+ boot_address = 0x50000000;
+ break;
+ case 3:
+ boot_address = 0x60000000;
+ break;
+ }
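+		/*
+		 * Assumption: the top 2MB of the selected window is set
+		 * aside for the DP501 firmware image (the image copied
+		 * below is well under 2MB).
+		 */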
+ boot_address -= 0x200000; /* -2MB */
+
+ /* copy image to buffer */
+ for (i = 0; i < len; i += 4) {
+ data = *(u32 *)(fw_addr + i);
+ ast_moutdwm(ast, boot_address + i, data);
+ }
+
+ /* Init SCU */
+ ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8);
+
+ /* Launch FW */
+ ast_moutdwm(ast, 0x1e6e2104, 0x80000000 + boot_address);
+ ast_moutdwm(ast, 0x1e6e2100, 1);
+
+ /* Update Scratch */
+ data = ast_mindwm(ast, 0x1e6e2040) & 0xfffff1ff; /* D[11:9] = 100b: UEFI handling */
+ data |= 0x800;
+ ast_moutdwm(ast, 0x1e6e2040, data);
+
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xfc); /* D[1:0]: Reserved Video Buffer */
+ jreg |= 0x02;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x99, jreg);
+ }
+ return true;
+}
+
+u8 ast_get_dp501_max_clk(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u32 boot_address, offset, data;
+ u8 linkcap[4], linkrate, linklanes, maxclk = 0xff;
+
+ boot_address = get_fw_base(ast);
+
+ /* validate FW version */
+ offset = 0xf000;
+ data = ast_mindwm(ast, boot_address + offset);
+ if ((data & 0xf0) != 0x10) /* version: 1x */
+ return maxclk;
+
+ /* Read Link Capability */
+ offset = 0xf014;
+ *(u32 *)linkcap = ast_mindwm(ast, boot_address + offset);
+ if (linkcap[2] == 0) {
+ linkrate = linkcap[0];
+ linklanes = linkcap[1];
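+		/*
+		 * DP link-rate codes: 0x0a is 2.7Gb/s per lane, anything
+		 * else is assumed to be 1.62Gb/s.  With 8b/10b coding and
+		 * 24bpp, 2.7G * 0.8 / 24 = 90MHz of pixel clock per lane
+		 * (54MHz for the low rate), so maxclk is in MHz.
+		 */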
+ data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
+ if (data > 0xff)
+ data = 0xff;
+ maxclk = (u8)data;
+ }
+ return maxclk;
+}
+
+bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
+{
+ struct ast_private *ast = dev->dev_private;
+ u32 i, boot_address, offset, data;
+
+ boot_address = get_fw_base(ast);
+
+ /* validate FW version */
+ offset = 0xf000;
+ data = ast_mindwm(ast, boot_address + offset);
+ if ((data & 0xf0) != 0x10)
+ return false;
+
+ /* validate PnP Monitor */
+ offset = 0xf010;
+ data = ast_mindwm(ast, boot_address + offset);
+ if (!(data & 0x01))
+ return false;
+
+ /* Read EDID */
+ offset = 0xf020;
+ for (i = 0; i < 128; i += 4) {
+ data = ast_mindwm(ast, boot_address + offset + i);
+ *(u32 *)(ediddata + i) = data;
+ }
+
+ return true;
+}
+
+static bool ast_init_dvo(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 jreg;
+ u32 data;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688a8a8);
+
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ if (!(jreg & 0x80)) {
+ /* Init SCU DVO Settings */
+ data = ast_read32(ast, 0x12008);
+ /* delay phase */
+ data &= 0xfffff8ff;
+ data |= 0x00000500;
+ ast_write32(ast, 0x12008, data);
+
+ if (ast->chip == AST2300) {
+ data = ast_read32(ast, 0x12084);
+ /* multi-pins for DVO single-edge */
+ data |= 0xfffe0000;
+ ast_write32(ast, 0x12084, data);
+
+ data = ast_read32(ast, 0x12088);
+ /* multi-pins for DVO single-edge */
+ data |= 0x000fffff;
+ ast_write32(ast, 0x12088, data);
+
+ data = ast_read32(ast, 0x12090);
+ /* multi-pins for DVO single-edge */
+ data &= 0xffffffcf;
+ data |= 0x00000020;
+ ast_write32(ast, 0x12090, data);
+ } else { /* AST2400 */
+ data = ast_read32(ast, 0x12088);
+ /* multi-pins for DVO single-edge */
+ data |= 0x30000000;
+ ast_write32(ast, 0x12088, data);
+
+ data = ast_read32(ast, 0x1208c);
+ /* multi-pins for DVO single-edge */
+ data |= 0x000000cf;
+ ast_write32(ast, 0x1208c, data);
+
+ data = ast_read32(ast, 0x120a4);
+ /* multi-pins for DVO single-edge */
+ data |= 0xffff0000;
+ ast_write32(ast, 0x120a4, data);
+
+ data = ast_read32(ast, 0x120a8);
+ /* multi-pins for DVO single-edge */
+ data |= 0x0000000f;
+ ast_write32(ast, 0x120a8, data);
+
+ data = ast_read32(ast, 0x12094);
+ /* multi-pins for DVO single-edge */
+ data |= 0x00000002;
+ ast_write32(ast, 0x12094, data);
+ }
+ }
+
+ /* Force to DVO */
+ data = ast_read32(ast, 0x1202c);
+ data &= 0xfffbffff;
+ ast_write32(ast, 0x1202c, data);
+
+ /* Init VGA DVO Settings */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);
+ return true;
+}
+
+void ast_init_3rdtx(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 jreg;
+ u32 data;
+ if (ast->chip == AST2300 || ast->chip == AST2400) {
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
+ switch (jreg & 0x0e) {
+ case 0x04:
+ ast_init_dvo(dev);
+ break;
+ case 0x08:
+ ast_launch_m68k(dev);
+ break;
+ case 0x0c:
+ ast_init_dvo(dev);
+ break;
+ default:
+ if (ast->tx_chip_type == AST_TX_SIL164)
+ ast_init_dvo(dev);
+ else {
+ ast_write32(ast, 0x12000, 0x1688a8a8);
+ data = ast_read32(ast, 0x1202c);
+ data &= 0xfffcffff;
+				ast_write32(ast, 0x1202c, data);
+ }
+ }
+ }
+}
diff --git a/drivers/gpu/drm/ast/ast_dram_tables.h b/drivers/gpu/drm/ast/ast_dram_tables.h
new file mode 100644
index 00000000000..cc04539c0ff
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_dram_tables.h
@@ -0,0 +1,144 @@
+#ifndef AST_DRAM_TABLES_H
+#define AST_DRAM_TABLES_H
+
+/* DRAM timing tables */
+struct ast_dramstruct {
+ u16 index;
+ u32 data;
+};
+
+static const struct ast_dramstruct ast2000_dram_table_data[] = {
+ { 0x0108, 0x00000000 },
+ { 0x0120, 0x00004a21 },
+ { 0xFF00, 0x00000043 },
+ { 0x0000, 0xFFFFFFFF },
+ { 0x0004, 0x00000089 },
+ { 0x0008, 0x22331353 },
+ { 0x000C, 0x0d07000b },
+ { 0x0010, 0x11113333 },
+ { 0x0020, 0x00110350 },
+ { 0x0028, 0x1e0828f0 },
+ { 0x0024, 0x00000001 },
+ { 0x001C, 0x00000000 },
+ { 0x0014, 0x00000003 },
+ { 0xFF00, 0x00000043 },
+ { 0x0018, 0x00000131 },
+ { 0x0014, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x0018, 0x00000031 },
+ { 0x0014, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x0028, 0x1e0828f1 },
+ { 0x0024, 0x00000003 },
+ { 0x002C, 0x1f0f28fb },
+ { 0x0030, 0xFFFFFE01 },
+ { 0xFFFF, 0xFFFFFFFF }
+};
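These tables carry two in-band markers: a { 0xFFFF, ... } entry terminates the walk, and { 0xFF00, ... } entries look like delays rather than register writes. The consumer lives in ast_post.c, which is not part of this hunk, so the loop below is only a sketch of the expected walk; the delay semantics and the register base are assumptions:

/* hypothetical walker (the real one is in ast_post.c) */
const struct ast_dramstruct *reg = ast2000_dram_table_data;
while (reg->index != 0xffff) {           /* 0xFFFF entry ends the table */
	if (reg->index == 0xff00)        /* assumed in-band delay marker */
		udelay(reg->data & 0xff);
	else                             /* assumed MCR register base */
		ast_write32(ast, 0x10000 + reg->index, reg->data);
	reg++;
}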
+
+static const struct ast_dramstruct ast1100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x000041f0 },
+ { 0xFF00, 0x00000043 },
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00050000 },
+ { 0x0004, 0x00000585 },
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x22201724 },
+ { 0x0018, 0x1e29011a },
+ { 0x0020, 0x00c82222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x032aa02a },
+ { 0x0064, 0x002d3000 },
+ { 0x0068, 0x00000000 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x002C, 0x00000732 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000632 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00004c41 },
+ { 0xffff, 0xffffffff },
+};
+
+static const struct ast_dramstruct ast2100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x00004120 },
+ { 0xFF00, 0x00000043 },
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00070000 },
+ { 0x0004, 0x00000489 },
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x32302926 },
+ { 0x0018, 0x274c0122 },
+ { 0x0020, 0x00ce2222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x0f2aa02a },
+ { 0x0064, 0x003f3005 },
+ { 0x0068, 0x02020202 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x002C, 0x00000942 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000842 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00005061 },
+ { 0xffff, 0xffffffff },
+};
+
+#endif
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
new file mode 100644
index 00000000000..44074fbcf7f
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "ast_drv.h"
+
+int ast_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, ast_modeset, int, 0400);
+
+#define PCI_VENDOR_ASPEED 0x1a03
+
+static struct drm_driver driver;
+
+#define AST_VGA_DEVICE(id, info) { \
+ .class = PCI_BASE_CLASS_DISPLAY << 16, \
+ .class_mask = 0xff0000, \
+ .vendor = PCI_VENDOR_ASPEED, \
+ .device = id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .driver_data = (unsigned long) info }
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL),
+ AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL),
+ /* AST_VGA_DEVICE(PCI_CHIP_AST1180, NULL), - don't bind to 1180 for now */
+ {0, 0, 0},
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void
+ast_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static int ast_drm_freeze(struct drm_device *dev)
+{
+ drm_kms_helper_poll_disable(dev);
+
+ pci_save_state(dev->pdev);
+
+ console_lock();
+ ast_fbdev_set_suspend(dev, 1);
+ console_unlock();
+ return 0;
+}
+
+static int ast_drm_thaw(struct drm_device *dev)
+{
+ int error = 0;
+
+ ast_post_gpu(dev);
+
+ drm_mode_config_reset(dev);
+ drm_helper_resume_force_mode(dev);
+
+ console_lock();
+ ast_fbdev_set_suspend(dev, 0);
+ console_unlock();
+ return error;
+}
+
+static int ast_drm_resume(struct drm_device *dev)
+{
+ int ret;
+
+ if (pci_enable_device(dev->pdev))
+ return -EIO;
+
+ ret = ast_drm_thaw(dev);
+ if (ret)
+ return ret;
+
+ drm_kms_helper_poll_enable(dev);
+ return 0;
+}
+
+static int ast_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ int error;
+
+ error = ast_drm_freeze(ddev);
+ if (error)
+ return error;
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+
+static int ast_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ return ast_drm_resume(ddev);
+}
+
+static int ast_pm_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+
+ if (!ddev || !ddev->dev_private)
+ return -ENODEV;
+ return ast_drm_freeze(ddev);
+}
+
+static int ast_pm_thaw(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ return ast_drm_thaw(ddev);
+}
+
+static int ast_pm_poweroff(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+
+ return ast_drm_freeze(ddev);
+}
+
+static const struct dev_pm_ops ast_pm_ops = {
+ .suspend = ast_pm_suspend,
+ .resume = ast_pm_resume,
+ .freeze = ast_pm_freeze,
+ .thaw = ast_pm_thaw,
+ .poweroff = ast_pm_poweroff,
+ .restore = ast_pm_resume,
+};
+
+static struct pci_driver ast_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = ast_pci_probe,
+ .remove = ast_pci_remove,
+ .driver.pm = &ast_pm_ops,
+};
+
+static const struct file_operations ast_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = ast_mmap,
+ .poll = drm_poll,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .read = drm_read,
+};
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM,
+
+ .load = ast_driver_load,
+ .unload = ast_driver_unload,
+
+ .fops = &ast_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ .gem_free_object = ast_gem_free_object,
+ .dumb_create = ast_dumb_create,
+ .dumb_map_offset = ast_dumb_mmap_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+
+};
+
+static int __init ast_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && ast_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (ast_modeset == 0)
+ return -EINVAL;
+ return drm_pci_init(&driver, &ast_pci_driver);
+}
+
+static void __exit ast_exit(void)
+{
+ drm_pci_exit(&driver, &ast_pci_driver);
+}
+
+module_init(ast_init);
+module_exit(ast_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
new file mode 100644
index 00000000000..5d6a87573c3
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -0,0 +1,395 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#ifndef __AST_DRV_H__
+#define __AST_DRV_H__
+
+#include <drm/drm_fb_helper.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_memory.h>
+#include <drm/ttm/ttm_module.h>
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#define DRIVER_AUTHOR "Dave Airlie"
+
+#define DRIVER_NAME "ast"
+#define DRIVER_DESC "AST"
+#define DRIVER_DATE "20120228"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
+
+#define PCI_CHIP_AST2000 0x2000
+#define PCI_CHIP_AST2100 0x2010
+#define PCI_CHIP_AST1180 0x1180
+
+enum ast_chip {
+ AST2000,
+ AST2100,
+ AST1100,
+ AST2200,
+ AST2150,
+ AST2300,
+ AST2400,
+ AST1180,
+};
+
+enum ast_tx_chip {
+ AST_TX_NONE,
+ AST_TX_SIL164,
+ AST_TX_ITE66121,
+ AST_TX_DP501,
+};
+
+#define AST_DRAM_512Mx16 0
+#define AST_DRAM_1Gx16 1
+#define AST_DRAM_512Mx32 2
+#define AST_DRAM_1Gx32 3
+#define AST_DRAM_2Gx16 6
+#define AST_DRAM_4Gx16 7
+
+struct ast_fbdev;
+
+struct ast_private {
+ struct drm_device *dev;
+
+ void __iomem *regs;
+ void __iomem *ioregs;
+
+ enum ast_chip chip;
+ bool vga2_clone;
+ uint32_t dram_bus_width;
+ uint32_t dram_type;
+ uint32_t mclk;
+ uint32_t vram_size;
+
+ struct ast_fbdev *fbdev;
+
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ } ttm;
+
+ struct drm_gem_object *cursor_cache;
+ uint64_t cursor_cache_gpu_addr;
+	/* Access to this cache is protected by the crtc->mutex of the only
+	 * CRTC we have. */
+ struct ttm_bo_kmap_obj cache_kmap;
+ int next_cursor;
+ bool support_wide_screen;
+
+ enum ast_tx_chip tx_chip_type;
+ u8 dp501_maxclk;
+ u8 *dp501_fw_addr;
+ const struct firmware *dp501_fw; /* dp501 fw */
+};
+
+int ast_driver_load(struct drm_device *dev, unsigned long flags);
+int ast_driver_unload(struct drm_device *dev);
+
+struct ast_gem_object;
+
+#define AST_IO_AR_PORT_WRITE (0x40)
+#define AST_IO_MISC_PORT_WRITE (0x42)
+#define AST_IO_SEQ_PORT (0x44)
+#define AST_DAC_INDEX_READ (0x3c7)
+#define AST_IO_DAC_INDEX_WRITE (0x48)
+#define AST_IO_DAC_DATA (0x49)
+#define AST_IO_GR_PORT (0x4E)
+#define AST_IO_CRTC_PORT (0x54)
+#define AST_IO_INPUT_STATUS1_READ (0x5A)
+#define AST_IO_MISC_PORT_READ (0x4C)
+
+#define __ast_read(x) \
+static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
+u##x val = 0;\
+val = ioread##x(ast->regs + reg); \
+return val;\
+}
+
+__ast_read(8);
+__ast_read(16);
+__ast_read(32);
+
+#define __ast_io_read(x) \
+static inline u##x ast_io_read##x(struct ast_private *ast, u32 reg) { \
+u##x val = 0;\
+val = ioread##x(ast->ioregs + reg); \
+return val;\
+}
+
+__ast_io_read(8);
+__ast_io_read(16);
+__ast_io_read(32);
+
+#define __ast_write(x) \
+static inline void ast_write##x(struct ast_private *ast, u32 reg, u##x val) {\
+ iowrite##x(val, ast->regs + reg);\
+ }
+
+__ast_write(8);
+__ast_write(16);
+__ast_write(32);
+
+#define __ast_io_write(x) \
+static inline void ast_io_write##x(struct ast_private *ast, u32 reg, u##x val) {\
+ iowrite##x(val, ast->ioregs + reg);\
+ }
+
+__ast_io_write(8);
+__ast_io_write(16);
+#undef __ast_io_write
+
+static inline void ast_set_index_reg(struct ast_private *ast,
+ uint32_t base, uint8_t index,
+ uint8_t val)
+{
+ ast_io_write16(ast, base, ((u16)val << 8) | index);
+}
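This helper leans on the VGA convention that a 16-bit write to an index port latches the low byte as the index and the high byte as the data, analogous to the classic 0x3d4/0x3d5 CRTC index/data pair; for example:

/* program CRTC register 0xa3 to 0x80 with a single 16-bit I/O write */
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa3, 0x80);
/* expands to: ast_io_write16(ast, 0x54, (0x80 << 8) | 0xa3) */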
+
+void ast_set_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index,
+ uint8_t mask, uint8_t val);
+uint8_t ast_get_index_reg(struct ast_private *ast,
+ uint32_t base, uint8_t index);
+uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index, uint8_t mask);
+
+static inline void ast_open_key(struct ast_private *ast)
+{
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
+}
+
+#define AST_VIDMEM_SIZE_8M 0x00800000
+#define AST_VIDMEM_SIZE_16M 0x01000000
+#define AST_VIDMEM_SIZE_32M 0x02000000
+#define AST_VIDMEM_SIZE_64M 0x04000000
+#define AST_VIDMEM_SIZE_128M 0x08000000
+
+#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
+
+#define AST_MAX_HWC_WIDTH 64
+#define AST_MAX_HWC_HEIGHT 64
+
+#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH*AST_MAX_HWC_HEIGHT*2)
+#define AST_HWC_SIGNATURE_SIZE 32
+
+#define AST_DEFAULT_HWC_NUM 2
+/* define for signature structure */
+#define AST_HWC_SIGNATURE_CHECKSUM 0x00
+#define AST_HWC_SIGNATURE_SizeX 0x04
+#define AST_HWC_SIGNATURE_SizeY 0x08
+#define AST_HWC_SIGNATURE_X 0x0C
+#define AST_HWC_SIGNATURE_Y 0x10
+#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
+#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+
+struct ast_i2c_chan {
+ struct i2c_adapter adapter;
+ struct drm_device *dev;
+ struct i2c_algo_bit_data bit;
+};
+
+struct ast_connector {
+ struct drm_connector base;
+ struct ast_i2c_chan *i2c;
+};
+
+struct ast_crtc {
+ struct drm_crtc base;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ struct drm_gem_object *cursor_bo;
+ uint64_t cursor_addr;
+ int cursor_width, cursor_height;
+ u8 offset_x, offset_y;
+};
+
+struct ast_encoder {
+ struct drm_encoder base;
+};
+
+struct ast_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct ast_fbdev {
+ struct drm_fb_helper helper;
+ struct ast_framebuffer afb;
+ struct list_head fbdev_list;
+ void *sysram;
+ int size;
+ struct ttm_bo_kmap_obj mapping;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
+};
+
+#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
+#define to_ast_connector(x) container_of(x, struct ast_connector, base)
+#define to_ast_encoder(x) container_of(x, struct ast_encoder, base)
+#define to_ast_framebuffer(x) container_of(x, struct ast_framebuffer, base)
+
+struct ast_vbios_stdtable {
+ u8 misc;
+ u8 seq[4];
+ u8 crtc[25];
+ u8 ar[20];
+ u8 gr[9];
+};
+
+struct ast_vbios_enhtable {
+ u32 ht;
+ u32 hde;
+ u32 hfp;
+ u32 hsync;
+ u32 vt;
+ u32 vde;
+ u32 vfp;
+ u32 vsync;
+ u32 dclk_index;
+ u32 flags;
+ u32 refresh_rate;
+ u32 refresh_rate_index;
+ u32 mode_id;
+};
+
+struct ast_vbios_dclk_info {
+ u8 param1;
+ u8 param2;
+ u8 param3;
+};
+
+struct ast_vbios_mode_info {
+ struct ast_vbios_stdtable *std_table;
+ struct ast_vbios_enhtable *enh_table;
+};
+
+extern int ast_mode_init(struct drm_device *dev);
+extern void ast_mode_fini(struct drm_device *dev);
+
+int ast_framebuffer_init(struct drm_device *dev,
+ struct ast_framebuffer *ast_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+int ast_fbdev_init(struct drm_device *dev);
+void ast_fbdev_fini(struct drm_device *dev);
+void ast_fbdev_set_suspend(struct drm_device *dev, int state);
+
+struct ast_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+#define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem)
+
+static inline struct ast_bo *
+ast_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct ast_bo, bo);
+}
+
+#define to_ast_obj(x) container_of(x, struct ast_gem_object, base)
+
+#define AST_MM_ALIGN_SHIFT 4
+#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
+
+extern int ast_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+extern void ast_gem_free_object(struct drm_gem_object *obj);
+extern int ast_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset);
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+int ast_mm_init(struct ast_private *ast);
+void ast_mm_fini(struct ast_private *ast);
+
+int ast_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct ast_bo **pastbo);
+
+int ast_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+
+int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int ast_bo_unpin(struct ast_bo *bo);
+
+static inline int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ if (ret) {
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
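Callers pair the two helpers around any access to the buffer's kmap or pin state; a minimal sketch of the pattern used throughout this driver:

ret = ast_bo_reserve(bo, false);
if (ret)
	return ret;
/* ... touch bo->kmap, ast_bo_pin()/ast_bo_unpin(), etc. ... */
ast_bo_unreserve(bo);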
+
+static inline void ast_bo_unreserve(struct ast_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+void ast_ttm_placement(struct ast_bo *bo, int domain);
+int ast_bo_push_sysram(struct ast_bo *bo);
+int ast_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* ast post */
+void ast_post_gpu(struct drm_device *dev);
+u32 ast_mindwm(struct ast_private *ast, u32 r);
+void ast_moutdwm(struct ast_private *ast, u32 r, u32 v);
+/* ast dp501 */
+int ast_load_dp501_microcode(struct drm_device *dev);
+void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
+bool ast_launch_m68k(struct drm_device *dev);
+bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
+bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
+u8 ast_get_dp501_max_clk(struct drm_device *dev);
+void ast_init_3rdtx(struct drm_device *dev);
+#endif
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
new file mode 100644
index 00000000000..a28640f47c2
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -0,0 +1,369 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include "ast_drv.h"
+
+static void ast_dirty_update(struct ast_fbdev *afbdev,
+ int x, int y, int width, int height)
+{
+ int i;
+ struct drm_gem_object *obj;
+ struct ast_bo *bo;
+ int src_offset, dst_offset;
+ int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
+ int ret = -EBUSY;
+ bool unmap = false;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;
+
+ obj = afbdev->afb.obj;
+ bo = gem_to_ast_bo(obj);
+
+	/*
+	 * Try to reserve the BO. If that fails with -EBUSY, the BO is
+	 * being moved and we should store up the damage until later.
+	 */
+ if (drm_can_sleep())
+ ret = ast_bo_reserve(bo, true);
+ if (ret) {
+ if (ret != -EBUSY)
+ return;
+
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+ if (afbdev->y1 < y)
+ y = afbdev->y1;
+ if (afbdev->y2 > y2)
+ y2 = afbdev->y2;
+ if (afbdev->x1 < x)
+ x = afbdev->x1;
+ if (afbdev->x2 > x2)
+ x2 = afbdev->x2;
+
+ if (store_for_later) {
+ afbdev->x1 = x;
+ afbdev->x2 = x2;
+ afbdev->y1 = y;
+ afbdev->y2 = y2;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+ return;
+ }
+
+ afbdev->x1 = afbdev->y1 = INT_MAX;
+ afbdev->x2 = afbdev->y2 = 0;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ ast_bo_unreserve(bo);
+ return;
+ }
+ unmap = true;
+ }
+ for (i = y; i <= y2; i++) {
+ /* assume equal stride for now */
+ src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp);
+	}
+ if (unmap)
+ ttm_bo_kunmap(&bo->kmap);
+
+ ast_bo_unreserve(bo);
+}
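The min/max updates above merge the new damage with any stored rectangle into their common bounding box; for instance (illustrative coordinates, min()/max() from linux/kernel.h):

/* stored rect (100,100)-(200,200), new damage (150,50)-(300,120) */
int x = 150, y = 50, x2 = 300, y2 = 120;
int sx1 = 100, sy1 = 100, sx2 = 200, sy2 = 200;
x = min(x, sx1);   /* 100 */
y = min(y, sy1);   /*  50 */
x2 = max(x2, sx2); /* 300 */
y2 = max(y2, sy2); /* 200: one copy-back now covers both updates */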
+
+static void ast_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct ast_fbdev *afbdev = info->par;
+ sys_fillrect(info, rect);
+ ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+static void ast_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct ast_fbdev *afbdev = info->par;
+ sys_copyarea(info, area);
+ ast_dirty_update(afbdev, area->dx, area->dy, area->width,
+ area->height);
+}
+
+static void ast_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct ast_fbdev *afbdev = info->par;
+ sys_imageblit(info, image);
+ ast_dirty_update(afbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+static struct fb_ops astfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = ast_fillrect,
+ .fb_copyarea = ast_copyarea,
+ .fb_imageblit = ast_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int astfb_create_object(struct ast_fbdev *afbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ u32 bpp, depth;
+ u32 size;
+ struct drm_gem_object *gobj;
+
+ int ret = 0;
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = ast_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int astfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
+ struct drm_device *dev = afbdev->helper.dev;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_framebuffer *fb;
+ struct fb_info *info;
+ int size, ret;
+ struct device *device = &dev->pdev->dev;
+ void *sysram;
+ struct drm_gem_object *gobj = NULL;
+ struct ast_bo *bo = NULL;
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7)/8);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ ret = astfb_create_object(afbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+ bo = gem_to_ast_bo(gobj);
+
+ sysram = vmalloc(size);
+ if (!sysram)
+ return -ENOMEM;
+
+ info = framebuffer_alloc(0, device);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ info->par = afbdev;
+
+ ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
+ if (ret)
+ goto out;
+
+ afbdev->sysram = sysram;
+ afbdev->size = size;
+
+ fb = &afbdev->afb.base;
+ afbdev->helper.fb = fb;
+ afbdev->helper.fbdev = info;
+
+ strcpy(info->fix.id, "astdrmfb");
+
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &astfb_ops;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &afbdev->helper, sizes->fb_width, sizes->fb_height);
+
+ info->screen_base = sysram;
+ info->screen_size = size;
+
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+ DRM_DEBUG_KMS("allocated %dx%d\n",
+ fb->width, fb->height);
+
+ return 0;
+out:
+ return ret;
+}
+
+static void ast_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ ast_crtc->lut_r[regno] = red >> 8;
+ ast_crtc->lut_g[regno] = green >> 8;
+ ast_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ *red = ast_crtc->lut_r[regno] << 8;
+ *green = ast_crtc->lut_g[regno] << 8;
+ *blue = ast_crtc->lut_b[regno] << 8;
+}
+
+static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
+ .gamma_set = ast_fb_gamma_set,
+ .gamma_get = ast_fb_gamma_get,
+ .fb_probe = astfb_create,
+};
+
+static void ast_fbdev_destroy(struct drm_device *dev,
+ struct ast_fbdev *afbdev)
+{
+ struct fb_info *info;
+ struct ast_framebuffer *afb = &afbdev->afb;
+ if (afbdev->helper.fbdev) {
+ info = afbdev->helper.fbdev;
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (afb->obj) {
+ drm_gem_object_unreference_unlocked(afb->obj);
+ afb->obj = NULL;
+ }
+ drm_fb_helper_fini(&afbdev->helper);
+
+ vfree(afbdev->sysram);
+ drm_framebuffer_unregister_private(&afb->base);
+ drm_framebuffer_cleanup(&afb->base);
+}
+
+int ast_fbdev_init(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast_fbdev *afbdev;
+ int ret;
+
+ afbdev = kzalloc(sizeof(struct ast_fbdev), GFP_KERNEL);
+ if (!afbdev)
+ return -ENOMEM;
+
+ ast->fbdev = afbdev;
+ afbdev->helper.funcs = &ast_fb_helper_funcs;
+ spin_lock_init(&afbdev->dirty_lock);
+ ret = drm_fb_helper_init(dev, &afbdev->helper,
+ 1, 1);
+ if (ret) {
+ kfree(afbdev);
+ return ret;
+ }
+
+ drm_fb_helper_single_add_all_connectors(&afbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
+ drm_fb_helper_initial_config(&afbdev->helper, 32);
+ return 0;
+}
+
+void ast_fbdev_fini(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ if (!ast->fbdev)
+ return;
+
+ ast_fbdev_destroy(dev, ast->fbdev);
+ kfree(ast->fbdev);
+ ast->fbdev = NULL;
+}
+
+void ast_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ if (!ast->fbdev)
+ return;
+
+ fb_set_suspend(ast->fbdev->helper.fbdev, state);
+}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
new file mode 100644
index 00000000000..a2cc6be9798
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -0,0 +1,530 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include "ast_drv.h"
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "ast_dram_tables.h"
+
+void ast_set_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index,
+ uint8_t mask, uint8_t val)
+{
+ u8 tmp;
+ ast_io_write8(ast, base, index);
+ tmp = (ast_io_read8(ast, base + 1) & mask) | val;
+ ast_set_index_reg(ast, base, index, tmp);
+}
+
+uint8_t ast_get_index_reg(struct ast_private *ast,
+ uint32_t base, uint8_t index)
+{
+ uint8_t ret;
+ ast_io_write8(ast, base, index);
+ ret = ast_io_read8(ast, base + 1);
+ return ret;
+}
+
+uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index, uint8_t mask)
+{
+ uint8_t ret;
+ ast_io_write8(ast, base, index);
+ ret = ast_io_read8(ast, base + 1) & mask;
+ return ret;
+}
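Together these helpers give a read-modify-write on the banked VGA registers: the mask selects which bits of the current value survive, and val is ORed on top. For example, the screen-off bit (SR01[5]) is toggled elsewhere in this patch as:

/* clear SR01[5] (screen on): keep the bits under mask 0xdf, OR in nothing */
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0x00);
/* set SR01[5] (screen off) */
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0x20);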
+
+static int ast_detect_chip(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ uint32_t data, jreg;
+
+ if (dev->pdev->device == PCI_CHIP_AST1180) {
+ ast->chip = AST1100;
+ DRM_INFO("AST 1180 detected\n");
+ } else {
+ if (dev->pdev->revision >= 0x30) {
+ ast->chip = AST2400;
+ DRM_INFO("AST 2400 detected\n");
+ } else if (dev->pdev->revision >= 0x20) {
+ ast->chip = AST2300;
+ DRM_INFO("AST 2300 detected\n");
+ } else if (dev->pdev->revision >= 0x10) {
+ uint32_t data;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+
+ data = ast_read32(ast, 0x1207c);
+ switch (data & 0x0300) {
+ case 0x0200:
+ ast->chip = AST1100;
+ DRM_INFO("AST 1100 detected\n");
+ break;
+ case 0x0100:
+ ast->chip = AST2200;
+ DRM_INFO("AST 2200 detected\n");
+ break;
+ case 0x0000:
+ ast->chip = AST2150;
+ DRM_INFO("AST 2150 detected\n");
+ break;
+ default:
+ ast->chip = AST2100;
+ DRM_INFO("AST 2100 detected\n");
+ break;
+ }
+ ast->vga2_clone = false;
+ } else {
+			ast->chip = AST2000;
+ DRM_INFO("AST 2000 detected\n");
+ }
+ }
+
+ switch (ast->chip) {
+ case AST1180:
+ ast->support_wide_screen = true;
+ break;
+ case AST2000:
+ ast->support_wide_screen = false;
+ break;
+ default:
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ if (!(jreg & 0x80))
+ ast->support_wide_screen = true;
+ else if (jreg & 0x01)
+ ast->support_wide_screen = true;
+ else {
+ ast->support_wide_screen = false;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ data = ast_read32(ast, 0x1207c);
+ data &= 0x300;
+ if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+ ast->support_wide_screen = true;
+ if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+ ast->support_wide_screen = true;
+ }
+ break;
+ }
+
+ ast->tx_chip_type = AST_TX_NONE;
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff);
+ if (jreg & 0x80)
+ ast->tx_chip_type = AST_TX_SIL164;
+ if ((ast->chip == AST2300) || (ast->chip == AST2400)) {
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
+ switch (jreg) {
+ case 0x04:
+ ast->tx_chip_type = AST_TX_SIL164;
+ break;
+ case 0x08:
+ ast->dp501_fw_addr = kzalloc(32*1024, GFP_KERNEL);
+ if (ast->dp501_fw_addr) {
+ /* backup firmware */
+ if (ast_backup_fw(dev, ast->dp501_fw_addr, 32*1024)) {
+ kfree(ast->dp501_fw_addr);
+ ast->dp501_fw_addr = NULL;
+ }
+ }
+ /* fallthrough */
+ case 0x0c:
+ ast->tx_chip_type = AST_TX_DP501;
+ }
+ }
+
+ return 0;
+}
+
+static int ast_get_dram_info(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ uint32_t data, data2;
+ uint32_t denum, num, div, ref_pll;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+
+	/* busy-wait until the unlock key written above is accepted */
+	do {
+		;
+	} while (ast_read32(ast, 0x10000) != 0x01);
+ data = ast_read32(ast, 0x10004);
+
+ if (data & 0x400)
+ ast->dram_bus_width = 16;
+ else
+ ast->dram_bus_width = 32;
+
+ if (ast->chip == AST2300 || ast->chip == AST2400) {
+ switch (data & 0x03) {
+ case 0:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ default:
+ case 1:
+ ast->dram_type = AST_DRAM_1Gx16;
+ break;
+ case 2:
+ ast->dram_type = AST_DRAM_2Gx16;
+ break;
+ case 3:
+ ast->dram_type = AST_DRAM_4Gx16;
+ break;
+ }
+ } else {
+ switch (data & 0x0c) {
+ case 0:
+ case 4:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ case 8:
+ if (data & 0x40)
+ ast->dram_type = AST_DRAM_1Gx16;
+ else
+ ast->dram_type = AST_DRAM_512Mx32;
+ break;
+ case 0xc:
+ ast->dram_type = AST_DRAM_1Gx32;
+ break;
+ }
+ }
+
+ data = ast_read32(ast, 0x10120);
+ data2 = ast_read32(ast, 0x10170);
+ if (data2 & 0x2000)
+ ref_pll = 14318;
+ else
+ ref_pll = 12000;
+
+ denum = data & 0x1f;
+ num = (data & 0x3fe0) >> 5;
+ data = (data & 0xc000) >> 14;
+ switch (data) {
+ case 3:
+ div = 0x4;
+ break;
+ case 2:
+ case 1:
+ div = 0x2;
+ break;
+ default:
+ div = 0x1;
+ break;
+ }
+	ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000));
+ return 0;
+}
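A worked instance of the MCLK formula (made-up register values, for the arithmetic only):

u32 ref_pll = 14318;              /* kHz, 0x10170 bit 13 set */
u32 num = 90, denum = 2, div = 2; /* hypothetical PLL settings */
u32 mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000));
/* 14318 * 92 / (4 * 2000) = 1317256 / 8000 = 164 (MHz) */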
+
+static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
+ if (ast_fb->obj)
+ drm_gem_object_unreference_unlocked(ast_fb->obj);
+
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static const struct drm_framebuffer_funcs ast_fb_funcs = {
+ .destroy = ast_user_framebuffer_destroy,
+};
+
+int ast_framebuffer_init(struct drm_device *dev,
+ struct ast_framebuffer *ast_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
+ ast_fb->obj = obj;
+ ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs);
+ if (ret) {
+ DRM_ERROR("framebuffer init failed %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static struct drm_framebuffer *
+ast_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct ast_framebuffer *ast_fb;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL);
+ if (!ast_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(ast_fb);
+ return ERR_PTR(ret);
+ }
+ return &ast_fb->base;
+}
+
+static const struct drm_mode_config_funcs ast_mode_funcs = {
+ .fb_create = ast_user_framebuffer_create,
+};
+
+static u32 ast_get_vram_info(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 jreg;
+ u32 vram_size;
+ ast_open_key(ast);
+
+ vram_size = AST_VIDMEM_DEFAULT_SIZE;
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
+ switch (jreg & 3) {
+ case 0: vram_size = AST_VIDMEM_SIZE_8M; break;
+ case 1: vram_size = AST_VIDMEM_SIZE_16M; break;
+ case 2: vram_size = AST_VIDMEM_SIZE_32M; break;
+ case 3: vram_size = AST_VIDMEM_SIZE_64M; break;
+ }
+
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xff);
+ switch (jreg & 0x03) {
+ case 1:
+ vram_size -= 0x100000;
+ break;
+ case 2:
+ vram_size -= 0x200000;
+ break;
+ case 3:
+ vram_size -= 0x400000;
+ break;
+ }
+
+ return vram_size;
+}
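Worked example of the decode with illustrative scratch values: CRAA[1:0] = 2 selects 32 MB fitted, and CR99[1:0] = 2 says the firmware reserved 2 MB of it:

u32 vram_size = AST_VIDMEM_SIZE_32M; /* 0x02000000, CRAA[1:0] == 2 */
vram_size -= 0x200000;               /* CR99[1:0] == 2 */
/* vram_size == 0x01e00000, i.e. 30 MB usable by the driver */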
+
+int ast_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct ast_private *ast;
+ int ret = 0;
+
+ ast = kzalloc(sizeof(struct ast_private), GFP_KERNEL);
+ if (!ast)
+ return -ENOMEM;
+
+ dev->dev_private = ast;
+ ast->dev = dev;
+
+ ast->regs = pci_iomap(dev->pdev, 1, 0);
+ if (!ast->regs) {
+ ret = -EIO;
+ goto out_free;
+ }
+ ast->ioregs = pci_iomap(dev->pdev, 2, 0);
+ if (!ast->ioregs) {
+ ret = -EIO;
+ goto out_free;
+ }
+
+ ast_detect_chip(dev);
+
+ if (ast->chip != AST1180) {
+ ast_get_dram_info(dev);
+ ast->vram_size = ast_get_vram_info(dev);
+ DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ goto out_free;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.funcs = (void *)&ast_mode_funcs;
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
+ if (ast->chip == AST2100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2300 ||
+ ast->chip == AST2400 ||
+ ast->chip == AST1180) {
+ dev->mode_config.max_width = 1920;
+ dev->mode_config.max_height = 2048;
+ } else {
+ dev->mode_config.max_width = 1600;
+ dev->mode_config.max_height = 1200;
+ }
+
+ ret = ast_mode_init(dev);
+ if (ret)
+ goto out_free;
+
+ ret = ast_fbdev_init(dev);
+ if (ret)
+ goto out_free;
+
+ return 0;
+out_free:
+ kfree(ast);
+ dev->dev_private = NULL;
+ return ret;
+}
+
+int ast_driver_unload(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ kfree(ast->dp501_fw_addr);
+ ast_mode_fini(dev);
+ ast_fbdev_fini(dev);
+ drm_mode_config_cleanup(dev);
+
+ ast_mm_fini(ast);
+ pci_iounmap(dev->pdev, ast->ioregs);
+ pci_iounmap(dev->pdev, ast->regs);
+ kfree(ast);
+ return 0;
+}
+
+int ast_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct ast_bo *astbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = ast_bo_create(dev, size, 0, 0, &astbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &astbo->gem;
+ return 0;
+}
+
+int ast_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = ast_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
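For example, a 1024x768 dumb buffer at 32 bpp works out to:

u32 pitch = 1024 * ((32 + 7) / 8); /* 4096 bytes per scanline */
u32 size = pitch * 768;            /* 3145728 bytes (3 MB); ast_gem_create
                                    * then rounds this up to PAGE_SIZE */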
+
+static void ast_bo_unref(struct ast_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ *bo = NULL;
+}
+
+void ast_gem_free_object(struct drm_gem_object *obj)
+{
+ struct ast_bo *ast_bo = gem_to_ast_bo(obj);
+
+ ast_bo_unref(&ast_bo);
+}
+
+static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
+{
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+int
+ast_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct ast_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_ast_bo(obj);
+ *offset = ast_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
new file mode 100644
index 00000000000..114aee941d4
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -0,0 +1,1236 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ * Parts based on xf86-video-ast
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "ast_drv.h"
+
+#include "ast_tables.h"
+
+static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
+static void ast_i2c_destroy(struct ast_i2c_chan *i2c);
+static int ast_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height);
+static int ast_cursor_move(struct drm_crtc *crtc,
+ int x, int y);
+
+static inline void ast_load_palette_index(struct ast_private *ast,
+ u8 index, u8 red, u8 green,
+ u8 blue)
+{
+ ast_io_write8(ast, AST_IO_DAC_INDEX_WRITE, index);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+ ast_io_write8(ast, AST_IO_DAC_DATA, red);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+ ast_io_write8(ast, AST_IO_DAC_DATA, green);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+ ast_io_write8(ast, AST_IO_DAC_DATA, blue);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+}
+
+static void ast_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ int i;
+
+ if (!crtc->enabled)
+ return;
+
+ for (i = 0; i < 256; i++)
+ ast_load_palette_index(ast, i, ast_crtc->lut_r[i],
+ ast_crtc->lut_g[i], ast_crtc->lut_b[i]);
+}
+
+static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate;
+ u32 hborder, vborder;
+
+ switch (crtc->primary->fb->bits_per_pixel) {
+ case 8:
+ vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
+ color_index = VGAModeIndex - 1;
+ break;
+ case 16:
+ vbios_mode->std_table = &vbios_stdtable[HiCModeIndex];
+ color_index = HiCModeIndex;
+ break;
+ case 24:
+ case 32:
+ vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex];
+ color_index = TrueCModeIndex;
+ break;
+ default:
+ return false;
+ }
+
+ switch (crtc->mode.crtc_hdisplay) {
+ case 640:
+ vbios_mode->enh_table = &res_640x480[refresh_rate_index];
+ break;
+ case 800:
+ vbios_mode->enh_table = &res_800x600[refresh_rate_index];
+ break;
+ case 1024:
+ vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
+ break;
+ case 1280:
+ if (crtc->mode.crtc_vdisplay == 800)
+ vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
+ else
+ vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
+ break;
+ case 1360:
+ vbios_mode->enh_table = &res_1360x768[refresh_rate_index];
+ break;
+ case 1440:
+ vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
+ break;
+ case 1600:
+ if (crtc->mode.crtc_vdisplay == 900)
+ vbios_mode->enh_table = &res_1600x900[refresh_rate_index];
+ else
+ vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
+ break;
+ case 1680:
+ vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
+ break;
+ case 1920:
+ if (crtc->mode.crtc_vdisplay == 1080)
+ vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
+ else
+ vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
+ break;
+ default:
+ return false;
+ }
+
+ refresh_rate = drm_mode_vrefresh(mode);
+ while (vbios_mode->enh_table->refresh_rate < refresh_rate) {
+ vbios_mode->enh_table++;
+ if ((vbios_mode->enh_table->refresh_rate > refresh_rate) ||
+ (vbios_mode->enh_table->refresh_rate == 0xff)) {
+ vbios_mode->enh_table--;
+ break;
+ }
+ }
+
+ hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
+ vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
+
+ adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht;
+ adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder;
+ adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder;
+ adjusted_mode->crtc_hsync_start = vbios_mode->enh_table->hde + hborder +
+ vbios_mode->enh_table->hfp;
+ adjusted_mode->crtc_hsync_end = (vbios_mode->enh_table->hde + hborder +
+ vbios_mode->enh_table->hfp +
+ vbios_mode->enh_table->hsync);
+
+ adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt;
+ adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder;
+ adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder;
+ adjusted_mode->crtc_vsync_start = vbios_mode->enh_table->vde + vborder +
+ vbios_mode->enh_table->vfp;
+ adjusted_mode->crtc_vsync_end = (vbios_mode->enh_table->vde + vborder +
+ vbios_mode->enh_table->vfp +
+ vbios_mode->enh_table->vsync);
+
+ refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
+ mode_id = vbios_mode->enh_table->mode_id;
+
+ if (ast->chip == AST1180) {
+ /* TODO 1180 */
+ } else {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0xf) << 4));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
+ if (vbios_mode->enh_table->flags & NewModeInfo) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->primary->fb->bits_per_pixel);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
+ }
+ }
+
+ return true;
+}
+
+static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_vbios_stdtable *stdtable;
+ u32 i;
+ u8 jreg;
+
+ stdtable = vbios_mode->std_table;
+
+ jreg = stdtable->misc;
+ ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
+
+ /* Set SEQ */
+ ast_set_index_reg(ast, AST_IO_SEQ_PORT, 0x00, 0x03);
+ for (i = 0; i < 4; i++) {
+ jreg = stdtable->seq[i];
+ if (!i)
+ jreg |= 0x20;
+ ast_set_index_reg(ast, AST_IO_SEQ_PORT, (i + 1) , jreg);
+ }
+
+ /* Set CRTC */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
+ for (i = 0; i < 25; i++)
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
+
+ /* set AR */
+ jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
+ for (i = 0; i < 20; i++) {
+ jreg = stdtable->ar[i];
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, (u8)i);
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, jreg);
+ }
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x14);
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x00);
+
+ jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x20);
+
+ /* Set GR */
+ for (i = 0; i < 9; i++)
+ ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
+}
+
+static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
+ u16 temp;
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
+
+ temp = (mode->crtc_htotal >> 3) - 5;
+ if (temp & 0x100)
+ jregAC |= 0x01; /* HT D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x00, 0x00, temp);
+
+ temp = (mode->crtc_hdisplay >> 3) - 1;
+ if (temp & 0x100)
+ jregAC |= 0x04; /* HDE D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x01, 0x00, temp);
+
+ temp = (mode->crtc_hblank_start >> 3) - 1;
+ if (temp & 0x100)
+ jregAC |= 0x10; /* HBS D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x02, 0x00, temp);
+
+ temp = ((mode->crtc_hblank_end >> 3) - 1) & 0x7f;
+ if (temp & 0x20)
+ jreg05 |= 0x80; /* HBE D[5] */
+ if (temp & 0x40)
+		jregAD |= 0x01; /* HBE D[6] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x03, 0xE0, (temp & 0x1f));
+
+ temp = (mode->crtc_hsync_start >> 3) - 1;
+ if (temp & 0x100)
+		jregAC |= 0x40; /* HRS D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x04, 0x00, temp);
+
+ temp = ((mode->crtc_hsync_end >> 3) - 1) & 0x3f;
+ if (temp & 0x20)
+ jregAD |= 0x04; /* HRE D[5] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05));
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAC, 0x00, jregAC);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD);
+
+ /* vert timings */
+ temp = (mode->crtc_vtotal) - 2;
+ if (temp & 0x100)
+ jreg07 |= 0x01;
+ if (temp & 0x200)
+ jreg07 |= 0x20;
+ if (temp & 0x400)
+ jregAE |= 0x01;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x06, 0x00, temp);
+
+ temp = (mode->crtc_vsync_start) - 1;
+ if (temp & 0x100)
+ jreg07 |= 0x04;
+ if (temp & 0x200)
+ jreg07 |= 0x80;
+ if (temp & 0x400)
+ jregAE |= 0x08;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x10, 0x00, temp);
+
+ temp = (mode->crtc_vsync_end - 1) & 0x3f;
+ if (temp & 0x10)
+ jregAE |= 0x20;
+ if (temp & 0x20)
+ jregAE |= 0x40;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x70, temp & 0xf);
+
+ temp = mode->crtc_vdisplay - 1;
+ if (temp & 0x100)
+ jreg07 |= 0x02;
+ if (temp & 0x200)
+ jreg07 |= 0x40;
+ if (temp & 0x400)
+ jregAE |= 0x02;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x12, 0x00, temp);
+
+ temp = mode->crtc_vblank_start - 1;
+ if (temp & 0x100)
+ jreg07 |= 0x08;
+ if (temp & 0x200)
+ jreg09 |= 0x20;
+ if (temp & 0x400)
+ jregAE |= 0x04;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x15, 0x00, temp);
+
+ temp = mode->crtc_vblank_end - 1;
+ if (temp & 0x100)
+ jregAE |= 0x10;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x16, 0x00, temp);
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x07, 0x00, jreg07);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x09, 0xdf, jreg09);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAE, 0x00, (jregAE | 0x80));
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
+}
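To see how a timing value wider than 8 bits is split between the legacy CRTC register and the AST overflow registers, take crtc_vtotal = 1125 (a standard 1920x1080 mode):

u16 temp = 1125 - 2;    /* 0x463 */
/* CR06 gets the low byte, 0x63 */
/* temp & 0x100 and temp & 0x200 are clear: no CR07 overflow bits */
/* temp & 0x400 is set: jregAE |= 0x01, so VT bit 10 lands in CRAE */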
+
+static void ast_set_offset_reg(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+
+ u16 offset;
+
+ offset = crtc->primary->fb->pitches[0] >> 3;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
+}
+
+static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast_vbios_dclk_info *clk_info;
+
+ clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc0, 0x00, clk_info->param1);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc1, 0x00, clk_info->param2);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xbb, 0x0f,
+ (clk_info->param3 & 0x80) | ((clk_info->param3 & 0x3) << 4));
+}
+
+static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
+
+ switch (crtc->primary->fb->bits_per_pixel) {
+ case 8:
+ jregA0 = 0x70;
+ jregA3 = 0x01;
+ jregA8 = 0x00;
+ break;
+ case 15:
+ case 16:
+ jregA0 = 0x70;
+ jregA3 = 0x04;
+ jregA8 = 0x02;
+ break;
+ case 32:
+ jregA0 = 0x70;
+ jregA3 = 0x08;
+ jregA8 = 0x02;
+ break;
+ }
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa0, 0x8f, jregA0);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xf0, jregA3);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
+
+ /* Set Threshold */
+ if (ast->chip == AST2300 || ast->chip == AST2400) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
+ } else if (ast->chip == AST2100 ||
+ ast->chip == AST1100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2150) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x3f);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x2f);
+ } else {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x2f);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x1f);
+ }
+}
+
+static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 jreg;
+
+ jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ);
+ jreg |= (vbios_mode->enh_table->flags & SyncNN);
+ ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
+}
+
+static bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ switch (crtc->primary->fb->bits_per_pixel) {
+ case 8:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u32 addr;
+
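+ /* the start address registers are programmed in dword (4-byte) units */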
+ addr = offset >> 2;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0d, (u8)(addr & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0c, (u8)((addr >> 8) & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xaf, (u8)((addr >> 16) & 0xff));
+}
+
+static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+
+ if (ast->chip == AST1180)
+ return;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+ if (ast->tx_chip_type == AST_TX_DP501)
+ ast_set_dp501_video_output(crtc->dev, 1);
+ ast_crtc_load_lut(crtc);
+ break;
+ case DRM_MODE_DPMS_OFF:
+ if (ast->tx_chip_type == AST_TX_DP501)
+ ast_set_dp501_video_output(crtc->dev, 0);
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
+ break;
+ }
+}
+
+static bool ast_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+/* ast is different - we will force move buffers out of VRAM */
+static int ast_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
+ struct ast_framebuffer *ast_fb;
+ struct ast_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* push the previous fb to system ram */
+ if (!atomic && fb) {
+ ast_fb = to_ast_framebuffer(fb);
+ obj = ast_fb->obj;
+ bo = gem_to_ast_bo(obj);
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+ ast_bo_push_sysram(bo);
+ ast_bo_unreserve(bo);
+ }
+
+ ast_fb = to_ast_framebuffer(crtc->primary->fb);
+ obj = ast_fb->obj;
+ bo = gem_to_ast_bo(obj);
+
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
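+ /* pin the new framebuffer into VRAM and retrieve its GPU address */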
+ ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ ast_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&ast->fbdev->afb == ast_fb) {
+ /* if pushing the console fb, keep it kmapped */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret)
+ DRM_ERROR("failed to kmap fbcon\n");
+ }
+ ast_bo_unreserve(bo);
+
+ ast_set_start_address_crt1(crtc, (u32)gpu_addr);
+
+ return 0;
+}
+
+static int ast_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return ast_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int ast_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_vbios_mode_info vbios_mode;
+ bool ret;
+
+ if (ast->chip == AST1180) {
+ DRM_ERROR("AST 1180 modesetting not supported\n");
+ return -EINVAL;
+ }
+
+ ret = ast_get_vbios_mode_info(crtc, mode, adjusted_mode, &vbios_mode);
+ if (!ret)
+ return -EINVAL;
+ ast_open_key(ast);
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+
+ ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
+ ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
+ ast_set_offset_reg(crtc);
+ ast_set_dclk_reg(dev, adjusted_mode, &vbios_mode);
+ ast_set_ext_reg(crtc, adjusted_mode, &vbios_mode);
+ ast_set_sync_reg(dev, adjusted_mode, &vbios_mode);
+ ast_set_dac_reg(crtc, adjusted_mode, &vbios_mode);
+
+ ast_crtc_mode_set_base(crtc, x, y, old_fb);
+
+ return 0;
+}
+
+static void ast_crtc_disable(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_prepare(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_commit(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+}
+
+static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
+ .dpms = ast_crtc_dpms,
+ .mode_fixup = ast_crtc_mode_fixup,
+ .mode_set = ast_crtc_mode_set,
+ .mode_set_base = ast_crtc_mode_set_base,
+ .disable = ast_crtc_disable,
+ .load_lut = ast_crtc_load_lut,
+ .prepare = ast_crtc_prepare,
+ .commit = ast_crtc_commit,
+};
+
+static void ast_crtc_reset(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ int end = (start + size > 256) ? 256 : start + size;
+ int i;
+
+ /* userspace palettes are always correct as is */
+ for (i = start; i < end; i++) {
+ ast_crtc->lut_r[i] = red[i] >> 8;
+ ast_crtc->lut_g[i] = green[i] >> 8;
+ ast_crtc->lut_b[i] = blue[i] >> 8;
+ }
+ ast_crtc_load_lut(crtc);
+}
+
+static void ast_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static const struct drm_crtc_funcs ast_crtc_funcs = {
+ .cursor_set = ast_cursor_set,
+ .cursor_move = ast_cursor_move,
+ .reset = ast_crtc_reset,
+ .set_config = drm_crtc_helper_set_config,
+ .gamma_set = ast_crtc_gamma_set,
+ .destroy = ast_crtc_destroy,
+};
+
+static int ast_crtc_init(struct drm_device *dev)
+{
+ struct ast_crtc *crtc;
+ int i;
+
+ crtc = kzalloc(sizeof(struct ast_crtc), GFP_KERNEL);
+ if (!crtc)
+ return -ENOMEM;
+
+ drm_crtc_init(dev, &crtc->base, &ast_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(&crtc->base, 256);
+ drm_crtc_helper_add(&crtc->base, &ast_crtc_helper_funcs);
+
+ for (i = 0; i < 256; i++) {
+ crtc->lut_r[i] = i;
+ crtc->lut_g[i] = i;
+ crtc->lut_b[i] = i;
+ }
+ return 0;
+}
+
+static void ast_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+
+static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ /* pick the first bound encoder, if any */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static const struct drm_encoder_funcs ast_enc_funcs = {
+ .destroy = ast_encoder_destroy,
+};
+
+static void ast_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool ast_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void ast_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void ast_encoder_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void ast_encoder_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = {
+ .dpms = ast_encoder_dpms,
+ .mode_fixup = ast_mode_fixup,
+ .prepare = ast_encoder_prepare,
+ .commit = ast_encoder_commit,
+ .mode_set = ast_encoder_mode_set,
+};
+
+static int ast_encoder_init(struct drm_device *dev)
+{
+ struct ast_encoder *ast_encoder;
+
+ ast_encoder = kzalloc(sizeof(struct ast_encoder), GFP_KERNEL);
+ if (!ast_encoder)
+ return -ENOMEM;
+
+ drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs);
+
+ ast_encoder->base.possible_crtcs = 1;
+ return 0;
+}
+
+static int ast_get_modes(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ struct ast_private *ast = connector->dev->dev_private;
+ struct edid *edid;
+ int ret;
+ bool flags = false;
+
+ if (ast->tx_chip_type == AST_TX_DP501) {
+ ast->dp501_maxclk = 0xff;
+ edid = kmalloc(128, GFP_KERNEL);
+ if (!edid)
+ return -ENOMEM;
+
+ flags = ast_dp501_read_edid(connector->dev, (u8 *)edid);
+ if (flags)
+ ast->dp501_maxclk = ast_get_dp501_max_clk(connector->dev);
+ else
+ kfree(edid);
+ }
+ if (!flags)
+ edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(&ast_connector->base, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ return ret;
+ } else {
+ drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
+ }
+ return 0;
+}
+
+static int ast_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct ast_private *ast = connector->dev->dev_private;
+ int flags = MODE_NOMODE;
+ uint32_t jtemp;
+
+ if (ast->support_wide_screen) {
+ if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050))
+ return MODE_OK;
+ if ((mode->hdisplay == 1280) && (mode->vdisplay == 800))
+ return MODE_OK;
+ if ((mode->hdisplay == 1440) && (mode->vdisplay == 900))
+ return MODE_OK;
+ if ((mode->hdisplay == 1360) && (mode->vdisplay == 768))
+ return MODE_OK;
+ if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
+ return MODE_OK;
+
+ if ((ast->chip == AST2100) || (ast->chip == AST2200) ||
+ (ast->chip == AST2300) || (ast->chip == AST2400) ||
+ (ast->chip == AST1180)) {
+ if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080))
+ return MODE_OK;
+
+ if ((mode->hdisplay == 1920) && (mode->vdisplay == 1200)) {
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
+ if (jtemp & 0x01)
+ return MODE_NOMODE;
+ else
+ return MODE_OK;
+ }
+ }
+ }
+ switch (mode->hdisplay) {
+ case 640:
+ if (mode->vdisplay == 480)
+ flags = MODE_OK;
+ break;
+ case 800:
+ if (mode->vdisplay == 600)
+ flags = MODE_OK;
+ break;
+ case 1024:
+ if (mode->vdisplay == 768)
+ flags = MODE_OK;
+ break;
+ case 1280:
+ if (mode->vdisplay == 1024)
+ flags = MODE_OK;
+ break;
+ case 1600:
+ if (mode->vdisplay == 1200)
+ flags = MODE_OK;
+ break;
+ default:
+ return flags;
+ }
+
+ return flags;
+}
+
+static void ast_connector_destroy(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ ast_i2c_destroy(ast_connector->i2c);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static enum drm_connector_status
+ast_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
+ .mode_valid = ast_mode_valid,
+ .get_modes = ast_get_modes,
+ .best_encoder = ast_best_single_encoder,
+};
+
+static const struct drm_connector_funcs ast_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = ast_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = ast_connector_destroy,
+};
+
+static int ast_connector_init(struct drm_device *dev)
+{
+ struct ast_connector *ast_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ ast_connector = kzalloc(sizeof(struct ast_connector), GFP_KERNEL);
+ if (!ast_connector)
+ return -ENOMEM;
+
+ connector = &ast_connector->base;
+ drm_connector_init(dev, connector, &ast_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_connector_helper_add(connector, &ast_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_sysfs_connector_add(connector);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ ast_connector->i2c = ast_i2c_create(dev);
+ if (!ast_connector->i2c)
+ DRM_ERROR("failed to add ddc bus for connector\n");
+
+ return 0;
+}
+
+/* allocate cursor cache and pin at start of VRAM */
+static int ast_cursor_init(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ int size;
+ int ret;
+ struct drm_gem_object *obj;
+ struct ast_bo *bo;
+ uint64_t gpu_addr;
+
+ size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM;
+
+ ret = ast_gem_create(dev, size, true, &obj);
+ if (ret)
+ return ret;
+ bo = gem_to_ast_bo(obj);
+ ret = ast_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ goto fail;
+
+ ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ ast_bo_unreserve(bo);
+ if (ret)
+ goto fail;
+
+ /* kmap the object */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &ast->cache_kmap);
+ if (ret)
+ goto fail;
+
+ ast->cursor_cache = obj;
+ ast->cursor_cache_gpu_addr = gpu_addr;
+ DRM_DEBUG_KMS("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
+ return 0;
+fail:
+ return ret;
+}
+
+static void ast_cursor_fini(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ ttm_bo_kunmap(&ast->cache_kmap);
+ drm_gem_object_unreference_unlocked(ast->cursor_cache);
+}
+
+int ast_mode_init(struct drm_device *dev)
+{
+ ast_cursor_init(dev);
+ ast_crtc_init(dev);
+ ast_encoder_init(dev);
+ ast_connector_init(dev);
+ return 0;
+}
+
+void ast_mode_fini(struct drm_device *dev)
+{
+ ast_cursor_fini(dev);
+}
+
+static int get_clock(void *i2c_priv)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ uint32_t val;
+
+ val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
+ return val & 1 ? 1 : 0;
+}
+
+static int get_data(void *i2c_priv)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ uint32_t val;
+
+ val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
+ return val & 1 ? 1 : 0;
+}
+
+static void set_clock(void *i2c_priv, int clock)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ int i;
+ u8 ujcrb7, jtemp;
+
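+ /* write the bit, then read it back until the hardware latches it (bounded retry) */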
+ for (i = 0; i < 0x10000; i++) {
+ ujcrb7 = ((clock & 0x01) ? 0 : 1);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
+ if (ujcrb7 == jtemp)
+ break;
+ }
+}
+
+static void set_data(void *i2c_priv, int data)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ int i;
+ u8 ujcrb7, jtemp;
+
+ for (i = 0; i < 0x10000; i++) {
+ ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
+ if (ujcrb7 == jtemp)
+ break;
+ }
+}
+
+static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
+{
+ struct ast_i2c_chan *i2c;
+ int ret;
+
+ i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.class = I2C_CLASS_DDC;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "AST i2c bit bus");
+ i2c->adapter.algo_data = &i2c->bit;
+
+ i2c->bit.udelay = 20;
+ i2c->bit.timeout = 2;
+ i2c->bit.data = i2c;
+ i2c->bit.setsda = set_data;
+ i2c->bit.setscl = set_clock;
+ i2c->bit.getsda = get_data;
+ i2c->bit.getscl = get_clock;
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ if (ret) {
+ DRM_ERROR("Failed to register bit i2c\n");
+ goto out_free;
+ }
+
+ return i2c;
+out_free:
+ kfree(i2c);
+ return NULL;
+}
+
+static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
+{
+ if (!i2c)
+ return;
+ i2c_del_adapter(&i2c->adapter);
+ kfree(i2c);
+}
+
+static void ast_show_cursor(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u8 jreg;
+
+ jreg = 0x2;
+ /* enable ARGB cursor */
+ jreg |= 1;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
+}
+
+static void ast_hide_cursor(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
+}
+
+static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
+{
+ union {
+ u32 ul;
+ u8 b[4];
+ } srcdata32[2], data32;
+ union {
+ u16 us;
+ u8 b[2];
+ } data16;
+ u32 csum = 0;
+ s32 alpha_dst_delta, last_alpha_dst_delta;
+ u8 *srcxor, *dstxor;
+ int i, j;
+ u32 per_pixel_copy, two_pixel_copy;
+
+ alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
+ last_alpha_dst_delta = alpha_dst_delta - (width << 1);
+
+ srcxor = src;
+ dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
+ per_pixel_copy = width & 1;
+ two_pixel_copy = width >> 1;
+
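+ /*
+ * Pack each pixel down to 4 bits per channel (presumably
+ * ARGB8888 -> ARGB4444) by keeping the high nibble of every
+ * byte, converting two pixels per 32-bit write where possible.
+ */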
+ for (j = 0; j < height; j++) {
+ for (i = 0; i < two_pixel_copy; i++) {
+ srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
+ srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
+ data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+ data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+ data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
+ data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
+
+ writel(data32.ul, dstxor);
+ csum += data32.ul;
+
+ dstxor += 4;
+ srcxor += 8;
+
+ }
+
+ for (i = 0; i < per_pixel_copy; i++) {
+ srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
+ data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+ data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+ writew(data16.us, dstxor);
+ csum += (u32)data16.us;
+
+ dstxor += 2;
+ srcxor += 4;
+ }
+ dstxor += last_alpha_dst_delta;
+ }
+ return csum;
+}
+
+static int ast_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ struct drm_gem_object *obj;
+ struct ast_bo *bo;
+ uint64_t gpu_addr;
+ u32 csum;
+ int ret;
+ struct ttm_bo_kmap_obj uobj_map;
+ u8 *src, *dst;
+ bool src_isiomem, dst_isiomem;
+
+ if (!handle) {
+ ast_hide_cursor(crtc);
+ return 0;
+ }
+
+ if (width > AST_MAX_HWC_WIDTH || height > AST_MAX_HWC_HEIGHT)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
+ return -ENOENT;
+ }
+ bo = gem_to_ast_bo(obj);
+
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ goto fail;
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
+ if (ret) {
+ ast_bo_unreserve(bo);
+ goto fail;
+ }
+
+ src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
+ dst = ttm_kmap_obj_virtual(&ast->cache_kmap, &dst_isiomem);
+
+ if (src_isiomem)
+ DRM_ERROR("src cursor bo should be in main memory\n");
+ if (!dst_isiomem)
+ DRM_ERROR("dst bo should be in VRAM\n");
+
+ dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
+
+ /* do data transfer to cursor cache */
+ csum = copy_cursor_image(src, dst, width, height);
+
+ /* write checksum + signature */
+ ttm_bo_kunmap(&uobj_map);
+ ast_bo_unreserve(bo);
+ {
+ u8 *dst = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ writel(csum, dst);
+ writel(width, dst + AST_HWC_SIGNATURE_SizeX);
+ writel(height, dst + AST_HWC_SIGNATURE_SizeY);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
+
+ /* set pattern offset */
+ gpu_addr = ast->cursor_cache_gpu_addr;
+ gpu_addr += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
+ gpu_addr >>= 3;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, gpu_addr & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, (gpu_addr >> 8) & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, (gpu_addr >> 16) & 0xff);
+ }
+ ast_crtc->cursor_width = width;
+ ast_crtc->cursor_height = height;
+ ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
+ ast_crtc->offset_y = AST_MAX_HWC_HEIGHT - height;
+
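+ /* rotate through the cursor cache slots */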
+ ast->next_cursor = (ast->next_cursor + 1) % AST_DEFAULT_HWC_NUM;
+
+ ast_show_cursor(crtc);
+
+ drm_gem_object_unreference_unlocked(obj);
+ return 0;
+fail:
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+}
+
+static int ast_cursor_move(struct drm_crtc *crtc,
+ int x, int y)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ struct ast_private *ast = crtc->dev->dev_private;
+ int x_offset, y_offset;
+ u8 *sig;
+
+ sig = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ writel(x, sig + AST_HWC_SIGNATURE_X);
+ writel(y, sig + AST_HWC_SIGNATURE_Y);
+
+ x_offset = ast_crtc->offset_x;
+ y_offset = ast_crtc->offset_y;
+ if (x < 0) {
+ x_offset = (-x) + ast_crtc->offset_x;
+ x = 0;
+ }
+
+ if (y < 0) {
+ y_offset = (-y) + ast_crtc->offset_y;
+ y = 0;
+ }
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, (x & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, ((x >> 8) & 0x0f));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, (y & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
+
+ /* dummy write to fire HWC */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
new file mode 100644
index 00000000000..38d437f3a26
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -0,0 +1,1652 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <drm/drmP.h>
+#include "ast_drv.h"
+
+#include "ast_dram_tables.h"
+
+static void ast_init_dram_2300(struct drm_device *dev);
+
+static void
+ast_enable_vga(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ ast_io_write8(ast, 0x43, 0x01);
+ ast_io_write8(ast, 0x42, 0x01);
+}
+
+#if 0 /* will use later */
+static bool
+ast_is_vga_enabled(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 ch;
+
+ if (ast->chip == AST1180) {
+ /* TODO 1180 */
+ } else {
+ ch = ast_io_read8(ast, 0x43);
+ if (ch) {
+ ast_open_key(ast);
+ ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
+ return ch & 0x04;
+ }
+ }
+ return false;
+}
+#endif
+
+static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
+static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff };
+static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
+
+static void
+ast_set_def_ext_reg(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 i, index, reg;
+ const u8 *ext_reg_info;
+
+ /* reset scratch */
+ for (i = 0x81; i <= 0x8f; i++)
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00);
+
+ if (ast->chip == AST2300 || ast->chip == AST2400) {
+ if (dev->pdev->revision >= 0x20)
+ ext_reg_info = extreginfo_ast2300;
+ else
+ ext_reg_info = extreginfo_ast2300a0;
+ } else
+ ext_reg_info = extreginfo;
+
+ index = 0xa0;
+ while (*ext_reg_info != 0xff) {
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, index, 0x00, *ext_reg_info);
+ index++;
+ ext_reg_info++;
+ }
+
+ /* disable standard IO/MEM decode if secondary */
+ /* ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x3); */
+
+ /* Set Ext. Default */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x8c, 0x00, 0x01);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x00, 0x00);
+
+ /* Enable RAMDAC for A1 */
+ reg = 0x04;
+ if (ast->chip == AST2300 || ast->chip == AST2400)
+ reg |= 0x20;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
+}
+
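+/*
+ * Indirect memory-controller access: 0xf004 selects a 64k window,
+ * 0xf000 arms it, and the target register is then read or written
+ * through the 0x10000 aperture once the window readback matches.
+ */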
+u32 ast_mindwm(struct ast_private *ast, u32 r)
+{
+ uint32_t data;
+
+ ast_write32(ast, 0xf004, r & 0xffff0000);
+ ast_write32(ast, 0xf000, 0x1);
+
+ do {
+ data = ast_read32(ast, 0xf004) & 0xffff0000;
+ } while (data != (r & 0xffff0000));
+ return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
+}
+
+void ast_moutdwm(struct ast_private *ast, u32 r, u32 v)
+{
+ uint32_t data;
+
+ ast_write32(ast, 0xf004, r & 0xffff0000);
+ ast_write32(ast, 0xf000, 0x1);
+ do {
+ data = ast_read32(ast, 0xf004) & 0xffff0000;
+ } while (data != (r & 0xffff0000));
+ ast_write32(ast, 0x10000 + (r & 0x0000ffff), v);
+}
+
+/*
+ * AST2100/2150 DLL CBR Setting
+ */
+#define CBR_SIZE_AST2150 ((16 << 10) - 1)
+#define CBR_PASSNUM_AST2150 5
+#define CBR_THRESHOLD_AST2150 10
+#define CBR_THRESHOLD2_AST2150 10
+#define TIMEOUT_AST2150 5000000
+
+#define CBR_PATNUM_AST2150 8
+
+static const u32 pattern_AST2150[14] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0xFFFE0001,
+ 0x683501FE,
+ 0x0F1929B0,
+ 0x2D0B4346,
+ 0x60767F02,
+ 0x6FBE36A6,
+ 0x3A253035,
+ 0x3019686D,
+ 0x41C6167E,
+ 0x620152BF,
+ 0x20F050E0
+};
+
+static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+
+#if 0 /* unused in DDX driver - here for completeness */
+static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+#endif
+
+static int cbrtest_ast2150(struct ast_private *ast)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ if (mmctestburst2_ast2150(ast, i))
+ return 0;
+ return 1;
+}
+
+static int cbrscan_ast2150(struct ast_private *ast, int busw)
+{
+ u32 patcnt, loop;
+
+ for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
+ if (cbrtest_ast2150(ast))
+ break;
+ }
+ if (loop == CBR_PASSNUM_AST2150)
+ return 0;
+ }
+ return 1;
+}
+
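+/*
+ * Sweep the DLL delay setting, record the window of values that pass
+ * the calibration test, then program a point roughly 7/16 into it.
+ */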
+static void cbrdlli_ast2150(struct ast_private *ast, int busw)
+{
+ u32 dll_min[4], dll_max[4], dlli, data, passcnt;
+
+cbr_start:
+ dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff;
+ dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0;
+ passcnt = 0;
+
+ for (dlli = 0; dlli < 100; dlli++) {
+ ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+ data = cbrscan_ast2150(ast, busw);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dll_min[0] > dlli)
+ dll_min[0] = dlli;
+ if (dll_max[0] < dlli)
+ dll_max[0] = dlli;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD_AST2150)
+ goto cbr_start;
+ }
+ if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150)
+ goto cbr_start;
+
+ dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
+ ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+}
+
+static void ast_init_dram_reg(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 j;
+ u32 data, temp, i;
+ const struct ast_dramstruct *dram_reg_info;
+
+ j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+
+ if ((j & 0x80) == 0) { /* VGA only */
+ if (ast->chip == AST2000) {
+ dram_reg_info = ast2000_dram_table_data;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x10100, 0xa8);
+
+ do {
+ ;
+ } while (ast_read32(ast, 0x10100) != 0xa8);
+ } else {/* AST2100/1100 */
+ if (ast->chip == AST2100 || ast->chip == AST2200)
+ dram_reg_info = ast2100_dram_table_data;
+ else
+ dram_reg_info = ast1100_dram_table_data;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688A8A8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x01);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x01);
+ }
+
+ while (dram_reg_info->index != 0xffff) {
+ if (dram_reg_info->index == 0xff00) {/* delay fn */
+ for (i = 0; i < 15; i++)
+ udelay(dram_reg_info->data);
+ } else if (dram_reg_info->index == 0x4 && ast->chip != AST2000) {
+ data = dram_reg_info->data;
+ if (ast->dram_type == AST_DRAM_1Gx16)
+ data = 0x00000d89;
+ else if (ast->dram_type == AST_DRAM_1Gx32)
+ data = 0x00000c8d;
+
+ temp = ast_read32(ast, 0x12070);
+ temp &= 0xc;
+ temp <<= 2;
+ ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
+ } else
+ ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data);
+ dram_reg_info++;
+ }
+
+ /* AST 2100/2150 DRAM calibration */
+ data = ast_read32(ast, 0x10120);
+ if (data == 0x5061) { /* 266Mhz */
+ data = ast_read32(ast, 0x10004);
+ if (data & 0x40)
+ cbrdlli_ast2150(ast, 16); /* 16 bits */
+ else
+ cbrdlli_ast2150(ast, 32); /* 32 bits */
+ }
+
+ switch (ast->chip) {
+ case AST2000:
+ temp = ast_read32(ast, 0x10140);
+ ast_write32(ast, 0x10140, temp | 0x40);
+ break;
+ case AST1100:
+ case AST2100:
+ case AST2200:
+ case AST2150:
+ temp = ast_read32(ast, 0x1200c);
+ ast_write32(ast, 0x1200c, temp & 0xfffffffd);
+ temp = ast_read32(ast, 0x12040);
+ ast_write32(ast, 0x12040, temp | 0x40);
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* wait ready */
+ do {
+ j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ } while ((j & 0x40) == 0);
+}
+
+void ast_post_gpu(struct drm_device *dev)
+{
+ u32 reg;
+ struct ast_private *ast = dev->dev_private;
+
+ pci_read_config_dword(ast->dev->pdev, 0x04, &reg);
+ reg |= 0x3;
+ pci_write_config_dword(ast->dev->pdev, 0x04, reg);
+
+ ast_enable_vga(dev);
+ ast_open_key(ast);
+ ast_set_def_ext_reg(dev);
+
+ if (ast->chip == AST2300 || ast->chip == AST2400)
+ ast_init_dram_2300(dev);
+ else
+ ast_init_dram_reg(dev);
+
+ ast_init_3rdtx(dev);
+}
+
+/* AST 2300 DRAM settings */
+#define AST_DDR3 0
+#define AST_DDR2 1
+
+struct ast2300_dram_param {
+ u32 dram_type;
+ u32 dram_chipid;
+ u32 dram_freq;
+ u32 vram_size;
+ u32 odt;
+ u32 wodt;
+ u32 rodt;
+ u32 dram_config;
+ u32 reg_PERIOD;
+ u32 reg_MADJ;
+ u32 reg_SADJ;
+ u32 reg_MRS;
+ u32 reg_EMRS;
+ u32 reg_AC1;
+ u32 reg_AC2;
+ u32 reg_DQSIC;
+ u32 reg_DRV;
+ u32 reg_IOZ;
+ u32 reg_DQIDLY;
+ u32 reg_FREQ;
+ u32 madj_max;
+ u32 dll2_finetune_step;
+};
+
+/*
+ * DQSI DLL CBR Setting
+ */
+#define CBR_SIZE0 ((1 << 10) - 1)
+#define CBR_SIZE1 ((4 << 10) - 1)
+#define CBR_SIZE2 ((64 << 10) - 1)
+#define CBR_PASSNUM 5
+#define CBR_PASSNUM2 5
+#define CBR_THRESHOLD 10
+#define CBR_THRESHOLD2 10
+#define TIMEOUT 5000000
+#define CBR_PATNUM 8
+
+static const u32 pattern[8] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0x88778877,
+ 0x92CC4D6E,
+ 0x543D3CDE,
+ 0xF1E843C7,
+ 0x7C61D253
+};
+
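+/*
+ * The 0x1e6e0070/0x1e6e0078 registers appear to drive the DRAM
+ * built-in test engine: write a command (pattern selector in bits
+ * 5:3), poll for completion, then read back the fail status.
+ */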
+static int mmc_test_burst(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x000000c1 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x3000;
+ if (data & 0x2000) {
+ return 0;
+ }
+ if (++timeout > TIMEOUT) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0;
+ }
+ } while (!data);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 1;
+}
+
+static int mmc_test_burst2(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000041 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x1000;
+ if (++timeout > TIMEOUT) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x0);
+ return -1;
+ }
+ } while (!data);
+ data = ast_mindwm(ast, 0x1e6e0078);
+ data = (data | (data >> 16)) & 0xffff;
+ ast_moutdwm(ast, 0x1e6e0070, 0x0);
+ return data;
+}
+
+static int mmc_test_single(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x000000c5 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x3000;
+ if (data & 0x2000)
+ return 0;
+ if (++timeout > TIMEOUT) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x0);
+ return 0;
+ }
+ } while (!data);
+ ast_moutdwm(ast, 0x1e6e0070, 0x0);
+ return 1;
+}
+
+static int mmc_test_single2(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x1000;
+ if (++timeout > TIMEOUT) {
+ ast_moutdwm(ast, 0x1e6e0070, 0x0);
+ return -1;
+ }
+ } while (!data);
+ data = ast_mindwm(ast, 0x1e6e0078);
+ data = (data | (data >> 16)) & 0xffff;
+ ast_moutdwm(ast, 0x1e6e0070, 0x0);
+ return data;
+}
+
+static int cbr_test(struct ast_private *ast)
+{
+ u32 data;
+ int i;
+
+ data = mmc_test_single2(ast, 0);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ for (i = 0; i < 8; i++) {
+ data = mmc_test_burst2(ast, i);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ }
+ if (!data)
+ return 3;
+ else if (data & 0xff)
+ return 2;
+ return 1;
+}
+
+static int cbr_scan(struct ast_private *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 3;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ data = cbr_test(ast);
+ if (data != 0) {
+ data2 &= data;
+ if (!data2)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+static u32 cbr_test2(struct ast_private *ast)
+{
+ u32 data;
+
+ data = mmc_test_burst2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+ data |= mmc_test_single2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+
+ return ~data & 0xffff;
+}
+
+static u32 cbr_scan2(struct ast_private *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 0xffff;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ data = cbr_test2(ast);
+ if (data != 0) {
+ data2 &= data;
+ if (!data2)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+static u32 cbr_test3(struct ast_private *ast)
+{
+ if (!mmc_test_burst(ast, 0))
+ return 0;
+ if (!mmc_test_single(ast, 0))
+ return 0;
+ return 1;
+}
+
+static u32 cbr_scan3(struct ast_private *ast)
+{
+ u32 patcnt, loop;
+
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < 2; loop++) {
+ if (cbr_test3(ast))
+ break;
+ }
+ if (loop == 2)
+ return 0;
+ }
+ return 1;
+}
+
+static bool finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
+ bool status = false;
+FINETUNE_START:
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli) {
+ dllmin[cnt] = dlli;
+ }
+ if (dllmax[cnt] < dlli) {
+ dllmax[cnt] = dlli;
+ }
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD2) {
+ break;
+ }
+ }
+ gold_sadj[0] = 0x0;
+ passcnt = 0;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ gold_sadj[0] += dllmin[cnt];
+ passcnt++;
+ }
+ }
+ if (retry++ > 10)
+ goto FINETUNE_DONE;
+ if (passcnt != 16) {
+ goto FINETUNE_START;
+ }
+ status = true;
+FINETUNE_DONE:
+ gold_sadj[0] = gold_sadj[0] >> 4;
+ gold_sadj[1] = gold_sadj[0];
+
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[0] >= dlli) {
+ dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
+ if (dlli > 3) {
+ dlli = 3;
+ }
+ } else {
+ dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ ast_moutdwm(ast, 0x1E6E0080, data);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[1] >= dlli) {
+ dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
+ if (dlli > 3) {
+ dlli = 3;
+ } else {
+ dlli = (dlli - 1) & 0x7;
+ }
+ } else {
+ dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
+ dlli += 1;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ ast_moutdwm(ast, 0x1E6E0084, data);
+ return status;
+} /* finetuneDQI_L */
+
+static void finetuneDQSI(struct ast_private *ast)
+{
+ u32 dlli, dqsip, dqidly;
+ u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
+ u32 g_dqidly, g_dqsip, g_margin, g_side;
+ u16 pass[32][2][2];
+ char tag[2][76];
+
+ /* Disable DQI CBR */
+ reg_mcr0c = ast_mindwm(ast, 0x1E6E000C);
+ reg_mcr18 = ast_mindwm(ast, 0x1E6E0018);
+ reg_mcr18 &= 0x0000ffff;
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
+
+ for (dlli = 0; dlli < 76; dlli++) {
+ tag[0][dlli] = 0x0;
+ tag[1][dlli] = 0x0;
+ }
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ pass[dqidly][0][0] = 0xff;
+ pass[dqidly][0][1] = 0x0;
+ pass[dqidly][1][0] = 0xff;
+ pass[dqidly][1][1] = 0x0;
+ }
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ passcnt[0] = passcnt[1] = 0;
+ for (dqsip = 0; dqsip < 2; dqsip++) {
+ ast_moutdwm(ast, 0x1E6E000C, 0);
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23));
+ ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c);
+ for (dlli = 0; dlli < 76; dlli++) {
+ ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0070, 0);
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0);
+ if (cbr_scan3(ast)) {
+ if (dlli == 0)
+ break;
+ passcnt[dqsip]++;
+ tag[dqsip][dlli] = 'P';
+ if (dlli < pass[dqidly][dqsip][0])
+ pass[dqidly][dqsip][0] = (u16) dlli;
+ if (dlli > pass[dqidly][dqsip][1])
+ pass[dqidly][dqsip][1] = (u16) dlli;
+ } else if (passcnt[dqsip] >= 5)
+ break;
+ else {
+ pass[dqidly][dqsip][0] = 0xff;
+ pass[dqidly][dqsip][1] = 0x0;
+ }
+ }
+ }
+ if (passcnt[0] == 0 && passcnt[1] == 0)
+ dqidly++;
+ }
+ /* Search margin */
+ g_dqidly = g_dqsip = g_margin = g_side = 0;
+
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ for (dqsip = 0; dqsip < 2; dqsip++) {
+ if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1])
+ continue;
+ diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0];
+ if ((diff+2) < g_margin)
+ continue;
+ passcnt[0] = passcnt[1] = 0;
+ for (dlli = pass[dqidly][dqsip][0]; dlli > 0 && tag[dqsip][dlli] != 0; dlli--, passcnt[0]++);
+ for (dlli = pass[dqidly][dqsip][1]; dlli < 76 && tag[dqsip][dlli] != 0; dlli++, passcnt[1]++);
+ if (passcnt[0] > passcnt[1])
+ passcnt[0] = passcnt[1];
+ passcnt[1] = 0;
+ if (passcnt[0] > g_side)
+ passcnt[1] = passcnt[0] - g_side;
+ if (diff > (g_margin+1) && (passcnt[1] > 0 || passcnt[0] > 8)) {
+ g_margin = diff;
+ g_dqidly = dqidly;
+ g_dqsip = dqsip;
+ g_side = passcnt[0];
+ } else if (passcnt[1] > 1 && g_side < 8) {
+ if (diff > g_margin)
+ g_margin = diff;
+ g_dqidly = dqidly;
+ g_dqsip = dqsip;
+ g_side = passcnt[0];
+ }
+ }
+ }
+ reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23);
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
+}
+
+static bool cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
+ bool status = false;
+
+ finetuneDQSI(ast);
+ if (finetuneDQI_L(ast, param) == false)
+ return status;
+
+CBR_START2:
+ dllmin[0] = dllmin[1] = 0xff;
+ dllmax[0] = dllmax[1] = 0x0;
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+ data = cbr_scan(ast);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dllmin[0] > dlli) {
+ dllmin[0] = dlli;
+ }
+ if (dllmax[0] < dlli) {
+ dllmax[0] = dlli;
+ }
+ }
+ if (data & 0x2) {
+ if (dllmin[1] > dlli) {
+ dllmin[1] = dlli;
+ }
+ if (dllmax[1] < dlli) {
+ dllmax[1] = dlli;
+ }
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD) {
+ break;
+ }
+ }
+ if (retry++ > 10)
+ goto CBR_DONE2;
+ if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) {
+ goto CBR_START2;
+ }
+ if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) {
+ goto CBR_START2;
+ }
+ status = true;
+CBR_DONE2:
+ dlli = (dllmin[1] + dllmax[1]) >> 1;
+ dlli <<= 8;
+ dlli += (dllmin[0] + dllmax[0]) >> 1;
+ ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | (dlli << 16));
+ return status;
+} /* CBRDLL2 */
+
+static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+ /* Get trap info */
+ trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = 0x00020000 + (trap << 16);
+ trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
+ trap_MRS = 0x00000010 + (trap << 4);
+ trap_MRS |= ((trap & 0x2) << 18);
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 336:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 0;
+ param->reg_AC1 = 0x22202725;
+ param->reg_AC2 = 0xAA007613 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x04001400 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xAA007613 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xAA00761C | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xAA007636 | trap_AC2;
+ break;
+ }
+ break;
+ default:
+ case 396:
+ ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x00005040;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+ break;
+
+ case 408:
+ ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xCD44961A;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00081830;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 4;
+ break;
+ case 504:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0270);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xDE44A61D;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x070000BB;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 4;
+ break;
+ case 528:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0290);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xEF44B61E;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000088;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302A37;
+ param->reg_AC2 = 0xEF56B61E;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 136;
+ param->dll2_finetune_step = 3;
+ break;
+ case 600:
+ ast_moutdwm(ast, 0x1E6E2020, 0x02E1);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xDF56B61F;
+ param->reg_DQSIC = 0x0000014D;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000058C0;
+ param->madj_max = 132;
+ param->dll2_finetune_step = 3;
+ break;
+ case 624:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0160);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xEF56B621;
+ param->reg_DQSIC = 0x0000015A;
+ param->reg_MRS = 0x02101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000059C0;
+ param->madj_max = 128;
+ param->dll2_finetune_step = 3;
+ break;
+ } /* switch freq */
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x130;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x131;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x132;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x133;
+ break;
+ } /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case AST_VIDMEM_SIZE_8M:
+ param->dram_config |= 0x00;
+ break;
+ case AST_VIDMEM_SIZE_16M:
+ param->dram_config |= 0x04;
+ break;
+ case AST_VIDMEM_SIZE_32M:
+ param->dram_config |= 0x08;
+ break;
+ case AST_VIDMEM_SIZE_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+}
+
+static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2, retry = 0;
+
+ddr3_init_start:
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
+ ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000000);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
+ ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
+ ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ ast_moutdwm(ast, 0x1E6E0018, 0x4000A170);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00002370);
+ ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0040, 0xFF444444);
+ ast_moutdwm(ast, 0x1E6E0044, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E004C, 0x00000002);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0054, 0);
+ ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+ /* Wait MCLK2X lock to MCLK */
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max) {
+ break;
+ }
+ ast_moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000) {
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ } else {
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ }
+ data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ ast_moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff);
+ data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000040);
+ udelay(50);
+ /* Mode Register Setting */
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
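+ /* presumably the write/read on-die termination (wodt/rodt) enables for this speed grade */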
+ data = 0;
+ if (param->wodt) {
+ data = 0x300;
+ }
+ if (param->rodt) {
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ }
+ ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
+
+ /* Calibrate the DQSI delay */
+ if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
+ goto ddr3_init_start;
+
+ ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+ /* ECC Memory Initialization */
+#ifdef ECC
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = ast_mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+}
+
+static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+ /* Get trap info */
+ trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = (trap << 20) | (trap << 16);
+ trap_AC2 += 0x00110000;
+ trap_MRS = 0x00000040 | (trap << 4);
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 264:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0130);
+ param->wodt = 0;
+ param->reg_AC1 = 0x11101513;
+ param->reg_AC2 = 0x78117011;
+ param->reg_DQSIC = 0x00000092;
+ param->reg_MRS = 0x00000842;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x000000F0;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x0000005A;
+ param->reg_FREQ = 0x00004AC0;
+ param->madj_max = 138;
+ param->dll2_finetune_step = 3;
+ break;
+ case 336:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 1;
+ param->reg_AC1 = 0x22202613;
+ param->reg_AC2 = 0xAA009016 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x00000A02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xAA009012 | trap_AC2;
+ break;
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xAA009016 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xAA009023 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xAA00903B | trap_AC2;
+ break;
+ }
+ break;
+ default:
+ case 396:
+ ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x00005040;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+
+ case 408:
+ ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xCD44B01E;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 3;
+ break;
+ case 504:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0261);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xDE44C022;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 3;
+ break;
+ case 528:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0120);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xEF44D024;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F9;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A7;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 552:
+ ast_moutdwm(ast, 0x1E6E2020, 0x02A1);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E025;
+ param->reg_DQSIC = 0x00000132;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000AD;
+ param->reg_FREQ = 0x000056C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ ast_moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E027;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000B3;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ }
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x100;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x121;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x122;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x123;
+ break;
+ } /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case AST_VIDMEM_SIZE_8M:
+ param->dram_config |= 0x00;
+ break;
+ case AST_VIDMEM_SIZE_16M:
+ param->dram_config |= 0x04;
+ break;
+ case AST_VIDMEM_SIZE_32M:
+ param->dram_config |= 0x08;
+ break;
+ case AST_VIDMEM_SIZE_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+}
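+
+/*
+ * dram_config layout, as programmed above and later written to 0x1E6E0004
+ * in ddr2_init(): the base value selects the chip geometry (0x100 =
+ * 512Mx16, 0x121 = 1Gx16, 0x122 = 2Gx16, 0x123 = 4Gx16) and the OR-ed
+ * nibble the VRAM aperture (0x00 = 8M, 0x04 = 16M, 0x08 = 32M,
+ * 0x0c = 64M); a 1Gx16 part with 32M of VRAM thus yields
+ * 0x121 | 0x08 = 0x129.
+ */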
+
+static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2, retry = 0;
+
+ddr2_init_start:
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
+ ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
+ ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
+ ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ ast_moutdwm(ast, 0x1E6E0018, 0x4000A130);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00002330);
+ ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0040, 0xFF808000);
+ ast_moutdwm(ast, 0x1E6E0044, 0x88848466);
+ ast_moutdwm(ast, 0x1E6E0048, 0x44440008);
+ ast_moutdwm(ast, 0x1E6E004C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0054, 0);
+ ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+
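+	/*
+	 * MCLK/MCLK2X calibration: the loop below polls the DLL status at
+	 * 0x1E6E001C and, while the reported delay is out of range, bumps
+	 * reg_MADJ in steps of 4 (giving up beyond madj_max), adjusts the
+	 * slave delay in 0x1E6E0068 to match, and re-arms lock detection.
+	 */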
+ /* Wait MCLK2X lock to MCLK */
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max) {
+ break;
+ }
+ ast_moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000) {
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ } else {
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ }
+ data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ ast_moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = ast_mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ data = ast_mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff);
+ data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
+ ast_moutdwm(ast, 0x1E6E0018, data);
+
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
+ udelay(50);
+ /* Mode Register Setting */
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+ data = 0;
+ if (param->wodt) {
+ data = 0x500;
+ }
+ if (param->rodt) {
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ }
+ ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
+ ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+
+ /* Calibrate the DQSI delay */
+ if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
+ goto ddr2_init_start;
+
+ /* ECC Memory Initialization */
+#ifdef ECC
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = ast_mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+}
+
+static void ast_init_dram_2300(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast2300_dram_param param;
+ u32 temp;
+ u8 reg;
+
+ reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+	if ((reg & 0x80) == 0) {	/* vga only */
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688a8a8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x1);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x1);
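+
+		/*
+		 * The magic writes above go through the PCI-to-AHB window
+		 * programmed via 0xf004/0xf000: 0x1688a8a8 is the SCU unlock
+		 * key and 0xfc600309 the memory-controller unlock key; each
+		 * loop waits until the corresponding unlock takes effect.
+		 */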
+
+ /* Slow down CPU/AHB CLK in VGA only mode */
+ temp = ast_read32(ast, 0x12008);
+ temp |= 0x73;
+ ast_write32(ast, 0x12008, temp);
+
+ param.dram_type = AST_DDR3;
+ if (temp & 0x01000000)
+ param.dram_type = AST_DDR2;
+ param.dram_chipid = ast->dram_type;
+ param.dram_freq = ast->mclk;
+ param.vram_size = ast->vram_size;
+
+ if (param.dram_type == AST_DDR3) {
+ get_ddr3_info(ast, &param);
+ ddr3_init(ast, &param);
+ } else {
+ get_ddr2_info(ast, &param);
+ ddr2_init(ast, &param);
+ }
+
+ temp = ast_mindwm(ast, 0x1e6e2040);
+ ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ } while ((reg & 0x40) == 0);
+}
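+
+/*
+ * Flow summary: scratch register 0xD0 (CRTC index) bit 7 reports whether
+ * firmware already initialized the memory controller; in VGA-only mode
+ * the driver performs the init itself, selecting DDR2 vs. DDR3 from the
+ * strap bits read back at 0x12008, and in either case waits for the
+ * ready flag in bit 6 before returning.
+ */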
+
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
new file mode 100644
index 00000000000..4c761dcea97
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of the authors not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission. The authors makes no representations
+ * about the suitability of this software for any purpose. It is provided
+ * "as is" without express or implied warranty.
+ *
+ * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+/* Ported from xf86-video-ast driver */
+
+#ifndef AST_TABLES_H
+#define AST_TABLES_H
+
+/* Std. Table Index Definition */
+#define TextModeIndex 0
+#define EGAModeIndex 1
+#define VGAModeIndex 2
+#define HiCModeIndex 3
+#define TrueCModeIndex 4
+
+#define Charx8Dot 0x00000001
+#define HalfDCLK 0x00000002
+#define DoubleScanMode 0x00000004
+#define LineCompareOff 0x00000008
+#define SyncPP 0x00000000
+#define SyncPN 0x00000040
+#define SyncNP 0x00000080
+#define SyncNN 0x000000C0
+#define HBorder 0x00000020
+#define VBorder 0x00000010
+#define WideScreenMode 0x00000100
+#define NewModeInfo 0x00000200
+
+/* DCLK Index */
+#define VCLK25_175 0x00
+#define VCLK28_322 0x01
+#define VCLK31_5 0x02
+#define VCLK36 0x03
+#define VCLK40 0x04
+#define VCLK49_5 0x05
+#define VCLK50 0x06
+#define VCLK56_25 0x07
+#define VCLK65 0x08
+#define VCLK75 0x09
+#define VCLK78_75 0x0A
+#define VCLK94_5 0x0B
+#define VCLK108 0x0C
+#define VCLK135 0x0D
+#define VCLK157_5 0x0E
+#define VCLK162 0x0F
+/* #define VCLK193_25 0x10 */
+#define VCLK154 0x10
+#define VCLK83_5 0x11
+#define VCLK106_5 0x12
+#define VCLK146_25 0x13
+#define VCLK148_5 0x14
+#define VCLK71 0x15
+#define VCLK88_75 0x16
+#define VCLK119 0x17
+#define VCLK85_5 0x18
+#define VCLK97_75 0x19
+
+static struct ast_vbios_dclk_info dclk_table[] = {
+ {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
+ {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
+ {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
+ {0x76, 0x63, 0x01}, /* 03: VCLK36 */
+ {0xEE, 0x67, 0x01}, /* 04: VCLK40 */
+ {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
+ {0xC6, 0x64, 0x01}, /* 06: VCLK50 */
+ {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
+ {0x80, 0x64, 0x00}, /* 08: VCLK65 */
+ {0x7B, 0x63, 0x00}, /* 09: VCLK75 */
+ {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */
+ {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */
+ {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */
+ {0x85, 0x24, 0x00}, /* 0D: VCLK135 */
+ {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
+ {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
+ {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
+	{0xa7, 0x78, 0x80},			/* 11: VCLK83_5 */
+	{0x28, 0x49, 0x80},			/* 12: VCLK106_5 */
+	{0x37, 0x49, 0x80},			/* 13: VCLK146_25 */
+	{0x1f, 0x45, 0x80},			/* 14: VCLK148_5 */
+	{0x47, 0x6c, 0x80},			/* 15: VCLK71 */
+	{0x25, 0x65, 0x80},			/* 16: VCLK88_75 */
+ {0x77, 0x58, 0x80}, /* 17: VCLK119 */
+ {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
+	{0x6a, 0x6d, 0x80},			/* 19: VCLK97_75 */
+};
+
+static struct ast_vbios_stdtable vbios_stdtable[] = {
+ /* MD_2_3_400 */
+ {
+ 0x67,
+ {0x00,0x03,0x00,0x02},
+ {0x5f,0x4f,0x50,0x82,0x55,0x81,0xbf,0x1f,
+ 0x00,0x4f,0x0d,0x0e,0x00,0x00,0x00,0x00,
+ 0x9c,0x8e,0x8f,0x28,0x1f,0x96,0xb9,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07,
+ 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
+ 0x0c,0x00,0x0f,0x08},
+ {0x00,0x00,0x00,0x00,0x00,0x10,0x0e,0x00,
+ 0xff}
+ },
+ /* Mode12/ExtEGATable */
+ {
+ 0xe3,
+ {0x01,0x0f,0x00,0x06},
+ {0x5f,0x4f,0x50,0x82,0x55,0x81,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xe9,0x8b,0xdf,0x28,0x00,0xe7,0x04,0xe3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07,
+ 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
+ 0x01,0x00,0x0f,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+ 0xff}
+ },
+ /* ExtVGATable */
+ {
+ 0x2f,
+ {0x01,0x0f,0x00,0x0e},
+ {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+ 0x01,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x40,0x05,0x0f,
+ 0xff}
+ },
+ /* ExtHiCTable */
+ {
+ 0x2f,
+ {0x01,0x0f,0x00,0x0e},
+ {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+ 0x01,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+ 0xff}
+ },
+ /* ExtTrueCTable */
+ {
+ 0x2f,
+ {0x01,0x0f,0x00,0x0e},
+ {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+ 0x01,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+ 0xff}
+ },
+};
+
+static struct ast_vbios_enhtable res_640x480[] = {
+ { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60Hz */
+ (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E },
+ { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72Hz */
+ (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E },
+ { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75Hz */
+ (SyncNN | Charx8Dot) , 75, 3, 0x2E },
+ { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */
+ (SyncNN | Charx8Dot) , 85, 4, 0x2E },
+ { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */
+ (SyncNN | Charx8Dot) , 0xFF, 4, 0x2E },
+};
+
+static struct ast_vbios_enhtable res_800x600[] = {
+ {1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */
+ (SyncPP | Charx8Dot), 56, 1, 0x30 },
+ {1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60Hz */
+ (SyncPP | Charx8Dot), 60, 2, 0x30 },
+ {1040, 800, 56, 120, 666, 600, 37, 6, VCLK50, /* 72Hz */
+ (SyncPP | Charx8Dot), 72, 3, 0x30 },
+ {1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5, /* 75Hz */
+ (SyncPP | Charx8Dot), 75, 4, 0x30 },
+ {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* 85Hz */
+ (SyncPP | Charx8Dot), 84, 5, 0x30 },
+ {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 5, 0x30 },
+};
+
+static struct ast_vbios_enhtable res_1024x768[] = {
+ {1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60Hz */
+ (SyncNN | Charx8Dot), 60, 1, 0x31 },
+ {1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70Hz */
+ (SyncNN | Charx8Dot), 70, 2, 0x31 },
+ {1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75, /* 75Hz */
+ (SyncPP | Charx8Dot), 75, 3, 0x31 },
+ {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* 85Hz */
+ (SyncPP | Charx8Dot), 84, 4, 0x31 },
+ {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 4, 0x31 },
+};
+
+static struct ast_vbios_enhtable res_1280x1024[] = {
+ {1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60Hz */
+ (SyncPP | Charx8Dot), 60, 1, 0x32 },
+ {1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75Hz */
+ (SyncPP | Charx8Dot), 75, 2, 0x32 },
+ {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* 85Hz */
+ (SyncPP | Charx8Dot), 85, 3, 0x32 },
+ {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 3, 0x32 },
+};
+
+static struct ast_vbios_enhtable res_1600x1200[] = {
+ {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60Hz */
+ (SyncPP | Charx8Dot), 60, 1, 0x33 },
+ {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
+};
+
+/* 16:9 */
+static struct ast_vbios_enhtable res_1360x768[] = {
+ {1792, 1360, 64,112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */
+ (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x39 },
+ {1792, 1360, 64,112, 795, 768, 3, 6, VCLK85_5, /* end */
+ (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x39 },
+};
+
+static struct ast_vbios_enhtable res_1600x900[] = {
+ {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x3A },
+ {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* end */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x3A }
+};
+
+static struct ast_vbios_enhtable res_1920x1080[] = {
+ {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x38 },
+	{2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5,	/* end */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x38 },
+};
+
+/* 16:10 */
+static struct ast_vbios_enhtable res_1280x800[] = {
+ {1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */
+	 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x35 },
+ {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x35 },
+	{1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5,	/* end */
+	 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x35 },
+};
+
+static struct ast_vbios_enhtable res_1440x900[] = {
+ {1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x36 },
+ {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x36 },
+	{1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5,	/* end */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x36 },
+};
+
+static struct ast_vbios_enhtable res_1680x1050[] = {
+ {1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x37 },
+ {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x37 },
+	{2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25,	/* end */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x37 },
+};
+
+static struct ast_vbios_enhtable res_1920x1200[] = {
+ {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x34 },
+	{2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154,	/* end */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x34 },
+};
+
+#endif
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
new file mode 100644
index 00000000000..b8246227bab
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -0,0 +1,431 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include "ast_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct ast_private *
+ast_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct ast_private, ttm.bdev);
+}
+
+static int
+ast_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+ast_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int ast_ttm_global_init(struct ast_private *ast)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &ast->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &ast_ttm_mem_global_init;
+ global_ref->release = &ast_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
+ return r;
+ }
+
+ ast->ttm.bo_global_ref.mem_glob =
+ ast->ttm.mem_global_ref.object;
+ global_ref = &ast->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&ast->ttm.mem_global_ref);
+ return r;
+ }
+ return 0;
+}
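+
+/*
+ * Note: the TTM memory-accounting object and BO global state are
+ * refcounted singletons shared by every TTM driver in the system;
+ * drm_global_item_ref() creates them on first use, and the matching
+ * drm_global_item_unref() calls below drop them again in reverse order.
+ */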
+
+static void
+ast_ttm_global_release(struct ast_private *ast)
+{
+ if (ast->ttm.mem_global_ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&ast->ttm.mem_global_ref);
+ ast->ttm.mem_global_ref.release = NULL;
+}
+
+static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct ast_bo *bo;
+
+ bo = container_of(tbo, struct ast_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+static bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &ast_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int
+ast_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct ast_bo *astbo = ast_bo(bo);
+
+ if (!ast_ttm_bo_is_ast_bo(bo))
+ return;
+
+ ast_ttm_placement(astbo, TTM_PL_FLAG_SYSTEM);
+ *pl = astbo->placement;
+}
+
+static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ struct ast_bo *astbo = ast_bo(bo);
+
+ return drm_vma_node_verify_access(&astbo->gem.vma_node, filp);
+}
+
+static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct ast_private *ast = ast_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(ast->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+	default:
+		return -EINVAL;
+ }
+ return 0;
+}
+
+static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int ast_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+}
+
+static void ast_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func ast_tt_backend_func = {
+ .destroy = &ast_ttm_backend_destroy,
+};
+
+static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &ast_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int ast_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver ast_bo_driver = {
+ .ttm_tt_create = ast_ttm_tt_create,
+ .ttm_tt_populate = ast_ttm_tt_populate,
+ .ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
+ .init_mem_type = ast_bo_init_mem_type,
+ .evict_flags = ast_bo_evict_flags,
+ .move = ast_bo_move,
+ .verify_access = ast_bo_verify_access,
+ .io_mem_reserve = &ast_ttm_io_mem_reserve,
+ .io_mem_free = &ast_ttm_io_mem_free,
+};
+
+int ast_mm_init(struct ast_private *ast)
+{
+ int ret;
+ struct drm_device *dev = ast->dev;
+ struct ttm_bo_device *bdev = &ast->ttm.bdev;
+
+ ret = ast_ttm_global_init(ast);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&ast->ttm.bdev,
+ ast->ttm.bo_global_ref.ref.object,
+ &ast_bo_driver,
+ dev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ ast->vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
+ return 0;
+}
+
+void ast_mm_fini(struct ast_private *ast)
+{
+ ttm_bo_device_release(&ast->ttm.bdev);
+
+ ast_ttm_global_release(ast);
+
+ arch_phys_wc_del(ast->fb_mtrr);
+}
+
+void ast_ttm_placement(struct ast_bo *bo, int domain)
+{
+ u32 c = 0;
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+int ast_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct ast_bo **pastbo)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast_bo *astbo;
+ size_t acc_size;
+ int ret;
+
+ astbo = kzalloc(sizeof(struct ast_bo), GFP_KERNEL);
+ if (!astbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &astbo->gem, size);
+ if (ret) {
+ kfree(astbo);
+ return ret;
+ }
+
+ astbo->bo.bdev = &ast->ttm.bdev;
+
+ ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&ast->ttm.bdev, size,
+ sizeof(struct ast_bo));
+
+ ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
+ ttm_bo_type_device, &astbo->placement,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
+ NULL, ast_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pastbo = astbo;
+ return 0;
+}
+
+static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = ast_bo_gpu_offset(bo);
+		return 0;
+	}
+
+ ast_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = ast_bo_gpu_offset(bo);
+ return 0;
+}
+
+int ast_bo_unpin(struct ast_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int ast_bo_push_sysram(struct ast_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret) {
+		DRM_ERROR("pushing to system ram failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+int ast_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct ast_private *ast;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ ast = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &ast->ttm.bdev);
+}
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index a1fce68e3bb..c399dea27a3 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -31,7 +31,8 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include "drmP.h"
+#include <linux/export.h>
+#include <drm/drmP.h>
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
@@ -113,7 +114,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
DRM_ERROR("fail to set dma mask to 0x%Lx\n",
- gart_info->table_mask);
+ (unsigned long long)gart_info->table_mask);
ret = 1;
goto done;
}
@@ -152,7 +153,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
/* we need to support large memory configurations */
entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (entry->busaddr[i] == 0) {
+ if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_pcigart_cleanup(dev, gart_info);
address = NULL;
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
new file mode 100644
index 00000000000..5f8b0c2b9a4
--- /dev/null
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -0,0 +1,12 @@
+config DRM_BOCHS
+ tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
+ depends on DRM && PCI
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select DRM_TTM
+ help
+ Choose this option for qemu.
+ If M is selected the module will be called bochs-drm.
diff --git a/drivers/gpu/drm/bochs/Makefile b/drivers/gpu/drm/bochs/Makefile
new file mode 100644
index 00000000000..844a5561492
--- /dev/null
+++ b/drivers/gpu/drm/bochs/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+bochs-drm-y := bochs_drv.o bochs_mm.o bochs_kms.o bochs_fbdev.o bochs_hw.o
+
+obj-$(CONFIG_DRM_BOCHS) += bochs-drm.o
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
new file mode 100644
index 00000000000..7eb52dd44b0
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -0,0 +1,163 @@
+#include <linux/io.h>
+#include <linux/fb.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_page_alloc.h>
+
+/* ---------------------------------------------------------------------- */
+
+#define VBE_DISPI_IOPORT_INDEX 0x01CE
+#define VBE_DISPI_IOPORT_DATA 0x01CF
+
+#define VBE_DISPI_INDEX_ID 0x0
+#define VBE_DISPI_INDEX_XRES 0x1
+#define VBE_DISPI_INDEX_YRES 0x2
+#define VBE_DISPI_INDEX_BPP 0x3
+#define VBE_DISPI_INDEX_ENABLE 0x4
+#define VBE_DISPI_INDEX_BANK 0x5
+#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
+#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
+#define VBE_DISPI_INDEX_X_OFFSET 0x8
+#define VBE_DISPI_INDEX_Y_OFFSET 0x9
+#define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa
+
+#define VBE_DISPI_ID0 0xB0C0
+#define VBE_DISPI_ID1 0xB0C1
+#define VBE_DISPI_ID2 0xB0C2
+#define VBE_DISPI_ID3 0xB0C3
+#define VBE_DISPI_ID4 0xB0C4
+#define VBE_DISPI_ID5 0xB0C5
+
+#define VBE_DISPI_DISABLED 0x00
+#define VBE_DISPI_ENABLED 0x01
+#define VBE_DISPI_GETCAPS 0x02
+#define VBE_DISPI_8BIT_DAC 0x20
+#define VBE_DISPI_LFB_ENABLED 0x40
+#define VBE_DISPI_NOCLEARMEM 0x80
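+
+/*
+ * Register access protocol, ioport variant: write one of the
+ * VBE_DISPI_INDEX_* values to VBE_DISPI_IOPORT_INDEX, then transfer
+ * 16-bit data through VBE_DISPI_IOPORT_DATA, e.g.
+ *
+ *	outw(VBE_DISPI_INDEX_ID, VBE_DISPI_IOPORT_INDEX);
+ *	id = inw(VBE_DISPI_IOPORT_DATA);
+ *
+ * The mmio variant exposes the same registers at offset 0x500 of BAR 2
+ * (see bochs_dispi_read/bochs_dispi_write in bochs_hw.c).
+ */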
+
+/* ---------------------------------------------------------------------- */
+
+enum bochs_types {
+ BOCHS_QEMU_STDVGA,
+ BOCHS_UNKNOWN,
+};
+
+struct bochs_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct bochs_device {
+ /* hw */
+ void __iomem *mmio;
+ int ioports;
+ void __iomem *fb_map;
+ unsigned long fb_base;
+ unsigned long fb_size;
+
+ /* mode */
+ u16 xres;
+ u16 yres;
+ u16 yres_virtual;
+ u32 stride;
+ u32 bpp;
+
+ /* drm */
+ struct drm_device *dev;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ bool mode_config_initialized;
+
+ /* ttm */
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ bool initialized;
+ } ttm;
+
+ /* fbdev */
+ struct {
+ struct bochs_framebuffer gfb;
+ struct drm_fb_helper helper;
+ int size;
+ bool initialized;
+ } fb;
+};
+
+#define to_bochs_framebuffer(x) container_of(x, struct bochs_framebuffer, base)
+
+struct bochs_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+
+static inline struct bochs_bo *bochs_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct bochs_bo, bo);
+}
+
+static inline struct bochs_bo *gem_to_bochs_bo(struct drm_gem_object *gem)
+{
+ return container_of(gem, struct bochs_bo, gem);
+}
+
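+/*
+ * GEM objects get mmap offsets at or above this mark (the page offset
+ * of 4 GiB); anything below is routed to the legacy drm_mmap() path
+ * (see the vm_pgoff check in bochs_mmap()).
+ */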
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static inline u64 bochs_bo_mmap_offset(struct bochs_bo *bo)
+{
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* bochs_hw.c */
+int bochs_hw_init(struct drm_device *dev, uint32_t flags);
+void bochs_hw_fini(struct drm_device *dev);
+
+void bochs_hw_setmode(struct bochs_device *bochs,
+ struct drm_display_mode *mode);
+void bochs_hw_setbase(struct bochs_device *bochs,
+ int x, int y, u64 addr);
+
+/* bochs_mm.c */
+int bochs_mm_init(struct bochs_device *bochs);
+void bochs_mm_fini(struct bochs_device *bochs);
+int bochs_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+int bochs_gem_init_object(struct drm_gem_object *obj);
+void bochs_gem_free_object(struct drm_gem_object *obj);
+int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+
+int bochs_framebuffer_init(struct drm_device *dev,
+ struct bochs_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int bochs_bo_unpin(struct bochs_bo *bo);
+
+extern const struct drm_mode_config_funcs bochs_mode_funcs;
+
+/* bochs_kms.c */
+int bochs_kms_init(struct bochs_device *bochs);
+void bochs_kms_fini(struct bochs_device *bochs);
+
+/* bochs_fbdev.c */
+int bochs_fbdev_init(struct bochs_device *bochs);
+void bochs_fbdev_fini(struct bochs_device *bochs);
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
new file mode 100644
index 00000000000..9c13df29fd2
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -0,0 +1,222 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "bochs.h"
+
+static bool enable_fbdev = true;
+module_param_named(fbdev, enable_fbdev, bool, 0444);
+MODULE_PARM_DESC(fbdev, "register fbdev device");
+
+/* ---------------------------------------------------------------------- */
+/* drm interface */
+
+static int bochs_unload(struct drm_device *dev)
+{
+ struct bochs_device *bochs = dev->dev_private;
+
+ bochs_fbdev_fini(bochs);
+ bochs_kms_fini(bochs);
+ bochs_mm_fini(bochs);
+ bochs_hw_fini(dev);
+ kfree(bochs);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+static int bochs_load(struct drm_device *dev, unsigned long flags)
+{
+ struct bochs_device *bochs;
+ int ret;
+
+ bochs = kzalloc(sizeof(*bochs), GFP_KERNEL);
+ if (bochs == NULL)
+ return -ENOMEM;
+ dev->dev_private = bochs;
+ bochs->dev = dev;
+
+ ret = bochs_hw_init(dev, flags);
+ if (ret)
+ goto err;
+
+ ret = bochs_mm_init(bochs);
+ if (ret)
+ goto err;
+
+ ret = bochs_kms_init(bochs);
+ if (ret)
+ goto err;
+
+ if (enable_fbdev)
+ bochs_fbdev_init(bochs);
+
+ return 0;
+
+err:
+ bochs_unload(dev);
+ return ret;
+}
+
+static const struct file_operations bochs_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = bochs_mmap,
+};
+
+static struct drm_driver bochs_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET,
+ .load = bochs_load,
+ .unload = bochs_unload,
+ .fops = &bochs_fops,
+ .name = "bochs-drm",
+ .desc = "bochs dispi vga interface (qemu stdvga)",
+ .date = "20130925",
+ .major = 1,
+ .minor = 0,
+ .gem_free_object = bochs_gem_free_object,
+ .dumb_create = bochs_dumb_create,
+ .dumb_map_offset = bochs_dumb_mmap_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+};
+
+/* ---------------------------------------------------------------------- */
+/* pm interface */
+
+static int bochs_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct bochs_device *bochs = drm_dev->dev_private;
+
+ drm_kms_helper_poll_disable(drm_dev);
+
+ if (bochs->fb.initialized) {
+ console_lock();
+ fb_set_suspend(bochs->fb.helper.fbdev, 1);
+ console_unlock();
+ }
+
+ return 0;
+}
+
+static int bochs_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct bochs_device *bochs = drm_dev->dev_private;
+
+ drm_helper_resume_force_mode(drm_dev);
+
+ if (bochs->fb.initialized) {
+ console_lock();
+ fb_set_suspend(bochs->fb.helper.fbdev, 0);
+ console_unlock();
+ }
+
+ drm_kms_helper_poll_enable(drm_dev);
+ return 0;
+}
+
+static const struct dev_pm_ops bochs_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(bochs_pm_suspend,
+ bochs_pm_resume)
+};
+
+/* ---------------------------------------------------------------------- */
+/* pci interface */
+
+static int bochs_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+ remove_conflicting_framebuffers(ap, "bochsdrmfb", false);
+ kfree(ap);
+
+ return 0;
+}
+
+static int bochs_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret;
+
+ ret = bochs_kick_out_firmware_fb(pdev);
+ if (ret)
+ return ret;
+
+ return drm_get_pci_dev(pdev, ent, &bochs_driver);
+}
+
+static void bochs_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(bochs_pci_tbl) = {
+ {
+ .vendor = 0x1234,
+ .device = 0x1111,
+ .subvendor = 0x1af4,
+ .subdevice = 0x1100,
+ .driver_data = BOCHS_QEMU_STDVGA,
+ },
+ {
+ .vendor = 0x1234,
+ .device = 0x1111,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = BOCHS_UNKNOWN,
+ },
+ { /* end of list */ }
+};
+
+static struct pci_driver bochs_pci_driver = {
+ .name = "bochs-drm",
+ .id_table = bochs_pci_tbl,
+ .probe = bochs_pci_probe,
+ .remove = bochs_pci_remove,
+ .driver.pm = &bochs_pm_ops,
+};
+
+/* ---------------------------------------------------------------------- */
+/* module init/exit */
+
+static int __init bochs_init(void)
+{
+ return drm_pci_init(&bochs_driver, &bochs_pci_driver);
+}
+
+static void __exit bochs_exit(void)
+{
+ drm_pci_exit(&bochs_driver, &bochs_pci_driver);
+}
+
+module_init(bochs_init);
+module_exit(bochs_exit);
+
+MODULE_DEVICE_TABLE(pci, bochs_pci_tbl);
+MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
new file mode 100644
index 00000000000..561b8447412
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -0,0 +1,214 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+/* ---------------------------------------------------------------------- */
+
+static struct fb_ops bochsfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int bochsfb_create_object(struct bochs_device *bochs,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = bochs->dev;
+ struct drm_gem_object *gobj;
+ u32 size;
+ int ret = 0;
+
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = bochs_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int bochsfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct bochs_device *bochs =
+ container_of(helper, struct bochs_device, fb.helper);
+ struct drm_device *dev = bochs->dev;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct device *device = &dev->pdev->dev;
+ struct drm_gem_object *gobj = NULL;
+ struct bochs_bo *bo = NULL;
+ int size, ret;
+
+ if (sizes->surface_bpp != 32)
+ return -EINVAL;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ /* alloc, pin & map bo */
+ ret = bochsfb_create_object(bochs, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+
+ bo = gem_to_bochs_bo(gobj);
+
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ if (ret)
+ return ret;
+
+ ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
+ if (ret) {
+ DRM_ERROR("failed to pin fbcon\n");
+ ttm_bo_unreserve(&bo->bo);
+ return ret;
+ }
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages,
+ &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fbcon\n");
+ ttm_bo_unreserve(&bo->bo);
+ return ret;
+ }
+
+ ttm_bo_unreserve(&bo->bo);
+
+ /* init fb device */
+ info = framebuffer_alloc(0, device);
+ if (info == NULL)
+ return -ENOMEM;
+
+ info->par = &bochs->fb.helper;
+
+ ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj);
+ if (ret)
+ return ret;
+
+ bochs->fb.size = size;
+
+ /* setup helper */
+ fb = &bochs->fb.gfb.base;
+ bochs->fb.helper.fb = fb;
+ bochs->fb.helper.fbdev = info;
+
+ strcpy(info->fix.id, "bochsdrmfb");
+
+ info->flags = FBINFO_DEFAULT;
+ info->fbops = &bochsfb_ops;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width,
+ sizes->fb_height);
+
+ info->screen_base = bo->kmap.virtual;
+ info->screen_size = size;
+
+#if 0
+ /* FIXME: get this right for mmap(/dev/fb0) */
+ info->fix.smem_start = bochs_bo_mmap_offset(bo);
+ info->fix.smem_len = size;
+#endif
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int bochs_fbdev_destroy(struct bochs_device *bochs)
+{
+ struct bochs_framebuffer *gfb = &bochs->fb.gfb;
+ struct fb_info *info;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (bochs->fb.helper.fbdev) {
+ info = bochs->fb.helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (gfb->obj) {
+ drm_gem_object_unreference_unlocked(gfb->obj);
+ gfb->obj = NULL;
+ }
+
+ drm_fb_helper_fini(&bochs->fb.helper);
+ drm_framebuffer_unregister_private(&gfb->base);
+ drm_framebuffer_cleanup(&gfb->base);
+
+ return 0;
+}
+
+void bochs_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+}
+
+void bochs_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ *red = regno;
+ *green = regno;
+ *blue = regno;
+}
+
+static struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
+ .gamma_set = bochs_fb_gamma_set,
+ .gamma_get = bochs_fb_gamma_get,
+ .fb_probe = bochsfb_create,
+};
+
+int bochs_fbdev_init(struct bochs_device *bochs)
+{
+ int ret;
+
+ bochs->fb.helper.funcs = &bochs_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper,
+ 1, 1);
+ if (ret)
+ return ret;
+
+ drm_fb_helper_single_add_all_connectors(&bochs->fb.helper);
+ drm_helper_disable_unused_functions(bochs->dev);
+ drm_fb_helper_initial_config(&bochs->fb.helper, 32);
+
+ bochs->fb.initialized = true;
+ return 0;
+}
+
+void bochs_fbdev_fini(struct bochs_device *bochs)
+{
+ if (!bochs->fb.initialized)
+ return;
+
+ bochs_fbdev_destroy(bochs);
+ bochs->fb.initialized = false;
+}
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
new file mode 100644
index 00000000000..dbe619e6aab
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -0,0 +1,177 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+/* ---------------------------------------------------------------------- */
+
+static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val)
+{
+ if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df))
+ return;
+
+ if (bochs->mmio) {
+ int offset = ioport - 0x3c0 + 0x400;
+ writeb(val, bochs->mmio + offset);
+ } else {
+ outb(val, ioport);
+ }
+}
+
+static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg)
+{
+ u16 ret = 0;
+
+ if (bochs->mmio) {
+ int offset = 0x500 + (reg << 1);
+ ret = readw(bochs->mmio + offset);
+ } else {
+ outw(reg, VBE_DISPI_IOPORT_INDEX);
+ ret = inw(VBE_DISPI_IOPORT_DATA);
+ }
+ return ret;
+}
+
+static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val)
+{
+ if (bochs->mmio) {
+ int offset = 0x500 + (reg << 1);
+ writew(val, bochs->mmio + offset);
+ } else {
+ outw(reg, VBE_DISPI_IOPORT_INDEX);
+ outw(val, VBE_DISPI_IOPORT_DATA);
+ }
+}
+
+int bochs_hw_init(struct drm_device *dev, uint32_t flags)
+{
+ struct bochs_device *bochs = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ unsigned long addr, size, mem, ioaddr, iosize;
+ u16 id;
+
+ if (/* (ent->driver_data == BOCHS_QEMU_STDVGA) && */
+ (pdev->resource[2].flags & IORESOURCE_MEM)) {
+ /* mmio bar with vga and bochs registers present */
+ if (pci_request_region(pdev, 2, "bochs-drm") != 0) {
+ DRM_ERROR("Cannot request mmio region\n");
+ return -EBUSY;
+ }
+ ioaddr = pci_resource_start(pdev, 2);
+ iosize = pci_resource_len(pdev, 2);
+ bochs->mmio = ioremap(ioaddr, iosize);
+ if (bochs->mmio == NULL) {
+ DRM_ERROR("Cannot map mmio region\n");
+ return -ENOMEM;
+ }
+ } else {
+ ioaddr = VBE_DISPI_IOPORT_INDEX;
+ iosize = 2;
+ if (!request_region(ioaddr, iosize, "bochs-drm")) {
+ DRM_ERROR("Cannot request ioports\n");
+ return -EBUSY;
+ }
+ bochs->ioports = 1;
+ }
+
+ id = bochs_dispi_read(bochs, VBE_DISPI_INDEX_ID);
+ mem = bochs_dispi_read(bochs, VBE_DISPI_INDEX_VIDEO_MEMORY_64K)
+ * 64 * 1024;
+ if ((id & 0xfff0) != VBE_DISPI_ID0) {
+ DRM_ERROR("ID mismatch\n");
+ return -ENODEV;
+ }
+
+ if ((pdev->resource[0].flags & IORESOURCE_MEM) == 0)
+ return -ENODEV;
+ addr = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+ if (addr == 0)
+ return -ENODEV;
+ if (size != mem) {
+		DRM_ERROR("Size mismatch: pci=%lu, bochs=%lu\n",
+			  size, mem);
+ size = min(size, mem);
+ }
+
+ if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
+ DRM_ERROR("Cannot request framebuffer\n");
+ return -EBUSY;
+ }
+
+ bochs->fb_map = ioremap(addr, size);
+ if (bochs->fb_map == NULL) {
+ DRM_ERROR("Cannot map framebuffer\n");
+ return -ENOMEM;
+ }
+ bochs->fb_base = addr;
+ bochs->fb_size = size;
+
+ DRM_INFO("Found bochs VGA, ID 0x%x.\n", id);
+ DRM_INFO("Framebuffer size %ld kB @ 0x%lx, %s @ 0x%lx.\n",
+ size / 1024, addr,
+ bochs->ioports ? "ioports" : "mmio",
+ ioaddr);
+ return 0;
+}
+
+void bochs_hw_fini(struct drm_device *dev)
+{
+ struct bochs_device *bochs = dev->dev_private;
+
+ if (bochs->mmio)
+ iounmap(bochs->mmio);
+ if (bochs->ioports)
+ release_region(VBE_DISPI_IOPORT_INDEX, 2);
+ if (bochs->fb_map)
+ iounmap(bochs->fb_map);
+ pci_release_regions(dev->pdev);
+}
+
+void bochs_hw_setmode(struct bochs_device *bochs,
+ struct drm_display_mode *mode)
+{
+ bochs->xres = mode->hdisplay;
+ bochs->yres = mode->vdisplay;
+ bochs->bpp = 32;
+ bochs->stride = mode->hdisplay * (bochs->bpp / 8);
+ bochs->yres_virtual = bochs->fb_size / bochs->stride;
+
+ DRM_DEBUG_DRIVER("%dx%d @ %d bpp, vy %d\n",
+ bochs->xres, bochs->yres, bochs->bpp,
+ bochs->yres_virtual);
+
+ bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */
+
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES, bochs->yres);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_BANK, 0);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, bochs->xres);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_HEIGHT,
+ bochs->yres_virtual);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, 0);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, 0);
+
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,
+ VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED);
+}
+
+void bochs_hw_setbase(struct bochs_device *bochs,
+ int x, int y, u64 addr)
+{
+ unsigned long offset = (unsigned long)addr +
+ y * bochs->stride +
+ x * (bochs->bpp / 8);
+ int vy = offset / bochs->stride;
+ int vx = (offset % bochs->stride) * 8 / bochs->bpp;
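+
+	/*
+	 * Example: a 1024x768x32 mode has a 4096-byte stride, so a base of
+	 * addr = 0x300000 (exactly one 3 MiB frame) with x = y = 0 yields
+	 * vy = 0x300000 / 4096 = 768 and vx = 0, i.e. the scanout pans one
+	 * full frame into video memory.
+	 */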
+
+ DRM_DEBUG_DRIVER("x %d, y %d, addr %llx -> offset %lx, vx %d, vy %d\n",
+ x, y, addr, offset, vx, vy);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy);
+}
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
new file mode 100644
index 00000000000..dcf2e55f4ae
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -0,0 +1,294 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+static int defx = 1024;
+static int defy = 768;
+
+module_param(defx, int, 0444);
+module_param(defy, int, 0444);
+MODULE_PARM_DESC(defx, "default x resolution");
+MODULE_PARM_DESC(defy, "default y resolution");
+
+/* ---------------------------------------------------------------------- */
+
+static void bochs_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void bochs_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ default:
+ return;
+ }
+}
+
+static bool bochs_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct bochs_device *bochs =
+ container_of(crtc, struct bochs_device, crtc);
+ struct bochs_framebuffer *bochs_fb;
+ struct bochs_bo *bo;
+ u64 gpu_addr = 0;
+ int ret;
+
+ if (old_fb) {
+ bochs_fb = to_bochs_framebuffer(old_fb);
+ bo = gem_to_bochs_bo(bochs_fb->obj);
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ if (ret) {
+ DRM_ERROR("failed to reserve old_fb bo\n");
+ } else {
+ bochs_bo_unpin(bo);
+ ttm_bo_unreserve(&bo->bo);
+ }
+ }
+
+ if (WARN_ON(crtc->primary->fb == NULL))
+ return -EINVAL;
+
+ bochs_fb = to_bochs_framebuffer(crtc->primary->fb);
+ bo = gem_to_bochs_bo(bochs_fb->obj);
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ if (ret)
+ return ret;
+
+ ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ ttm_bo_unreserve(&bo->bo);
+ return ret;
+ }
+
+ ttm_bo_unreserve(&bo->bo);
+ bochs_hw_setbase(bochs, x, y, gpu_addr);
+ return 0;
+}
+
+static int bochs_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct bochs_device *bochs =
+ container_of(crtc, struct bochs_device, crtc);
+
+ bochs_hw_setmode(bochs, mode);
+ bochs_crtc_mode_set_base(crtc, x, y, old_fb);
+ return 0;
+}
+
+static void bochs_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+static void bochs_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+static void bochs_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs bochs_crtc_funcs = {
+ .gamma_set = bochs_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+};
+
+static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
+ .dpms = bochs_crtc_dpms,
+ .mode_fixup = bochs_crtc_mode_fixup,
+ .mode_set = bochs_crtc_mode_set,
+ .mode_set_base = bochs_crtc_mode_set_base,
+ .prepare = bochs_crtc_prepare,
+ .commit = bochs_crtc_commit,
+ .load_lut = bochs_crtc_load_lut,
+};
+
+static void bochs_crtc_init(struct drm_device *dev)
+{
+ struct bochs_device *bochs = dev->dev_private;
+ struct drm_crtc *crtc = &bochs->crtc;
+
+ drm_crtc_init(dev, crtc, &bochs_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+ drm_crtc_helper_add(crtc, &bochs_helper_funcs);
+}
+
+static bool bochs_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void bochs_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void bochs_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+}
+
+static void bochs_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void bochs_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static const struct drm_encoder_helper_funcs bochs_encoder_helper_funcs = {
+ .dpms = bochs_encoder_dpms,
+ .mode_fixup = bochs_encoder_mode_fixup,
+ .mode_set = bochs_encoder_mode_set,
+ .prepare = bochs_encoder_prepare,
+ .commit = bochs_encoder_commit,
+};
+
+static const struct drm_encoder_funcs bochs_encoder_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static void bochs_encoder_init(struct drm_device *dev)
+{
+ struct bochs_device *bochs = dev->dev_private;
+ struct drm_encoder *encoder = &bochs->encoder;
+
+ encoder->possible_crtcs = 0x1;
+ drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs);
+}
+
+int bochs_connector_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ count = drm_add_modes_noedid(connector, 8192, 8192);
+ drm_set_preferred_mode(connector, defx, defy);
+ return count;
+}
+
+static int bochs_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct bochs_device *bochs =
+ container_of(connector, struct bochs_device, connector);
+ unsigned long size = mode->hdisplay * mode->vdisplay * 4;
+
+ /*
+ * Make sure we can fit two framebuffers into video memory.
+ * This allows up to 1600x1200 with 16 MB (default size).
+ * If you want more try this:
+ * 'qemu -vga std -global VGA.vgamem_mb=32 $otherargs'
+ */
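+	/* e.g. 1600 * 1200 * 4 = 7.68 MB per buffer, so two fit in 16 MB */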
+ if (size * 2 > bochs->fb_size)
+ return MODE_BAD;
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+bochs_connector_best_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+	/* pick the first encoder id bound to this connector */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static enum drm_connector_status bochs_connector_detect(struct drm_connector
+ *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
+ .get_modes = bochs_connector_get_modes,
+ .mode_valid = bochs_connector_mode_valid,
+ .best_encoder = bochs_connector_best_encoder,
+};
+
+struct drm_connector_funcs bochs_connector_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = bochs_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+};
+
+static void bochs_connector_init(struct drm_device *dev)
+{
+ struct bochs_device *bochs = dev->dev_private;
+ struct drm_connector *connector = &bochs->connector;
+
+ drm_connector_init(dev, connector, &bochs_connector_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ drm_connector_helper_add(connector,
+ &bochs_connector_connector_helper_funcs);
+}
+
+int bochs_kms_init(struct bochs_device *bochs)
+{
+ drm_mode_config_init(bochs->dev);
+ bochs->mode_config_initialized = true;
+
+ bochs->dev->mode_config.max_width = 8192;
+ bochs->dev->mode_config.max_height = 8192;
+
+ bochs->dev->mode_config.fb_base = bochs->fb_base;
+ bochs->dev->mode_config.preferred_depth = 24;
+ bochs->dev->mode_config.prefer_shadow = 0;
+
+ bochs->dev->mode_config.funcs = (void *)&bochs_mode_funcs;
+
+ bochs_crtc_init(bochs->dev);
+ bochs_encoder_init(bochs->dev);
+ bochs_connector_init(bochs->dev);
+ drm_mode_connector_attach_encoder(&bochs->connector,
+ &bochs->encoder);
+
+ return 0;
+}
+
+void bochs_kms_fini(struct bochs_device *bochs)
+{
+ if (bochs->mode_config_initialized) {
+ drm_mode_config_cleanup(bochs->dev);
+ bochs->mode_config_initialized = false;
+ }
+}
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
new file mode 100644
index 00000000000..b9a695d9279
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -0,0 +1,544 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+static void bochs_ttm_placement(struct bochs_bo *bo, int domain);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct bochs_device, ttm.bdev);
+}
+
+static int bochs_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void bochs_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int bochs_ttm_global_init(struct bochs_device *bochs)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &bochs->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &bochs_ttm_mem_global_init;
+ global_ref->release = &bochs_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ bochs->ttm.bo_global_ref.mem_glob =
+ bochs->ttm.mem_global_ref.object;
+ global_ref = &bochs->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&bochs->ttm.mem_global_ref);
+ return r;
+ }
+
+ return 0;
+}
+
+static void bochs_ttm_global_release(struct bochs_device *bochs)
+{
+ if (bochs->ttm.mem_global_ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&bochs->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&bochs->ttm.mem_global_ref);
+ bochs->ttm.mem_global_ref.release = NULL;
+}
+
+static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct bochs_bo *bo;
+
+ bo = container_of(tbo, struct bochs_bo, bo);
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+static bool bochs_ttm_bo_is_bochs_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &bochs_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int bochs_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+bochs_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct bochs_bo *bochsbo = bochs_bo(bo);
+
+ if (!bochs_ttm_bo_is_bochs_bo(bo))
+ return;
+
+ bochs_ttm_placement(bochsbo, TTM_PL_FLAG_SYSTEM);
+ *pl = bochsbo->placement;
+}
+
+static int bochs_bo_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp)
+{
+ struct bochs_bo *bochsbo = bochs_bo(bo);
+
+ return drm_vma_node_verify_access(&bochsbo->gem.vma_node, filp);
+}
+
+static int bochs_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct bochs_device *bochs = bochs_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
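+		/* VRAM is the framebuffer aperture at bochs->fb_base */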
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = bochs->fb_base;
+ mem->bus.is_iomem = true;
+ break;
+	default:
+		return -EINVAL;
+ }
+ return 0;
+}
+
+static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+}
+
+static int bochs_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+}
+
+static void bochs_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func bochs_tt_backend_func = {
+ .destroy = &bochs_ttm_backend_destroy,
+};
+
+static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &bochs_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+struct ttm_bo_driver bochs_bo_driver = {
+ .ttm_tt_create = bochs_ttm_tt_create,
+ .ttm_tt_populate = ttm_pool_populate,
+ .ttm_tt_unpopulate = ttm_pool_unpopulate,
+ .init_mem_type = bochs_bo_init_mem_type,
+ .evict_flags = bochs_bo_evict_flags,
+ .move = bochs_bo_move,
+ .verify_access = bochs_bo_verify_access,
+	.io_mem_reserve = bochs_ttm_io_mem_reserve,
+	.io_mem_free = bochs_ttm_io_mem_free,
+};
+
+int bochs_mm_init(struct bochs_device *bochs)
+{
+ struct ttm_bo_device *bdev = &bochs->ttm.bdev;
+ int ret;
+
+ ret = bochs_ttm_global_init(bochs);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&bochs->ttm.bdev,
+ bochs->ttm.bo_global_ref.ref.object,
+ &bochs_bo_driver,
+ bochs->dev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ bochs->fb_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ bochs->ttm.initialized = true;
+ return 0;
+}
+
+void bochs_mm_fini(struct bochs_device *bochs)
+{
+ if (!bochs->ttm.initialized)
+ return;
+
+ ttm_bo_device_release(&bochs->ttm.bdev);
+ bochs_ttm_global_release(bochs);
+ bochs->ttm.initialized = false;
+}
+
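+/* translate a domain mask into a TTM placement list (VRAM and/or system) */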
+static void bochs_ttm_placement(struct bochs_bo *bo, int domain)
+{
+ u32 c = 0;
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM) {
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED
+ | TTM_PL_FLAG_VRAM;
+ }
+ if (domain & TTM_PL_FLAG_SYSTEM) {
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ }
+ if (!c) {
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ }
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo)
+{
+ return bo->bo.offset;
+}
+
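+/* pins nest: the bo keeps TTM_PL_FLAG_NO_EVICT until pin_count drops to zero */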
+int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = bochs_bo_gpu_offset(bo);
+ return 0;
+ }
+
+ bochs_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = bochs_bo_gpu_offset(bo);
+ return 0;
+}
+
+int bochs_bo_unpin(struct bochs_bo *bo)
+{
+ int i, ret;
+
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int bochs_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct bochs_device *bochs;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ bochs = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &bochs->ttm.bdev);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int bochs_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct bochs_bo **pbochsbo)
+{
+ struct bochs_device *bochs = dev->dev_private;
+ struct bochs_bo *bochsbo;
+ size_t acc_size;
+ int ret;
+
+ bochsbo = kzalloc(sizeof(struct bochs_bo), GFP_KERNEL);
+ if (!bochsbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &bochsbo->gem, size);
+ if (ret) {
+ kfree(bochsbo);
+ return ret;
+ }
+
+ bochsbo->bo.bdev = &bochs->ttm.bdev;
+ bochsbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping;
+
+ bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&bochs->ttm.bdev, size,
+ sizeof(struct bochs_bo));
+
+ ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size,
+ ttm_bo_type_device, &bochsbo->placement,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
+ NULL, bochs_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pbochsbo = bochsbo;
+ return 0;
+}
+
+int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct bochs_bo *bochsbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = ALIGN(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = bochs_bo_create(dev, size, 0, 0, &bochsbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &bochsbo->gem;
+ return 0;
+}
+
+int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_object *gobj;
+ u32 handle;
+ int ret;
+
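+	/* pitch is width times bytes per pixel, with bpp rounded up to whole bytes */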
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+	ret = bochs_gem_create(dev, args->size, false, &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
+
+static void bochs_bo_unref(struct bochs_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ *bo = NULL;
+}
+
+void bochs_gem_free_object(struct drm_gem_object *obj)
+{
+ struct bochs_bo *bochs_bo = gem_to_bochs_bo(obj);
+
+ bochs_bo_unref(&bochs_bo);
+}
+
+int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct bochs_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_bochs_bo(obj);
+ *offset = bochs_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb);
+ if (bochs_fb->obj)
+ drm_gem_object_unreference_unlocked(bochs_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static const struct drm_framebuffer_funcs bochs_fb_funcs = {
+ .destroy = bochs_user_framebuffer_destroy,
+};
+
+int bochs_framebuffer_init(struct drm_device *dev,
+ struct bochs_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
+ ret = drm_framebuffer_init(dev, &gfb->base, &bochs_fb_funcs);
+ if (ret) {
+ DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static struct drm_framebuffer *
+bochs_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct bochs_framebuffer *bochs_fb;
+ int ret;
+
+ DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
+ mode_cmd->width, mode_cmd->height,
+ (mode_cmd->pixel_format) & 0xff,
+ (mode_cmd->pixel_format >> 8) & 0xff,
+ (mode_cmd->pixel_format >> 16) & 0xff,
+ (mode_cmd->pixel_format >> 24) & 0xff);
+
+ if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
+ return ERR_PTR(-ENOENT);
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ bochs_fb = kzalloc(sizeof(*bochs_fb), GFP_KERNEL);
+ if (!bochs_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = bochs_framebuffer_init(dev, bochs_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(bochs_fb);
+ return ERR_PTR(ret);
+ }
+ return &bochs_fb->base;
+}
+
+const struct drm_mode_config_funcs bochs_mode_funcs = {
+ .fb_create = bochs_user_framebuffer_create,
+};
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
new file mode 100644
index 00000000000..884923f982d
--- /dev/null
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -0,0 +1,5 @@
+config DRM_PTN3460
+ tristate "PTN3460 DP/LVDS bridge"
+ depends on DRM
+ select DRM_KMS_HELPER
+ ---help---
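+	  Driver for the NXP PTN3460 DP/LVDS bridge.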
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
new file mode 100644
index 00000000000..b4733e1fbd2
--- /dev/null
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -0,0 +1,3 @@
+ccflags-y := -Iinclude/drm
+
+obj-$(CONFIG_DRM_PTN3460) += ptn3460.o
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
new file mode 100644
index 00000000000..98fd17ae491
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ptn3460.c
@@ -0,0 +1,343 @@
+/*
+ * NXP PTN3460 DP/LVDS bridge driver
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#include "bridge/ptn3460.h"
+
+#define PTN3460_EDID_ADDR 0x0
+#define PTN3460_EDID_EMULATION_ADDR 0x84
+#define PTN3460_EDID_ENABLE_EMULATION 0
+#define PTN3460_EDID_EMULATION_SELECTION 1
+#define PTN3460_EDID_SRAM_LOAD_ADDR 0x85
+
+struct ptn3460_bridge {
+ struct drm_connector connector;
+ struct i2c_client *client;
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+ struct edid *edid;
+ int gpio_pd_n;
+ int gpio_rst_n;
+ u32 edid_emulation;
+ bool enabled;
+};
+
+static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr,
+ u8 *buf, int len)
+{
+ int ret;
+
+ ret = i2c_master_send(ptn_bridge->client, &addr, 1);
+ if (ret <= 0) {
+ DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = i2c_master_recv(ptn_bridge->client, buf, len);
+ if (ret <= 0) {
+ DRM_ERROR("Failed to recv i2c data, ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ptn3460_write_byte(struct ptn3460_bridge *ptn_bridge, char addr,
+ char val)
+{
+ int ret;
+ char buf[2];
+
+ buf[0] = addr;
+ buf[1] = val;
+
+ ret = i2c_master_send(ptn_bridge->client, buf, ARRAY_SIZE(buf));
+ if (ret <= 0) {
+ DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge)
+{
+ int ret;
+ char val;
+
+ /* Load the selected edid into SRAM (accessed at PTN3460_EDID_ADDR) */
+ ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_SRAM_LOAD_ADDR,
+ ptn_bridge->edid_emulation);
+ if (ret) {
+ DRM_ERROR("Failed to transfer edid to sram, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* Enable EDID emulation and select the desired EDID */
+ val = 1 << PTN3460_EDID_ENABLE_EMULATION |
+ ptn_bridge->edid_emulation << PTN3460_EDID_EMULATION_SELECTION;
+
+ ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_EMULATION_ADDR, val);
+ if (ret) {
+ DRM_ERROR("Failed to write edid value, ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ptn3460_pre_enable(struct drm_bridge *bridge)
+{
+ struct ptn3460_bridge *ptn_bridge = bridge->driver_private;
+ int ret;
+
+ if (ptn_bridge->enabled)
+ return;
+
+ if (gpio_is_valid(ptn_bridge->gpio_pd_n))
+ gpio_set_value(ptn_bridge->gpio_pd_n, 1);
+
+ if (gpio_is_valid(ptn_bridge->gpio_rst_n)) {
+ gpio_set_value(ptn_bridge->gpio_rst_n, 0);
+ udelay(10);
+ gpio_set_value(ptn_bridge->gpio_rst_n, 1);
+ }
+
+ /*
+ * There's a bug in the PTN chip where it falsely asserts hotplug before
+ * it is fully functional. We're forced to wait for the maximum start up
+ * time specified in the chip's datasheet to make sure we're really up.
+ */
+ msleep(90);
+
+ ret = ptn3460_select_edid(ptn_bridge);
+ if (ret)
+ DRM_ERROR("Select edid failed ret=%d\n", ret);
+
+ ptn_bridge->enabled = true;
+}
+
+static void ptn3460_enable(struct drm_bridge *bridge)
+{
+}
+
+static void ptn3460_disable(struct drm_bridge *bridge)
+{
+ struct ptn3460_bridge *ptn_bridge = bridge->driver_private;
+
+ if (!ptn_bridge->enabled)
+ return;
+
+ ptn_bridge->enabled = false;
+
+ if (gpio_is_valid(ptn_bridge->gpio_rst_n))
+ gpio_set_value(ptn_bridge->gpio_rst_n, 1);
+
+ if (gpio_is_valid(ptn_bridge->gpio_pd_n))
+ gpio_set_value(ptn_bridge->gpio_pd_n, 0);
+}
+
+static void ptn3460_post_disable(struct drm_bridge *bridge)
+{
+}
+
+void ptn3460_bridge_destroy(struct drm_bridge *bridge)
+{
+ struct ptn3460_bridge *ptn_bridge = bridge->driver_private;
+
+ drm_bridge_cleanup(bridge);
+ if (gpio_is_valid(ptn_bridge->gpio_pd_n))
+ gpio_free(ptn_bridge->gpio_pd_n);
+ if (gpio_is_valid(ptn_bridge->gpio_rst_n))
+ gpio_free(ptn_bridge->gpio_rst_n);
+	/* Nothing else to free; the rest is devm-allocated memory */
+}
+
+struct drm_bridge_funcs ptn3460_bridge_funcs = {
+ .pre_enable = ptn3460_pre_enable,
+ .enable = ptn3460_enable,
+ .disable = ptn3460_disable,
+ .post_disable = ptn3460_post_disable,
+ .destroy = ptn3460_bridge_destroy,
+};
+
+int ptn3460_get_modes(struct drm_connector *connector)
+{
+ struct ptn3460_bridge *ptn_bridge;
+ u8 *edid;
+ int ret, num_modes;
+ bool power_off;
+
+ ptn_bridge = container_of(connector, struct ptn3460_bridge, connector);
+
+ if (ptn_bridge->edid)
+ return drm_add_edid_modes(connector, ptn_bridge->edid);
+
+ power_off = !ptn_bridge->enabled;
+ ptn3460_pre_enable(ptn_bridge->bridge);
+
+	edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+	if (!edid) {
+		DRM_ERROR("Failed to allocate edid\n");
+		num_modes = 0;
+		goto out;
+	}
+
+ ret = ptn3460_read_bytes(ptn_bridge, PTN3460_EDID_ADDR, edid,
+ EDID_LENGTH);
+ if (ret) {
+ kfree(edid);
+ num_modes = 0;
+ goto out;
+ }
+
+ ptn_bridge->edid = (struct edid *)edid;
+ drm_mode_connector_update_edid_property(connector, ptn_bridge->edid);
+
+ num_modes = drm_add_edid_modes(connector, ptn_bridge->edid);
+
+out:
+ if (power_off)
+ ptn3460_disable(ptn_bridge->bridge);
+
+ return num_modes;
+}
+
+struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
+{
+ struct ptn3460_bridge *ptn_bridge;
+
+ ptn_bridge = container_of(connector, struct ptn3460_bridge, connector);
+
+ return ptn_bridge->encoder;
+}
+
+struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
+ .get_modes = ptn3460_get_modes,
+ .best_encoder = ptn3460_best_encoder,
+};
+
+enum drm_connector_status ptn3460_detect(struct drm_connector *connector,
+ bool force)
+{
+ return connector_status_connected;
+}
+
+void ptn3460_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+}
+
+struct drm_connector_funcs ptn3460_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = ptn3460_detect,
+ .destroy = ptn3460_connector_destroy,
+};
+
+int ptn3460_init(struct drm_device *dev, struct drm_encoder *encoder,
+ struct i2c_client *client, struct device_node *node)
+{
+ int ret;
+ struct drm_bridge *bridge;
+ struct ptn3460_bridge *ptn_bridge;
+
+ bridge = devm_kzalloc(dev->dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge) {
+ DRM_ERROR("Failed to allocate drm bridge\n");
+ return -ENOMEM;
+ }
+
+ ptn_bridge = devm_kzalloc(dev->dev, sizeof(*ptn_bridge), GFP_KERNEL);
+ if (!ptn_bridge) {
+ DRM_ERROR("Failed to allocate ptn bridge\n");
+ return -ENOMEM;
+ }
+
+ ptn_bridge->client = client;
+ ptn_bridge->encoder = encoder;
+ ptn_bridge->bridge = bridge;
+ ptn_bridge->gpio_pd_n = of_get_named_gpio(node, "powerdown-gpio", 0);
+ if (gpio_is_valid(ptn_bridge->gpio_pd_n)) {
+ ret = gpio_request_one(ptn_bridge->gpio_pd_n,
+ GPIOF_OUT_INIT_HIGH, "PTN3460_PD_N");
+ if (ret) {
+ DRM_ERROR("Request powerdown-gpio failed (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ ptn_bridge->gpio_rst_n = of_get_named_gpio(node, "reset-gpio", 0);
+ if (gpio_is_valid(ptn_bridge->gpio_rst_n)) {
+ /*
+ * Request the reset pin low to avoid the bridge being
+ * initialized prematurely
+ */
+ ret = gpio_request_one(ptn_bridge->gpio_rst_n,
+ GPIOF_OUT_INIT_LOW, "PTN3460_RST_N");
+ if (ret) {
+ DRM_ERROR("Request reset-gpio failed (%d)\n", ret);
+ gpio_free(ptn_bridge->gpio_pd_n);
+ return ret;
+ }
+ }
+
+ ret = of_property_read_u32(node, "edid-emulation",
+ &ptn_bridge->edid_emulation);
+ if (ret) {
+ DRM_ERROR("Can't read edid emulation value\n");
+ goto err;
+ }
+
+ ret = drm_bridge_init(dev, bridge, &ptn3460_bridge_funcs);
+ if (ret) {
+ DRM_ERROR("Failed to initialize bridge with drm\n");
+ goto err;
+ }
+
+ bridge->driver_private = ptn_bridge;
+ encoder->bridge = bridge;
+
+ ret = drm_connector_init(dev, &ptn_bridge->connector,
+ &ptn3460_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ goto err;
+ }
+ drm_connector_helper_add(&ptn_bridge->connector,
+ &ptn3460_connector_helper_funcs);
+ drm_sysfs_connector_add(&ptn_bridge->connector);
+ drm_mode_connector_attach_encoder(&ptn_bridge->connector, encoder);
+
+ return 0;
+
+err:
+ if (gpio_is_valid(ptn_bridge->gpio_pd_n))
+ gpio_free(ptn_bridge->gpio_pd_n);
+ if (gpio_is_valid(ptn_bridge->gpio_rst_n))
+ gpio_free(ptn_bridge->gpio_rst_n);
+ return ret;
+}
+EXPORT_SYMBOL(ptn3460_init);
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
new file mode 100644
index 00000000000..9864559e5fb
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -0,0 +1,13 @@
+config DRM_CIRRUS_QEMU
+ tristate "Cirrus driver for QEMU emulated device"
+ depends on DRM && PCI
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_TTM
+ help
+	  This is a KMS driver for the emulated cirrus device in qemu.
+	  It is *NOT* intended for real cirrus devices. It requires
+	  the modesetting userspace X.org driver.
diff --git a/drivers/gpu/drm/cirrus/Makefile b/drivers/gpu/drm/cirrus/Makefile
new file mode 100644
index 00000000000..69ffe7006d5
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -Iinclude/drm
+cirrus-y := cirrus_main.o cirrus_mode.o \
+ cirrus_drv.o cirrus_fbdev.o cirrus_ttm.o
+
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
new file mode 100644
index 00000000000..08ce520f61a
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2012 Red Hat <mjg@redhat.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "cirrus_drv.h"
+
+int cirrus_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, cirrus_modeset, int, 0400);
+
+/*
+ * This is the generic driver code. This binds the driver to the drm core,
+ * which then performs further device association and calls our graphics init
+ * functions
+ */
+
+static struct drm_driver driver;
+
+/* only bind to the cirrus chip in qemu */
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
+ 0, 0 },
+ {0,}
+};
+
+static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+ remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
+ kfree(ap);
+
+ return 0;
+}
+
+static int cirrus_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret;
+
+ ret = cirrus_kick_out_firmware_fb(pdev);
+ if (ret)
+ return ret;
+
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void cirrus_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static int cirrus_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct cirrus_device *cdev = drm_dev->dev_private;
+
+ drm_kms_helper_poll_disable(drm_dev);
+
+ if (cdev->mode_info.gfbdev) {
+ console_lock();
+ fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 1);
+ console_unlock();
+ }
+
+ return 0;
+}
+
+static int cirrus_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct cirrus_device *cdev = drm_dev->dev_private;
+
+ drm_helper_resume_force_mode(drm_dev);
+
+ if (cdev->mode_info.gfbdev) {
+ console_lock();
+ fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0);
+ console_unlock();
+ }
+
+ drm_kms_helper_poll_enable(drm_dev);
+ return 0;
+}
+
+static const struct file_operations cirrus_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = cirrus_mmap,
+ .poll = drm_poll,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+};
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM,
+ .load = cirrus_driver_load,
+ .unload = cirrus_driver_unload,
+ .fops = &cirrus_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+ .gem_free_object = cirrus_gem_free_object,
+ .dumb_create = cirrus_dumb_create,
+ .dumb_map_offset = cirrus_dumb_mmap_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+};
+
+static const struct dev_pm_ops cirrus_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cirrus_pm_suspend,
+ cirrus_pm_resume)
+};
+
+static struct pci_driver cirrus_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = cirrus_pci_probe,
+ .remove = cirrus_pci_remove,
+ .driver.pm = &cirrus_pm_ops,
+};
+
+static int __init cirrus_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && cirrus_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (cirrus_modeset == 0)
+ return -EINVAL;
+ return drm_pci_init(&driver, &cirrus_pci_driver);
+}
+
+static void __exit cirrus_exit(void)
+{
+ drm_pci_exit(&driver, &cirrus_pci_driver);
+}
+
+module_init(cirrus_init);
+module_exit(cirrus_exit);
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
new file mode 100644
index 00000000000..117d3eca5e3
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -0,0 +1,260 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#ifndef __CIRRUS_DRV_H__
+#define __CIRRUS_DRV_H__
+
+#include <video/vga.h>
+
+#include <drm/drm_fb_helper.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_memory.h>
+#include <drm/ttm/ttm_module.h>
+
+#define DRIVER_AUTHOR "Matthew Garrett"
+
+#define DRIVER_NAME "cirrus"
+#define DRIVER_DESC "qemu Cirrus emulation"
+#define DRIVER_DATE "20110418"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+#define CIRRUSFB_CONN_LIMIT 1
+
+#define RREG8(reg) ioread8(((void __iomem *)cdev->rmmio) + (reg))
+#define WREG8(reg, v) iowrite8(v, ((void __iomem *)cdev->rmmio) + (reg))
+#define RREG32(reg) ioread32(((void __iomem *)cdev->rmmio) + (reg))
+#define WREG32(reg, v) iowrite32(v, ((void __iomem *)cdev->rmmio) + (reg))
+
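+/* VGA-style banked access: write the index port, then read/write the data port */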
+#define SEQ_INDEX 4
+#define SEQ_DATA 5
+
+#define WREG_SEQ(reg, v) \
+ do { \
+ WREG8(SEQ_INDEX, reg); \
+ WREG8(SEQ_DATA, v); \
+	} while (0)
+
+#define CRT_INDEX 0x14
+#define CRT_DATA 0x15
+
+#define WREG_CRT(reg, v) \
+ do { \
+ WREG8(CRT_INDEX, reg); \
+ WREG8(CRT_DATA, v); \
+	} while (0)
+
+#define GFX_INDEX 0xe
+#define GFX_DATA 0xf
+
+#define WREG_GFX(reg, v) \
+ do { \
+ WREG8(GFX_INDEX, reg); \
+ WREG8(GFX_DATA, v); \
+	} while (0)
+
+/*
+ * Cirrus has a "hidden" DAC register that can be accessed by writing to
+ * the pixel mask register to reset the state, then reading from the register
+ * four times. The next write will then pass to the DAC.
+ */
+#define VGA_DAC_MASK 0x6
+
+#define WREG_HDR(v) \
+ do { \
+ RREG8(VGA_DAC_MASK); \
+ RREG8(VGA_DAC_MASK); \
+ RREG8(VGA_DAC_MASK); \
+ RREG8(VGA_DAC_MASK); \
+ WREG8(VGA_DAC_MASK, v); \
+	} while (0)
+
+#define CIRRUS_MAX_FB_HEIGHT 4096
+#define CIRRUS_MAX_FB_WIDTH 4096
+
+#define CIRRUS_DPMS_CLEARED (-1)
+
+#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
+#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
+#define to_cirrus_framebuffer(x) container_of(x, struct cirrus_framebuffer, base)
+
+struct cirrus_crtc {
+ struct drm_crtc base;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ int last_dpms;
+ bool enabled;
+};
+
+struct cirrus_fbdev;
+struct cirrus_mode_info {
+ bool mode_config_initialized;
+ struct cirrus_crtc *crtc;
+ /* pointer to fbdev info structure */
+ struct cirrus_fbdev *gfbdev;
+};
+
+struct cirrus_encoder {
+ struct drm_encoder base;
+ int last_dpms;
+};
+
+struct cirrus_connector {
+ struct drm_connector base;
+};
+
+struct cirrus_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct cirrus_mc {
+ resource_size_t vram_size;
+ resource_size_t vram_base;
+};
+
+struct cirrus_device {
+ struct drm_device *dev;
+ unsigned long flags;
+
+ resource_size_t rmmio_base;
+ resource_size_t rmmio_size;
+ void __iomem *rmmio;
+
+ struct cirrus_mc mc;
+ struct cirrus_mode_info mode_info;
+
+ int num_crtc;
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ } ttm;
+ bool mm_inited;
+};
+
+
+struct cirrus_fbdev {
+ struct drm_fb_helper helper;
+ struct cirrus_framebuffer gfb;
+ struct list_head fbdev_list;
+ void *sysram;
+ int size;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
+};
+
+struct cirrus_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)
+
+static inline struct cirrus_bo *
+cirrus_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct cirrus_bo, bo);
+}
+
+#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+ /* cirrus_mode.c */
+void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+
+ /* cirrus_main.c */
+int cirrus_device_init(struct cirrus_device *cdev,
+ struct drm_device *ddev,
+ struct pci_dev *pdev,
+ uint32_t flags);
+void cirrus_device_fini(struct cirrus_device *cdev);
+void cirrus_gem_free_object(struct drm_gem_object *obj);
+int cirrus_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset);
+int cirrus_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+int cirrus_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+int cirrus_framebuffer_init(struct drm_device *dev,
+ struct cirrus_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+ /* cirrus_display.c */
+int cirrus_modeset_init(struct cirrus_device *cdev);
+void cirrus_modeset_fini(struct cirrus_device *cdev);
+
+ /* cirrus_fbdev.c */
+int cirrus_fbdev_init(struct cirrus_device *cdev);
+void cirrus_fbdev_fini(struct cirrus_device *cdev);
+
+ /* cirrus_irq.c */
+void cirrus_driver_irq_preinstall(struct drm_device *dev);
+int cirrus_driver_irq_postinstall(struct drm_device *dev);
+void cirrus_driver_irq_uninstall(struct drm_device *dev);
+irqreturn_t cirrus_driver_irq_handler(int irq, void *arg);
+
+ /* cirrus_kms.c */
+int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
+int cirrus_driver_unload(struct drm_device *dev);
+extern struct drm_ioctl_desc cirrus_ioctls[];
+extern int cirrus_max_ioctl;
+
+int cirrus_mm_init(struct cirrus_device *cirrus);
+void cirrus_mm_fini(struct cirrus_device *cirrus);
+void cirrus_ttm_placement(struct cirrus_bo *bo, int domain);
+int cirrus_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct cirrus_bo **pcirrusbo);
+int cirrus_mmap(struct file *filp, struct vm_area_struct *vma);
+
+static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ if (ret) {
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+int cirrus_bo_push_sysram(struct cirrus_bo *bo);
+int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
+#endif /* __CIRRUS_DRV_H__ */
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
new file mode 100644
index 00000000000..32bbba0a787
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include <linux/fb.h>
+
+#include "cirrus_drv.h"
+
+static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
+ int x, int y, int width, int height)
+{
+ int i;
+ struct drm_gem_object *obj;
+ struct cirrus_bo *bo;
+ int src_offset, dst_offset;
+ int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
+ int ret = -EBUSY;
+ bool unmap = false;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;
+
+ obj = afbdev->gfb.obj;
+ bo = gem_to_cirrus_bo(obj);
+
+ /*
+ * try and reserve the BO, if we fail with busy
+ * then the BO is being moved and we should
+ * store up the damage until later.
+ */
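+	/* ret stays -EBUSY when we may not sleep, which also defers the update */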
+ if (drm_can_sleep())
+ ret = cirrus_bo_reserve(bo, true);
+ if (ret) {
+ if (ret != -EBUSY)
+ return;
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
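+	/* grow the rectangle to cover damage recorded by earlier deferred updates */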
+ if (afbdev->y1 < y)
+ y = afbdev->y1;
+ if (afbdev->y2 > y2)
+ y2 = afbdev->y2;
+ if (afbdev->x1 < x)
+ x = afbdev->x1;
+ if (afbdev->x2 > x2)
+ x2 = afbdev->x2;
+
+ if (store_for_later) {
+ afbdev->x1 = x;
+ afbdev->x2 = x2;
+ afbdev->y1 = y;
+ afbdev->y2 = y2;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+ return;
+ }
+
+ afbdev->x1 = afbdev->y1 = INT_MAX;
+ afbdev->x2 = afbdev->y2 = 0;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ cirrus_bo_unreserve(bo);
+ return;
+ }
+ unmap = true;
+ }
+ for (i = y; i < y + height; i++) {
+ /* assume equal stride for now */
+ src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
+		memcpy_toio(bo->kmap.virtual + dst_offset, afbdev->sysram + src_offset, width * bpp);
+	}
+ if (unmap)
+ ttm_bo_kunmap(&bo->kmap);
+
+ cirrus_bo_unreserve(bo);
+}
+
+static void cirrus_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct cirrus_fbdev *afbdev = info->par;
+ sys_fillrect(info, rect);
+ cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+static void cirrus_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct cirrus_fbdev *afbdev = info->par;
+ sys_copyarea(info, area);
+ cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
+ area->height);
+}
+
+static void cirrus_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct cirrus_fbdev *afbdev = info->par;
+ sys_imageblit(info, image);
+ cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+static struct fb_ops cirrusfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = cirrus_fillrect,
+ .fb_copyarea = cirrus_copyarea,
+ .fb_imageblit = cirrus_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ u32 bpp, depth;
+ u32 size;
+ struct drm_gem_object *gobj;
+
+ int ret = 0;
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ if (bpp > 24)
+ return -EINVAL;
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = cirrus_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int cirrusfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+	struct cirrus_fbdev *gfbdev =
+		container_of(helper, struct cirrus_fbdev, helper);
+ struct drm_device *dev = gfbdev->helper.dev;
+ struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct device *device = &dev->pdev->dev;
+ void *sysram;
+ struct drm_gem_object *gobj = NULL;
+ struct cirrus_bo *bo = NULL;
+ int size, ret;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+
+ bo = gem_to_cirrus_bo(gobj);
+
+ sysram = vmalloc(size);
+ if (!sysram)
+ return -ENOMEM;
+
+ info = framebuffer_alloc(0, device);
+ if (info == NULL)
+ return -ENOMEM;
+
+ info->par = gfbdev;
+
+ ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
+ if (ret)
+ return ret;
+
+ gfbdev->sysram = sysram;
+ gfbdev->size = size;
+
+	fb = &gfbdev->gfb.base;
+
+ /* setup helper */
+ gfbdev->helper.fb = fb;
+ gfbdev->helper.fbdev = info;
+
+ strcpy(info->fix.id, "cirrusdrmfb");
+
+ info->flags = FBINFO_DEFAULT;
+ info->fbops = &cirrusfb_ops;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ /* setup aperture base/size for vesafb takeover */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+		goto out_error;
+ }
+ info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = cdev->mc.vram_size;
+
+ info->fix.smem_start = cdev->dev->mode_config.fb_base;
+ info->fix.smem_len = cdev->mc.vram_size;
+
+ info->screen_base = sysram;
+ info->screen_size = size;
+
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+ ret = -ENOMEM;
+		goto out_error;
+ }
+
+ DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
+ DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
+ DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
+ DRM_INFO("fb depth is %d\n", fb->depth);
+ DRM_INFO(" pitch is %d\n", fb->pitches[0]);
+
+ return 0;
+out_error:
+ return ret;
+}
+
+static int cirrus_fbdev_destroy(struct drm_device *dev,
+ struct cirrus_fbdev *gfbdev)
+{
+ struct fb_info *info;
+ struct cirrus_framebuffer *gfb = &gfbdev->gfb;
+
+ if (gfbdev->helper.fbdev) {
+ info = gfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (gfb->obj) {
+ drm_gem_object_unreference_unlocked(gfb->obj);
+ gfb->obj = NULL;
+ }
+
+ vfree(gfbdev->sysram);
+ drm_fb_helper_fini(&gfbdev->helper);
+ drm_framebuffer_unregister_private(&gfb->base);
+ drm_framebuffer_cleanup(&gfb->base);
+
+ return 0;
+}
+
+static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
+ .gamma_set = cirrus_crtc_fb_gamma_set,
+ .gamma_get = cirrus_crtc_fb_gamma_get,
+ .fb_probe = cirrusfb_create,
+};
+
+int cirrus_fbdev_init(struct cirrus_device *cdev)
+{
+ struct cirrus_fbdev *gfbdev;
+ int ret;
+ int bpp_sel = 24;
+
+ /*bpp_sel = 8;*/
+ gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
+ if (!gfbdev)
+ return -ENOMEM;
+
+ cdev->mode_info.gfbdev = gfbdev;
+ gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
+ spin_lock_init(&gfbdev->dirty_lock);
+
+ ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
+ cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
+ if (ret) {
+ kfree(gfbdev);
+ return ret;
+ }
+ drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(cdev->dev);
+ drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
+
+ return 0;
+}
+
+void cirrus_fbdev_fini(struct cirrus_device *cdev)
+{
+ if (!cdev->mode_info.gfbdev)
+ return;
+
+ cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
+ kfree(cdev->mode_info.gfbdev);
+ cdev->mode_info.gfbdev = NULL;
+}
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
new file mode 100644
index 00000000000..99c1983f99d
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "cirrus_drv.h"
+
+static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
+ if (cirrus_fb->obj)
+ drm_gem_object_unreference_unlocked(cirrus_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
+ .destroy = cirrus_user_framebuffer_destroy,
+};
+
+int cirrus_framebuffer_init(struct drm_device *dev,
+ struct cirrus_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
+ ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
+ if (ret) {
+ DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static struct drm_framebuffer *
+cirrus_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct cirrus_framebuffer *cirrus_fb;
+ int ret;
+ u32 bpp, depth;
+
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ /* cirrus can't handle > 24bpp framebuffers at all */
+ if (bpp > 24)
+ return ERR_PTR(-EINVAL);
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
+ if (!cirrus_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(cirrus_fb);
+ return ERR_PTR(ret);
+ }
+ return &cirrus_fb->base;
+}
+
+static const struct drm_mode_config_funcs cirrus_mode_funcs = {
+ .fb_create = cirrus_user_framebuffer_create,
+};
+
+/* Unmap the register MMIO and release the VRAM region */
+static void cirrus_vram_fini(struct cirrus_device *cdev)
+{
+ iounmap(cdev->rmmio);
+ cdev->rmmio = NULL;
+ if (cdev->mc.vram_base)
+ release_mem_region(cdev->mc.vram_base, cdev->mc.vram_size);
+}
+
+/* Reserve the VRAM region on the card and configure the core */
+static int cirrus_vram_init(struct cirrus_device *cdev)
+{
+ /* BAR 0 is VRAM */
+ cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
+ /* We have 4MB of VRAM */
+ cdev->mc.vram_size = 4 * 1024 * 1024;
+
+ if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
+ "cirrusdrmfb_vram")) {
+ DRM_ERROR("can't reserve VRAM\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Our emulated hardware has two sets of memory. One is video RAM and can
+ * simply be used as a linear framebuffer - the other provides mmio access
+ * to the display registers. The latter can also be accessed via IO port
+ * access, but we map the range and use mmio to program them instead
+ */
+
+int cirrus_device_init(struct cirrus_device *cdev,
+ struct drm_device *ddev,
+ struct pci_dev *pdev, uint32_t flags)
+{
+ int ret;
+
+ cdev->dev = ddev;
+ cdev->flags = flags;
+
+ /* Hardcode the number of CRTCs to 1 */
+ cdev->num_crtc = 1;
+
+ /* BAR 0 is the framebuffer, BAR 1 contains registers */
+ cdev->rmmio_base = pci_resource_start(cdev->dev->pdev, 1);
+ cdev->rmmio_size = pci_resource_len(cdev->dev->pdev, 1);
+
+ if (!request_mem_region(cdev->rmmio_base, cdev->rmmio_size,
+ "cirrusdrmfb_mmio")) {
+ DRM_ERROR("can't reserve mmio registers\n");
+ return -ENOMEM;
+ }
+
+ cdev->rmmio = ioremap(cdev->rmmio_base, cdev->rmmio_size);
+
+ if (cdev->rmmio == NULL)
+ return -ENOMEM;
+
+ ret = cirrus_vram_init(cdev);
+ if (ret) {
+ release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
+ return ret;
+ }
+
+ return 0;
+}
+
+void cirrus_device_fini(struct cirrus_device *cdev)
+{
+ release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
+ cirrus_vram_fini(cdev);
+}
+
+/*
+ * Functions here will be called by the core once it's bound the driver to
+ * a PCI device
+ */
+
+int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct cirrus_device *cdev;
+ int r;
+
+ cdev = kzalloc(sizeof(struct cirrus_device), GFP_KERNEL);
+ if (cdev == NULL)
+ return -ENOMEM;
+ dev->dev_private = (void *)cdev;
+
+ r = cirrus_device_init(cdev, dev, dev->pdev, flags);
+ if (r) {
+ dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
+ goto out;
+ }
+
+ r = cirrus_mm_init(cdev);
+ if (r)
+ dev_err(&dev->pdev->dev, "fatal err on mm init\n");
+
+ r = cirrus_modeset_init(cdev);
+ if (r)
+ dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+
+ dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
+out:
+ if (r)
+ cirrus_driver_unload(dev);
+ return r;
+}
+
+int cirrus_driver_unload(struct drm_device *dev)
+{
+ struct cirrus_device *cdev = dev->dev_private;
+
+ if (cdev == NULL)
+ return 0;
+ cirrus_modeset_fini(cdev);
+ cirrus_mm_fini(cdev);
+ cirrus_device_fini(cdev);
+ kfree(cdev);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int cirrus_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct cirrus_bo *cirrusbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = cirrus_bo_create(dev, size, 0, 0, &cirrusbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &cirrusbo->gem;
+ return 0;
+}
+
+int cirrus_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+	ret = cirrus_gem_create(dev, args->size, false, &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
+
+static void cirrus_bo_unref(struct cirrus_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ *bo = NULL;
+}
+
+void cirrus_gem_free_object(struct drm_gem_object *obj)
+{
+ struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj);
+
+ cirrus_bo_unref(&cirrus_bo);
+}
+
+static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
+{
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+int
+cirrus_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct cirrus_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_cirrus_bo(obj);
+ *offset = cirrus_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
new file mode 100644
index 00000000000..49332c5fe35
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -0,0 +1,622 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ *
+ * Portions of this code derived from cirrusfb.c:
+ * drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
+ *
+ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include <video/cirrus.h>
+
+#include "cirrus_drv.h"
+
+#define CIRRUS_LUT_SIZE 256
+
+#define PALETTE_INDEX 0x8
+#define PALETTE_DATA 0x9
+
+/*
+ * This file contains setup code for the CRTC.
+ */
+
+static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ int i;
+
+ if (!crtc->enabled)
+ return;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ /* VGA registers */
+ WREG8(PALETTE_INDEX, i);
+ WREG8(PALETTE_DATA, cirrus_crtc->lut_r[i]);
+ WREG8(PALETTE_DATA, cirrus_crtc->lut_g[i]);
+ WREG8(PALETTE_DATA, cirrus_crtc->lut_b[i]);
+ }
+}
+
+/*
+ * The DRM core requires DPMS functions. For this device they amount to
+ * blanking or unblanking the display via the SR01 and GR0E registers
+ */
+
+static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ u8 sr01, gr0e;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ sr01 = 0x00;
+ gr0e = 0x00;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ sr01 = 0x20;
+ gr0e = 0x02;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ sr01 = 0x20;
+ gr0e = 0x04;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ sr01 = 0x20;
+ gr0e = 0x06;
+ break;
+ default:
+ return;
+ }
+
+ WREG8(SEQ_INDEX, 0x1);
+ sr01 |= RREG8(SEQ_DATA) & ~0x20;
+ WREG_SEQ(0x1, sr01);
+
+ WREG8(GFX_INDEX, 0xe);
+ gr0e |= RREG8(GFX_DATA) & ~0x06;
+ WREG_GFX(0xe, gr0e);
+}
+
+/*
+ * The core passes the desired mode to the CRTC code to see whether any
+ * CRTC-specific modifications need to be made to it. We're in a position
+ * to just pass that straight through, so this does nothing
+ */
+static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
+{
+ struct cirrus_device *cdev = crtc->dev->dev_private;
+ u32 addr;
+ u8 tmp;
+
+ addr = offset >> 2;
+ WREG_CRT(0x0c, (u8)((addr >> 8) & 0xff));
+ WREG_CRT(0x0d, (u8)(addr & 0xff));
+
+ WREG8(CRT_INDEX, 0x1b);
+ tmp = RREG8(CRT_DATA);
+ tmp &= 0xf2;
+ tmp |= (addr >> 16) & 0x01;
+ tmp |= (addr >> 15) & 0x0c;
+ WREG_CRT(0x1b, tmp);
+ WREG8(CRT_INDEX, 0x1d);
+ tmp = RREG8(CRT_DATA);
+ tmp &= 0x7f;
+ tmp |= (addr >> 12) & 0x80;
+ WREG_CRT(0x1d, tmp);
+}
+
+/* cirrus is different - we will force move buffers out of VRAM */
+static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct cirrus_device *cdev = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
+ struct cirrus_framebuffer *cirrus_fb;
+ struct cirrus_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* push the previous fb to system ram */
+ if (!atomic && fb) {
+ cirrus_fb = to_cirrus_framebuffer(fb);
+ obj = cirrus_fb->obj;
+ bo = gem_to_cirrus_bo(obj);
+ ret = cirrus_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+ cirrus_bo_push_sysram(bo);
+ cirrus_bo_unreserve(bo);
+ }
+
+ cirrus_fb = to_cirrus_framebuffer(crtc->primary->fb);
+ obj = cirrus_fb->obj;
+ bo = gem_to_cirrus_bo(obj);
+
+ ret = cirrus_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = cirrus_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ cirrus_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&cdev->mode_info.gfbdev->gfb == cirrus_fb) {
+		/* if pushing the console fb, kmap it */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret)
+ DRM_ERROR("failed to kmap fbcon\n");
+ }
+ cirrus_bo_unreserve(bo);
+
+ cirrus_set_start_address(crtc, (u32)gpu_addr);
+ return 0;
+}
+
+static int cirrus_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+/*
+ * The meat of this driver. The core passes us a mode and we have to program
+ * it. The modesetting here is the bare minimum required to satisfy the qemu
+ * emulation of this hardware, and running this against a real device is
+ * likely to result in an inadequately programmed mode. We've already had
+ * the opportunity to modify the mode, so whatever we receive here should
+ * be something that can be correctly programmed and displayed
+ */
+static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ int hsyncstart, hsyncend, htotal, hdispend;
+ int vtotal, vdispend;
+ int tmp;
+ int sr07 = 0, hdr = 0;
+
+ htotal = mode->htotal / 8;
+ hsyncend = mode->hsync_end / 8;
+ hsyncstart = mode->hsync_start / 8;
+ hdispend = mode->hdisplay / 8;
+
+ vtotal = mode->vtotal;
+ vdispend = mode->vdisplay;
+
+ vdispend -= 1;
+ vtotal -= 2;
+
+ htotal -= 5;
+ hdispend -= 1;
+ hsyncstart += 1;
+ hsyncend += 1;
+
+ WREG_CRT(VGA_CRTC_V_SYNC_END, 0x20);
+ WREG_CRT(VGA_CRTC_H_TOTAL, htotal);
+ WREG_CRT(VGA_CRTC_H_DISP, hdispend);
+ WREG_CRT(VGA_CRTC_H_SYNC_START, hsyncstart);
+ WREG_CRT(VGA_CRTC_H_SYNC_END, hsyncend);
+ WREG_CRT(VGA_CRTC_V_TOTAL, vtotal & 0xff);
+ WREG_CRT(VGA_CRTC_V_DISP_END, vdispend & 0xff);
+
+ tmp = 0x40;
+ if ((vdispend + 1) & 512)
+ tmp |= 0x20;
+ WREG_CRT(VGA_CRTC_MAX_SCAN, tmp);
+
+ /*
+ * Overflow bits for values that don't fit in the standard registers
+ */
+ tmp = 16;
+ if (vtotal & 256)
+ tmp |= 1;
+ if (vdispend & 256)
+ tmp |= 2;
+ if ((vdispend + 1) & 256)
+ tmp |= 8;
+ if (vtotal & 512)
+ tmp |= 32;
+ if (vdispend & 512)
+ tmp |= 64;
+ WREG_CRT(VGA_CRTC_OVERFLOW, tmp);
+
+ tmp = 0;
+
+ /* More overflow bits */
+
+ if ((htotal + 5) & 64)
+ tmp |= 16;
+ if ((htotal + 5) & 128)
+ tmp |= 32;
+ if (vtotal & 256)
+ tmp |= 64;
+ if (vtotal & 512)
+ tmp |= 128;
+
+ WREG_CRT(CL_CRT1A, tmp);
+
+ /* Disable Hercules/CGA compatibility */
+ WREG_CRT(VGA_CRTC_MODE, 0x03);
+
+ WREG8(SEQ_INDEX, 0x7);
+ sr07 = RREG8(SEQ_DATA);
+ sr07 &= 0xe0;
+ hdr = 0;
+ switch (crtc->primary->fb->bits_per_pixel) {
+ case 8:
+ sr07 |= 0x11;
+ break;
+ case 16:
+ sr07 |= 0x17;
+ hdr = 0xc1;
+ break;
+ case 24:
+ sr07 |= 0x15;
+ hdr = 0xc5;
+ break;
+ case 32:
+ sr07 |= 0x19;
+ hdr = 0xc5;
+ break;
+ default:
+ return -1;
+ }
+
+ WREG_SEQ(0x7, sr07);
+
+ /* Program the pitch */
+ tmp = crtc->primary->fb->pitches[0] / 8;
+ WREG_CRT(VGA_CRTC_OFFSET, tmp);
+
+ /* Enable extended blanking and pitch bits, and enable full memory */
+ tmp = 0x22;
+ tmp |= (crtc->primary->fb->pitches[0] >> 7) & 0x10;
+ tmp |= (crtc->primary->fb->pitches[0] >> 6) & 0x40;
+ WREG_CRT(0x1b, tmp);
+
+ /* Enable high-colour modes */
+ WREG_GFX(VGA_GFX_MODE, 0x40);
+
+ /* And set graphics mode */
+ WREG_GFX(VGA_GFX_MISC, 0x01);
+
+ WREG_HDR(hdr);
+ cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
+
+ /* Unblank (needed on S3 resume, vgabios doesn't do it then) */
+ outb(0x20, 0x3c0);
+ return 0;
+}
+
+/*
+ * This is called before a mode is programmed. A typical use might be to
+ * enable DPMS during the programming to avoid seeing intermediate stages,
+ * but that's not relevant to us
+ */
+static void cirrus_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void cirrus_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode so can't perform smooth fades on deeper modes,
+ * but it's a requirement that we provide the function
+ */
+static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+ int i;
+
+ if (size != CIRRUS_LUT_SIZE)
+ return;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ cirrus_crtc->lut_r[i] = red[i];
+ cirrus_crtc->lut_g[i] = green[i];
+ cirrus_crtc->lut_b[i] = blue[i];
+ }
+ cirrus_crtc_load_lut(crtc);
+}
+
+/* Simple cleanup function */
+static void cirrus_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(cirrus_crtc);
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs cirrus_crtc_funcs = {
+ .gamma_set = cirrus_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = cirrus_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
+ .dpms = cirrus_crtc_dpms,
+ .mode_fixup = cirrus_crtc_mode_fixup,
+ .mode_set = cirrus_crtc_mode_set,
+ .mode_set_base = cirrus_crtc_mode_set_base,
+ .prepare = cirrus_crtc_prepare,
+ .commit = cirrus_crtc_commit,
+ .load_lut = cirrus_crtc_load_lut,
+};
+
+/* CRTC setup */
+static void cirrus_crtc_init(struct drm_device *dev)
+{
+ struct cirrus_device *cdev = dev->dev_private;
+ struct cirrus_crtc *cirrus_crtc;
+ int i;
+
+ cirrus_crtc = kzalloc(sizeof(struct cirrus_crtc) +
+ (CIRRUSFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ GFP_KERNEL);
+
+ if (cirrus_crtc == NULL)
+ return;
+
+ drm_crtc_init(dev, &cirrus_crtc->base, &cirrus_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&cirrus_crtc->base, CIRRUS_LUT_SIZE);
+ cdev->mode_info.crtc = cirrus_crtc;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ cirrus_crtc->lut_r[i] = i;
+ cirrus_crtc->lut_g[i] = i;
+ cirrus_crtc->lut_b[i] = i;
+ }
+
+ drm_crtc_helper_add(&cirrus_crtc->base, &cirrus_helper_funcs);
+}
+
+/** Sets the color ramps on behalf of fbcon */
+void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+ cirrus_crtc->lut_r[regno] = red;
+ cirrus_crtc->lut_g[regno] = green;
+ cirrus_crtc->lut_b[regno] = blue;
+}
+
+/** Gets the color ramps on behalf of fbcon */
+void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+ *red = cirrus_crtc->lut_r[regno];
+ *green = cirrus_crtc->lut_g[regno];
+ *blue = cirrus_crtc->lut_b[regno];
+}
+
+
+static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void cirrus_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void cirrus_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+}
+
+static void cirrus_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void cirrus_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static void cirrus_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder);
+
+	drm_encoder_cleanup(encoder);
+ kfree(cirrus_encoder);
+}
+
+static const struct drm_encoder_helper_funcs cirrus_encoder_helper_funcs = {
+ .dpms = cirrus_encoder_dpms,
+ .mode_fixup = cirrus_encoder_mode_fixup,
+ .mode_set = cirrus_encoder_mode_set,
+ .prepare = cirrus_encoder_prepare,
+ .commit = cirrus_encoder_commit,
+};
+
+static const struct drm_encoder_funcs cirrus_encoder_encoder_funcs = {
+ .destroy = cirrus_encoder_destroy,
+};
+
+static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct cirrus_encoder *cirrus_encoder;
+
+ cirrus_encoder = kzalloc(sizeof(struct cirrus_encoder), GFP_KERNEL);
+ if (!cirrus_encoder)
+ return NULL;
+
+ encoder = &cirrus_encoder->base;
+ encoder->possible_crtcs = 0x1;
+
+ drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs);
+
+ return encoder;
+}
+
+
+static int cirrus_vga_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ /* Just add a static list of modes */
+ count = drm_add_modes_noedid(connector, 1280, 1024);
+ drm_set_preferred_mode(connector, 1024, 768);
+ return count;
+}
+
+static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
+ *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ /* pick the encoder ids */
+ if (enc_id) {
+		obj = drm_mode_object_find(connector->dev, enc_id,
+					   DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static enum drm_connector_status cirrus_vga_detect(struct drm_connector
+ *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void cirrus_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
+ .get_modes = cirrus_vga_get_modes,
+ .best_encoder = cirrus_connector_best_encoder,
+};
+
+struct drm_connector_funcs cirrus_vga_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = cirrus_vga_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = cirrus_connector_destroy,
+};
+
+static struct drm_connector *cirrus_vga_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct cirrus_connector *cirrus_connector;
+
+ cirrus_connector = kzalloc(sizeof(struct cirrus_connector), GFP_KERNEL);
+ if (!cirrus_connector)
+ return NULL;
+
+ connector = &cirrus_connector->base;
+
+ drm_connector_init(dev, connector,
+ &cirrus_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
+
+ return connector;
+}
+
+
+int cirrus_modeset_init(struct cirrus_device *cdev)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ drm_mode_config_init(cdev->dev);
+ cdev->mode_info.mode_config_initialized = true;
+
+ cdev->dev->mode_config.max_width = CIRRUS_MAX_FB_WIDTH;
+ cdev->dev->mode_config.max_height = CIRRUS_MAX_FB_HEIGHT;
+
+ cdev->dev->mode_config.fb_base = cdev->mc.vram_base;
+ cdev->dev->mode_config.preferred_depth = 24;
+ /* don't prefer a shadow on virt GPU */
+ cdev->dev->mode_config.prefer_shadow = 0;
+
+ cirrus_crtc_init(cdev->dev);
+
+ encoder = cirrus_encoder_init(cdev->dev);
+ if (!encoder) {
+ DRM_ERROR("cirrus_encoder_init failed\n");
+ return -1;
+ }
+
+ connector = cirrus_vga_init(cdev->dev);
+ if (!connector) {
+ DRM_ERROR("cirrus_vga_init failed\n");
+ return -1;
+ }
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ ret = cirrus_fbdev_init(cdev);
+ if (ret) {
+ DRM_ERROR("cirrus_fbdev_init failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void cirrus_modeset_fini(struct cirrus_device *cdev)
+{
+ cirrus_fbdev_fini(cdev);
+
+ if (cdev->mode_info.mode_config_initialized) {
+ drm_mode_config_cleanup(cdev->dev);
+ cdev->mode_info.mode_config_initialized = false;
+ }
+}
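
Everything in cirrus_crtc_mode_set() above is written in terms of WREG8(),
WREG_CRT(), WREG_SEQ() and friends. Those helpers live in cirrus_drv.h, which
is not shown here; they are assumed to wrap the classic VGA index/data access
pattern over the chip's MMIO aperture, roughly along these lines (a sketch,
not the literal header):

/* Write one byte into the MMIO aperture at the given VGA register offset. */
#define WREG8(reg, v)	iowrite8(v, ((void __iomem *)cdev->rmmio) + (reg))

/* Program an indexed CRTC register: index port first, then the data port. */
#define WREG_CRT(reg, v)			\
	do {					\
		WREG8(CRT_INDEX, reg);		\
		WREG8(CRT_DATA, v);		\
	} while (0)

This is why sequences like "WREG8(CRT_INDEX, 0x1b); tmp = RREG8(CRT_DATA);"
appear throughout: a read-modify-write of an indexed register writes the index
once, after which the data port accesses that register.
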
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
new file mode 100644
index 00000000000..92e6b778609
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include "cirrus_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct cirrus_device *
+cirrus_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct cirrus_device, ttm.bdev);
+}
+
+static int
+cirrus_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+cirrus_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &cirrus->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &cirrus_ttm_mem_global_init;
+ global_ref->release = &cirrus_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ cirrus->ttm.bo_global_ref.mem_glob =
+ cirrus->ttm.mem_global_ref.object;
+ global_ref = &cirrus->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&cirrus->ttm.mem_global_ref);
+ return r;
+ }
+ return 0;
+}
+
+static void
+cirrus_ttm_global_release(struct cirrus_device *cirrus)
+{
+ if (cirrus->ttm.mem_global_ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&cirrus->ttm.mem_global_ref);
+ cirrus->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct cirrus_bo *bo;
+
+ bo = container_of(tbo, struct cirrus_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+static bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &cirrus_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int
+cirrus_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct cirrus_bo *cirrusbo = cirrus_bo(bo);
+
+ if (!cirrus_ttm_bo_is_cirrus_bo(bo))
+ return;
+
+ cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_SYSTEM);
+ *pl = cirrusbo->placement;
+}
+
+static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ struct cirrus_bo *cirrusbo = cirrus_bo(bo);
+
+ return drm_vma_node_verify_access(&cirrusbo->gem.vma_node, filp);
+}
+
+static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct cirrus_device *cirrus = cirrus_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(cirrus->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int cirrus_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+	int r;
+
+	r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ return r;
+}
+
+
+static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func cirrus_tt_backend_func = {
+ .destroy = &cirrus_ttm_backend_destroy,
+};
+
+
+static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &cirrus_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver cirrus_bo_driver = {
+ .ttm_tt_create = cirrus_ttm_tt_create,
+ .ttm_tt_populate = cirrus_ttm_tt_populate,
+ .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
+ .init_mem_type = cirrus_bo_init_mem_type,
+ .evict_flags = cirrus_bo_evict_flags,
+ .move = cirrus_bo_move,
+ .verify_access = cirrus_bo_verify_access,
+ .io_mem_reserve = &cirrus_ttm_io_mem_reserve,
+ .io_mem_free = &cirrus_ttm_io_mem_free,
+};
+
+int cirrus_mm_init(struct cirrus_device *cirrus)
+{
+ int ret;
+ struct drm_device *dev = cirrus->dev;
+ struct ttm_bo_device *bdev = &cirrus->ttm.bdev;
+
+ ret = cirrus_ttm_global_init(cirrus);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&cirrus->ttm.bdev,
+ cirrus->ttm.bo_global_ref.ref.object,
+ &cirrus_bo_driver,
+ dev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ cirrus->mc.vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
+ cirrus->mm_inited = true;
+ return 0;
+}
+
+void cirrus_mm_fini(struct cirrus_device *cirrus)
+{
+ if (!cirrus->mm_inited)
+ return;
+
+ ttm_bo_device_release(&cirrus->ttm.bdev);
+
+ cirrus_ttm_global_release(cirrus);
+
+ arch_phys_wc_del(cirrus->fb_mtrr);
+ cirrus->fb_mtrr = 0;
+}
+
+void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
+{
+ u32 c = 0;
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+int cirrus_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct cirrus_bo **pcirrusbo)
+{
+ struct cirrus_device *cirrus = dev->dev_private;
+ struct cirrus_bo *cirrusbo;
+ size_t acc_size;
+ int ret;
+
+ cirrusbo = kzalloc(sizeof(struct cirrus_bo), GFP_KERNEL);
+ if (!cirrusbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &cirrusbo->gem, size);
+ if (ret) {
+ kfree(cirrusbo);
+ return ret;
+ }
+
+ cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+
+ cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&cirrus->ttm.bdev, size,
+ sizeof(struct cirrus_bo));
+
+ ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
+ ttm_bo_type_device, &cirrusbo->placement,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
+ NULL, cirrus_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pcirrusbo = cirrusbo;
+ return 0;
+}
+
+static inline u64 cirrus_bo_gpu_offset(struct cirrus_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = cirrus_bo_gpu_offset(bo);
+		return 0;
+	}
+
+ cirrus_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = cirrus_bo_gpu_offset(bo);
+ return 0;
+}
+
+int cirrus_bo_push_sysram(struct cirrus_bo *bo)
+{
+	int i, ret;
+
+	if (!bo->pin_count) {
+		DRM_ERROR("unpin called on unpinned bo %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+	for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret) {
+		DRM_ERROR("pushing to system RAM failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct cirrus_device *cirrus;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ cirrus = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &cirrus->ttm.bdev);
+}
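
The pin/unpin helpers above are meant to be called with the BO reserved, as
cirrus_crtc_do_set_base() in cirrus_mode.c does. A hedged usage sketch of the
expected calling convention (cirrus_bo_reserve()/cirrus_bo_unreserve() are
assumed to be thin wrappers around ttm_bo_reserve()/ttm_bo_unreserve() in
cirrus_drv.h; the wrapper function here is illustrative):

/* Reserve, pin into VRAM, and return the GPU offset of a cirrus BO. */
static int cirrus_pin_to_vram(struct cirrus_bo *bo, u64 *gpu_addr)
{
	int ret;

	ret = cirrus_bo_reserve(bo, false);	/* take the TTM BO lock */
	if (ret)
		return ret;

	ret = cirrus_bo_pin(bo, TTM_PL_FLAG_VRAM, gpu_addr);
	cirrus_bo_unreserve(bo);		/* drop the lock either way */
	return ret;
}
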
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index d68888fe3df..dde205cef38 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -31,8 +31,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include <linux/module.h>
+#include <linux/slab.h>
#if __OS_HAS_AGP
@@ -52,7 +53,7 @@
*/
int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
{
- DRM_AGP_KERN *kern;
+ struct agp_kern_info *kern;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
@@ -192,25 +193,23 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired, allocates the
- * memory via alloc_agp() and creates a drm_agp_mem entry for it.
+ * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
*/
int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
struct drm_agp_mem *entry;
- DRM_AGP_MEM *memory;
+ struct agp_memory *memory;
unsigned long pages;
u32 type;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL)))
+ if (!(entry = kzalloc(sizeof(*entry), GFP_KERNEL)))
return -ENOMEM;
- memset(entry, 0, sizeof(*entry));
-
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
type = (u32) request->type;
- if (!(memory = drm_alloc_agp(dev, pages, type))) {
+ if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) {
kfree(entry);
return -ENOMEM;
}
@@ -392,14 +391,16 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data,
* Gets the drm_agp_t structure which is made available by the agpgart module
* via the inter_module_* functions. Creates and initializes a drm_agp_head
* structure.
+ *
+ * Note that final cleanup of the kmalloced structure is directly done in
+ * drm_pci_agp_destroy.
*/
struct drm_agp_head *drm_agp_init(struct drm_device *dev)
{
struct drm_agp_head *head = NULL;
- if (!(head = kmalloc(sizeof(*head), GFP_KERNEL)))
+ if (!(head = kzalloc(sizeof(*head), GFP_KERNEL)))
return NULL;
- memset((void *)head, 0, sizeof(*head));
head->bridge = agp_find_bridge(dev->pdev);
if (!head->bridge) {
if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
@@ -422,36 +423,40 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
return head;
}
-/** Calls agp_allocate_memory() */
-DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data * bridge,
- size_t pages, u32 type)
+/**
+ * drm_agp_clear - Clear AGP resource list
+ * @dev: DRM device
+ *
+ * Iterate over all AGP resources and remove them. But keep the AGP head
+ * intact so it can still be used. It is safe to call this if AGP is disabled or
+ * was already removed.
+ *
+ * If DRIVER_MODESET is active, nothing is done to protect the modesetting
+ * resources from getting destroyed. Drivers are responsible for cleaning them up
+ * during device shutdown.
+ */
+void drm_agp_clear(struct drm_device *dev)
{
- return agp_allocate_memory(bridge, pages, type);
-}
+ struct drm_agp_mem *entry, *tempe;
-/** Calls agp_free_memory() */
-int drm_agp_free_memory(DRM_AGP_MEM * handle)
-{
- if (!handle)
- return 0;
- agp_free_memory(handle);
- return 1;
-}
+ if (!dev->agp)
+ return;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
+ if (entry->bound)
+ drm_unbind_agp(entry->memory);
+ drm_free_agp(entry->memory, entry->pages);
+ kfree(entry);
+ }
+ INIT_LIST_HEAD(&dev->agp->memory);
-/** Calls agp_bind_memory() */
-int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start)
-{
- if (!handle)
- return -EINVAL;
- return agp_bind_memory(handle, start);
-}
+ if (dev->agp->acquired)
+ drm_agp_release(dev);
-/** Calls agp_unbind_memory() */
-int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
-{
- if (!handle)
- return -EINVAL;
- return agp_unbind_memory(handle);
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
}
/**
@@ -461,19 +466,19 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
* No reference is held on the pages during this time -- it is up to the
* caller to handle that.
*/
-DRM_AGP_MEM *
+struct agp_memory *
drm_agp_bind_pages(struct drm_device *dev,
struct page **pages,
unsigned long num_pages,
uint32_t gtt_offset,
u32 type)
{
- DRM_AGP_MEM *mem;
+ struct agp_memory *mem;
int ret, i;
DRM_DEBUG("\n");
- mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
+ mem = agp_allocate_memory(dev->agp->bridge, num_pages,
type);
if (mem == NULL) {
DRM_ERROR("Failed to allocate memory for %ld pages\n",
@@ -486,7 +491,7 @@ drm_agp_bind_pages(struct drm_device *dev,
mem->page_count = num_pages;
mem->is_flushed = true;
- ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+ ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
if (ret != 0) {
DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
agp_free_memory(mem);
@@ -497,10 +502,4 @@ drm_agp_bind_pages(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_agp_bind_pages);
-void drm_agp_chipset_flush(struct drm_device *dev)
-{
- agp_flush_chipset(dev->agp->bridge);
-}
-EXPORT_SYMBOL(drm_agp_chipset_flush);
-
#endif /* __OS_HAS_AGP */
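
drm_agp_clear() above frees each list entry inside the loop, which is why it
uses list_for_each_entry_safe() rather than the plain iterator: the "safe"
variant caches the next node before the loop body runs. The pattern in
isolation (a generic sketch, independent of AGP):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head head;
};

static void free_all(struct list_head *list)
{
	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, list, head) {
		list_del(&it->head);
		kfree(it);	/* safe: tmp already points at the next node */
	}
}
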
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 932b5aa96a6..3cedae12b3c 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -33,7 +33,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "drmP.h"
+#include <drm/drmP.h>
/**
* Find the file with the given magic number.
@@ -79,10 +79,9 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
- memset(entry, 0, sizeof(*entry));
entry->priv = priv;
entry->hash_item.key = (unsigned long)magic;
mutex_lock(&dev->struct_mutex);
@@ -102,7 +101,7 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
* Searches and unlinks the entry in drm_device::magiclist with the magic
* number hash key, while holding the drm_device::struct_mutex lock.
*/
-static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
+int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
{
struct drm_magic_entry *pt;
struct drm_hash_item *hash;
@@ -137,6 +136,8 @@ static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
* If there is a magic number in drm_file::magic then use it, otherwise
* searches an unique non-zero magic number and add it associating it with \p
* file_priv.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
*/
int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
@@ -174,6 +175,8 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
* \return zero if authentication successed, or a negative number otherwise.
*
* Checks if \p file_priv is associated with the magic number passed in \arg.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
*/
int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv)
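
For context, the magic handshake that drm_getmagic()/drm_authmagic() implement
looks like this from userspace (libdrm calls; a sketch only -- the IPC between
the client and the DRM master is up to the windowing system):

#include <xf86drm.h>

	drm_magic_t magic;

	/* In the unauthenticated client: */
	if (drmGetMagic(client_fd, &magic) == 0) {
		/* ... hand the magic to the DRM master over IPC ... */
	}

	/* In the DRM master (e.g. the X server): */
	if (drmAuthMagic(master_fd, magic) == 0) {
		/* the client's fd is now authenticated */
	}
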
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
new file mode 100644
index 00000000000..0406110f83e
--- /dev/null
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -0,0 +1,185 @@
+/**************************************************************************
+ *
+ * Copyright 2010 Pauli Nieminen.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Multipart buffer for copying data which is larger than the page size.
+ *
+ * Authors:
+ * Pauli Nieminen <suokkos-at-gmail-dot-com>
+ */
+
+#include <linux/export.h>
+#include <drm/drm_buffer.h>
+
+/**
+ * Allocate the drm buffer object.
+ *
+ * buf: Pointer to a pointer where the object is stored.
+ * size: The number of bytes to allocate.
+ */
+int drm_buffer_alloc(struct drm_buffer **buf, int size)
+{
+ int nr_pages = size / PAGE_SIZE + 1;
+ int idx;
+
+	/* Allocating the pointer table at the end of the structure makes
+	 * drm_buffer variable sized */
+ *buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
+ GFP_KERNEL);
+
+ if (*buf == NULL) {
+ DRM_ERROR("Failed to allocate drm buffer object to hold"
+ " %d bytes in %d pages.\n",
+ size, nr_pages);
+ return -ENOMEM;
+ }
+
+ (*buf)->size = size;
+
+	for (idx = 0; idx < nr_pages; ++idx) {
+		(*buf)->data[idx] =
+			kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
+				GFP_KERNEL);
+
+ if ((*buf)->data[idx] == NULL) {
+ DRM_ERROR("Failed to allocate %dth page for drm"
+ " buffer with %d bytes and %d pages.\n",
+ idx + 1, size, nr_pages);
+ goto error_out;
+ }
+	}
+
+ return 0;
+
+error_out:
+ /* Only last element can be null pointer so check for it first. */
+ if ((*buf)->data[idx])
+ kfree((*buf)->data[idx]);
+
+ for (--idx; idx >= 0; --idx)
+ kfree((*buf)->data[idx]);
+
+ kfree(*buf);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_buffer_alloc);
+
+/**
+ * Copy the user data to the beginning of the buffer and reset the
+ * processing iterator.
+ *
+ * user_data: A pointer to the data that is copied to the buffer.
+ * size: The number of bytes to copy.
+ */
+int drm_buffer_copy_from_user(struct drm_buffer *buf,
+ void __user *user_data, int size)
+{
+ int nr_pages = size / PAGE_SIZE + 1;
+ int idx;
+
+ if (size > buf->size) {
+ DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
+ " %d bytes space\n",
+ size, buf->size);
+ return -EFAULT;
+ }
+
+	for (idx = 0; idx < nr_pages; ++idx) {
+ if (copy_from_user(buf->data[idx],
+ user_data + idx * PAGE_SIZE,
+ min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
+ DRM_ERROR("Failed to copy user data (%p) to drm buffer"
+ " (%p) %dth page.\n",
+ user_data, buf, idx);
+ return -EFAULT;
+		}
+ }
+ buf->iterator = 0;
+ return 0;
+}
+EXPORT_SYMBOL(drm_buffer_copy_from_user);
+
+/**
+ * Free the drm buffer object
+ */
+void drm_buffer_free(struct drm_buffer *buf)
+{
+	if (buf != NULL) {
+		int nr_pages = buf->size / PAGE_SIZE + 1;
+		int idx;
+
+		for (idx = 0; idx < nr_pages; ++idx)
+ kfree(buf->data[idx]);
+
+ kfree(buf);
+ }
+}
+EXPORT_SYMBOL(drm_buffer_free);
+
+/**
+ * Read an object from a buffer where it may be split across multiple pages.
+ * If the object is not split, the function simply returns a pointer to the
+ * object inside the buffer. If the object is split, its data is copied into
+ * the stack object supplied by the caller.
+ *
+ * The processing location of the buffer is also advanced to the next byte
+ * after the object.
+ *
+ * objsize: The size of the object in bytes.
+ * stack_obj: A pointer to a memory location where the object can be copied.
+ */
+void *drm_buffer_read_object(struct drm_buffer *buf,
+ int objsize, void *stack_obj)
+{
+ int idx = drm_buffer_index(buf);
+ int page = drm_buffer_page(buf);
+ void *obj = NULL;
+
+ if (idx + objsize <= PAGE_SIZE) {
+ obj = &buf->data[page][idx];
+ } else {
+		/* The object is split, forcing a copy to the temporary object. */
+ int beginsz = PAGE_SIZE - idx;
+ memcpy(stack_obj, &buf->data[page][idx], beginsz);
+
+ memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
+ objsize - beginsz);
+
+ obj = stack_obj;
+ }
+
+ drm_buffer_advance(buf, objsize);
+ return obj;
+}
+EXPORT_SYMBOL(drm_buffer_read_object);
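
Putting the drm_buffer API added above together: a caller allocates the
multipage buffer, fills it from userspace, then pulls fixed-size records out
with drm_buffer_read_object(), which transparently handles records straddling
a page boundary. A hedged usage sketch ("struct cmd" and the function are
purely illustrative):

#include <drm/drm_buffer.h>

struct cmd {
	u32 op;
	u32 arg;
};

static int consume_cmds(void __user *udata, int size)
{
	struct drm_buffer *buf;
	struct cmd stack_cmd, *c;
	int ret, i;

	ret = drm_buffer_alloc(&buf, size);
	if (ret)
		return ret;

	ret = drm_buffer_copy_from_user(buf, udata, size);
	if (ret)
		goto out;

	for (i = 0; i < size / (int)sizeof(struct cmd); i++) {
		/* Returns an in-place pointer, or &stack_cmd if split. */
		c = drm_buffer_read_object(buf, sizeof(*c), &stack_cmd);
		/* ... dispatch on c->op ... */
	}
out:
	drm_buffer_free(buf);
	return ret;
}
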
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 8417cc4c43f..68175b54504 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -34,22 +34,11 @@
*/
#include <linux/vmalloc.h>
+#include <linux/slab.h>
#include <linux/log2.h>
+#include <linux/export.h>
#include <asm/shmparam.h>
-#include "drmP.h"
-
-resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
-{
- return pci_resource_start(dev->pdev, resource);
-}
-EXPORT_SYMBOL(drm_get_resource_start);
-
-resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
-{
- return pci_resource_len(dev->pdev, resource);
-}
-
-EXPORT_SYMBOL(drm_get_resource_len);
+#include <drm/drmP.h>
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
struct drm_local_map *map)
@@ -58,10 +47,11 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
list_for_each_entry(entry, &dev->maplist, head) {
/*
* Because the kernel-userspace ABI is fixed at a 32-bit offset
- * while PCI resources may live above that, we ignore the map
- * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS.
- * It is assumed that each driver will have only one resource of
- * each type.
+ * while PCI resources may live above that, we only compare the
+ * lower 32 bits of the map offset for maps of type
+ * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
+	 * It is assumed that if a driver has more than one resource
+	 * of each type, the lower 32 bits are different.
*/
if (!entry->map ||
map->type != entry->map->type ||
@@ -71,9 +61,12 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
case _DRM_SHM:
if (map->flags != _DRM_CONTAINS_LOCK)
break;
+ return entry;
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
- return entry;
+ if ((entry->map->offset & 0xffffffff) ==
+ (map->offset & 0xffffffff))
+ return entry;
default: /* Make gcc happy */
;
}
@@ -188,16 +181,13 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
-#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
+#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
if (map->offset + (map->size-1) < map->offset ||
map->offset < virt_to_phys(high_memory)) {
kfree(map);
return -EINVAL;
}
#endif
-#ifdef __alpha__
- map->offset += dev->hose->mem_space->start;
-#endif
/* Some drivers preinitialize some maps, without the X Server
* needing to be aware of it. Therefore, we just return success
* when the server tries to create a duplicate map.
@@ -217,15 +207,17 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
return 0;
}
- if (drm_core_has_MTRR(dev)) {
- if (map->type == _DRM_FRAME_BUFFER ||
- (map->flags & _DRM_WRITE_COMBINING)) {
- map->mtrr = mtrr_add(map->offset, map->size,
- MTRR_TYPE_WRCOMB, 1);
- }
+ if (map->type == _DRM_FRAME_BUFFER ||
+ (map->flags & _DRM_WRITE_COMBINING)) {
+ map->mtrr =
+ arch_phys_wc_add(map->offset, map->size);
}
if (map->type == _DRM_REGISTERS) {
- map->handle = ioremap(map->offset, map->size);
+ if (map->flags & _DRM_WRITE_COMBINING)
+ map->handle = ioremap_wc(map->offset,
+ map->size);
+ else
+ map->handle = ioremap(map->offset, map->size);
if (!map->handle) {
kfree(map);
return -ENOMEM;
@@ -249,7 +241,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
}
map->handle = vmalloc_user(map->size);
DRM_DEBUG("%lu %d %p\n",
- map->size, drm_order(map->size), map->handle);
+ map->size, order_base_2(map->size), map->handle);
if (!map->handle) {
kfree(map);
return -ENOMEM;
@@ -269,7 +261,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
struct drm_agp_mem *entry;
int valid = 0;
- if (!drm_core_has_AGP(dev)) {
+ if (!dev->agp) {
kfree(map);
return -EINVAL;
}
@@ -311,9 +303,6 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
break;
}
- case _DRM_GEM:
- DRM_ERROR("tried to addmap GEM object\n");
- break;
case _DRM_SCATTER_GATHER:
if (!dev->sg) {
kfree(map);
@@ -340,14 +329,13 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
return -EINVAL;
}
- list = kmalloc(sizeof(*list), GFP_KERNEL);
+ list = kzalloc(sizeof(*list), GFP_KERNEL);
if (!list) {
if (map->type == _DRM_REGISTERS)
iounmap(map->handle);
kfree(map);
return -EINVAL;
}
- memset(list, 0, sizeof(*list));
list->map = map;
mutex_lock(&dev->struct_mutex);
@@ -375,7 +363,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
list->master = dev->primary->master;
*maplist = list;
return 0;
- }
+}
int drm_addmap(struct drm_device * dev, resource_size_t offset,
unsigned int size, enum drm_map_type type,
@@ -421,6 +409,15 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
/* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
map->handle = (void *)(unsigned long)maplist->user_token;
+
+ /*
+ * It appears that there are no users of this value whatsoever --
+ * drmAddMap just discards it. Let's not encourage its use.
+ * (Keeping drm_addmap_core's returned mtrr value would be wrong --
+ * it's not a real mtrr index anymore.)
+ */
+ map->mtrr = -1;
+
return 0;
}
@@ -462,11 +459,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
iounmap(map->handle);
/* FALLTHROUGH */
case _DRM_FRAME_BUFFER:
- if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
- int retcode;
- retcode = mtrr_del(map->mtrr, map->offset, map->size);
- DRM_DEBUG("mtrr_del=%d\n", retcode);
- }
+ arch_phys_wc_del(map->mtrr);
break;
case _DRM_SHM:
vfree(map->handle);
@@ -487,9 +480,6 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
- case _DRM_GEM:
- DRM_ERROR("tried to rmmap GEM object\n");
- break;
}
kfree(map);
@@ -631,7 +621,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
return -EINVAL;
count = request->count;
- order = drm_order(request->size);
+ order = order_base_2(request->size);
size = 1 << order;
alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -652,8 +642,6 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
/* Make sure buffers are located in AGP memory that we own */
valid = 0;
@@ -668,13 +656,13 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
DRM_DEBUG("zone invalid\n");
return -EINVAL;
}
- spin_lock(&dev->count_lock);
+ spin_lock(&dev->buf_lock);
if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
@@ -690,13 +678,12 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
return -EINVAL;
}
- entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+ entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
@@ -716,11 +703,10 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+ buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@@ -729,7 +715,6 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(buf->dev_private, 0, buf->dev_priv_size);
DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
@@ -806,29 +791,27 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
return -EPERM;
count = request->count;
- order = drm_order(request->size);
+ order = order_base_2(request->size);
size = 1 << order;
- DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
- request->count, request->size, size, order, dev->queue_count);
+ DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
+ request->count, request->size, size, order);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
alignment = (request->flags & _DRM_PAGE_ALIGN)
? PAGE_ALIGN(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
- spin_lock(&dev->count_lock);
+ spin_lock(&dev->buf_lock);
if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
@@ -844,22 +827,20 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
return -EINVAL;
}
- entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+ entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
- entry->seglist = kmalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
+ entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
if (!entry->seglist) {
kfree(entry->buflist);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(entry->seglist, 0, count * sizeof(*entry->seglist));
/* Keep the original pagelist until we know all the allocations
* have succeeded
@@ -919,12 +900,11 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size,
- GFP_KERNEL);
+ buf->dev_private = kzalloc(buf->dev_priv_size,
+ GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@@ -935,7 +915,6 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(buf->dev_private, 0, buf->dev_priv_size);
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
@@ -960,7 +939,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
- /* No allocations failed, so now we can replace the orginal pagelist
+ /* No allocations failed, so now we can replace the original pagelist
* with the new one.
*/
if (dma->page_count) {
@@ -1014,7 +993,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
return -EPERM;
count = request->count;
- order = drm_order(request->size);
+ order = order_base_2(request->size);
size = 1 << order;
alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -1035,16 +1014,14 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
- spin_lock(&dev->count_lock);
+ spin_lock(&dev->buf_lock);
if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
@@ -1060,14 +1037,13 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
return -EINVAL;
}
- entry->buflist = kmalloc(count * sizeof(*entry->buflist),
+ entry->buflist = kzalloc(count * sizeof(*entry->buflist),
GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
@@ -1088,11 +1064,10 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+ buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@@ -1102,8 +1077,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
return -ENOMEM;
}
- memset(buf->dev_private, 0, buf->dev_priv_size);
-
DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
offset += alignment;
@@ -1148,166 +1121,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
return 0;
}
-static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = drm_order(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lu\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
-
- spin_lock(&dev->count_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kmalloc(count * sizeof(*entry->buflist),
- GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memset(buf->dev_private, 0, buf->dev_priv_size);
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_FB;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-
-
/**
* Add buffers for DMA transfers (ioctl).
*
@@ -1328,6 +1141,9 @@ int drm_addbufs(struct drm_device *dev, void *data,
struct drm_buf_desc *request = data;
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1339,7 +1155,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
if (request->flags & _DRM_SG_BUFFER)
ret = drm_addbufs_sg(dev, request);
else if (request->flags & _DRM_FB_BUFFER)
- ret = drm_addbufs_fb(dev, request);
+ ret = -EINVAL;
else
ret = drm_addbufs_pci(dev, request);
@@ -1359,7 +1175,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
* \param arg pointer to a drm_buf_info structure.
* \return zero on success or a negative number on failure.
*
- * Increments drm_device::buf_use while holding the drm_device::count_lock
+ * Increments drm_device::buf_use while holding the drm_device::buf_lock
* lock, preventing allocation of more buffers after this call. Information
* about each requested buffer is then copied into user space.
*/
@@ -1371,19 +1187,22 @@ int drm_infobufs(struct drm_device *dev, void *data,
int i;
int count;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
if (!dma)
return -EINVAL;
- spin_lock(&dev->count_lock);
+ spin_lock(&dev->buf_lock);
if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
return -EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
if (dma->bufs[i].buf_count)
@@ -1450,6 +1269,9 @@ int drm_markbufs(struct drm_device *dev, void *data,
int order;
struct drm_buf_entry *entry;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1458,7 +1280,7 @@ int drm_markbufs(struct drm_device *dev, void *data,
DRM_DEBUG("%d, %d, %d\n",
request->size, request->low_mark, request->high_mark);
- order = drm_order(request->size);
+ order = order_base_2(request->size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
entry = &dma->bufs[order];
@@ -1495,6 +1317,9 @@ int drm_freebufs(struct drm_device *dev, void *data,
int idx;
struct drm_buf *buf;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1531,8 +1356,8 @@ int drm_freebufs(struct drm_device *dev, void *data,
* \param arg pointer to a drm_buf_map structure.
* \return zero on success or a negative number on failure.
*
- * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
- * about each buffer into user space. For PCI buffers, it calls do_mmap() with
+ * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
+ * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
* offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
* drm_mmap_dma().
*/
@@ -1547,26 +1372,27 @@ int drm_mapbufs(struct drm_device *dev, void *data,
struct drm_buf_map *request = data;
int i;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
if (!dma)
return -EINVAL;
- spin_lock(&dev->count_lock);
+ spin_lock(&dev->buf_lock);
if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
return -EBUSY;
}
dev->buf_use++; /* Can't allocate more after this call */
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
if (request->count >= dma->buf_count) {
- if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+ if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
|| (drm_core_check_feature(dev, DRIVER_SG)
- && (dma->flags & _DRM_DMA_USE_SG))
- || (drm_core_check_feature(dev, DRIVER_FB_DMA)
- && (dma->flags & _DRM_DMA_USE_FB))) {
+ && (dma->flags & _DRM_DMA_USE_SG))) {
struct drm_local_map *map = dev->agp_buffer_map;
unsigned long token = dev->agp_buffer_token;
@@ -1574,18 +1400,14 @@ int drm_mapbufs(struct drm_device *dev, void *data,
retcode = -EINVAL;
goto done;
}
- down_write(&current->mm->mmap_sem);
- virtual = do_mmap(file_priv->filp, 0, map->size,
+ virtual = vm_mmap(file_priv->filp, 0, map->size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
token);
- up_write(&current->mm->mmap_sem);
} else {
- down_write(&current->mm->mmap_sem);
- virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
+ virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
PROT_READ | PROT_WRITE,
MAP_SHARED, 0);
- up_write(&current->mm->mmap_sem);
}
if (virtual > -1024UL) {
/* Real error */
@@ -1627,25 +1449,28 @@ int drm_mapbufs(struct drm_device *dev, void *data,
return retcode;
}
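
For reference, the shape of the do_mmap() -> vm_mmap() conversion applied in this hunk: vm_mmap() takes and drops mmap_sem internally, so the down_write()/up_write() pairs disappear. A minimal sketch, assuming a hypothetical my_map_bufs() wrapper:

static unsigned long my_map_bufs(struct file *filp, size_t size,
				 unsigned long token)
{
	/* vm_mmap() handles mmap_sem itself; no explicit locking here */
	return vm_mmap(filp, 0, size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, token);
}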
-/**
- * Compute size order. Returns the exponent of the smaller power of two which
- * is greater or equal to given number.
- *
- * \param size size.
- * \return order.
- *
- * \todo Can be made faster.
- */
-int drm_order(unsigned long size)
+int drm_dma_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- int order;
- unsigned long tmp;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
- for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
+ if (dev->driver->dma_ioctl)
+ return dev->driver->dma_ioctl(dev, data, file_priv);
+ else
+ return -EINVAL;
+}
- if (size & (size - 1))
- ++order;
+struct drm_local_map *drm_getsarea(struct drm_device *dev)
+{
+ struct drm_map_list *entry;
- return order;
+ list_for_each_entry(entry, &dev->maplist, head) {
+ if (entry->map && entry->map->type == _DRM_SHM &&
+ (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+ return entry->map;
+ }
+ }
+ return NULL;
}
-EXPORT_SYMBOL(drm_order);
+EXPORT_SYMBOL(drm_getsarea);
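
A minimal sketch of how a legacy driver might use the drm_getsarea() helper exported above; my_driver_load() is hypothetical, not part of this patch:

static int my_driver_load(struct drm_device *dev)
{
	struct drm_local_map *sarea = drm_getsarea(dev);

	if (!sarea)
		return -EINVAL;	/* no _DRM_SHM map with _DRM_CONTAINS_LOCK */

	/* sarea->handle is the kernel mapping of the shared area */
	return 0;
}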
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 0e3bd5b54b7..a6b690626a6 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -28,22 +28,30 @@
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
-#include "drmP.h"
+#include <linux/export.h>
+#include <drm/drmP.h>
#if defined(CONFIG_X86)
+
+/*
+ * clflushopt is an unordered instruction which needs fencing with mfence or
+ * sfence to avoid ordering issues. For drm_clflush_page this fencing happens
+ * in the caller.
+ */
static void
drm_clflush_page(struct page *page)
{
uint8_t *page_virtual;
unsigned int i;
+ const int size = boot_cpu_data.x86_clflush_size;
if (unlikely(page == NULL))
return;
- page_virtual = kmap_atomic(page, KM_USER0);
- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
- clflush(page_virtual + i);
- kunmap_atomic(page_virtual, KM_USER0);
+ page_virtual = kmap_atomic(page);
+ for (i = 0; i < PAGE_SIZE; i += size)
+ clflushopt(page_virtual + i);
+ kunmap_atomic(page_virtual);
}
static void drm_cache_flush_clflush(struct page *pages[],
@@ -86,10 +94,10 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
if (unlikely(page == NULL))
continue;
- page_virtual = kmap_atomic(page, KM_USER0);
+ page_virtual = kmap_atomic(page);
flush_dcache_range((unsigned long)page_virtual,
(unsigned long)page_virtual + PAGE_SIZE);
- kunmap_atomic(page_virtual, KM_USER0);
+ kunmap_atomic(page_virtual);
}
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");
@@ -97,3 +105,50 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
+
+void
+drm_clflush_sg(struct sg_table *st)
+{
+#if defined(CONFIG_X86)
+ if (cpu_has_clflush) {
+ struct sg_page_iter sg_iter;
+
+ mb();
+ for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+ drm_clflush_page(sg_page_iter_page(&sg_iter));
+ mb();
+
+ return;
+ }
+
+ if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+ printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+ WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_sg);
+
+void
+drm_clflush_virt_range(void *addr, unsigned long length)
+{
+#if defined(CONFIG_X86)
+ if (cpu_has_clflush) {
+ void *end = addr + length;
+ mb();
+ for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
+ clflushopt(addr);
+ clflushopt(end - 1);
+ mb();
+ return;
+ }
+
+ if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+ printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+ WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_virt_range);
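
A sketch of how a driver might use the two new flush helpers before handing pages to the GPU; struct my_bo and its fields are hypothetical:

struct my_bo {
	struct sg_table *pages;		/* backing storage */
	void *vaddr;			/* optional kernel mapping */
	unsigned long size;
};

static void my_bo_flush_for_gpu(struct my_bo *bo)
{
	if (bo->vaddr)
		drm_clflush_virt_range(bo->vaddr, bo->size);
	else
		drm_clflush_sg(bo->pages);
}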
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 2607753a320..a4b017b6849 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -40,7 +40,7 @@
* needed by SiS driver's memory management.
*/
-#include "drmP.h"
+#include <drm/drmP.h>
/******************************************************************/
/** \name Context bitmap support */
@@ -74,23 +74,13 @@ void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
*/
static int drm_ctxbitmap_next(struct drm_device * dev)
{
- int new_id;
int ret;
-again:
- if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
mutex_lock(&dev->struct_mutex);
- ret = idr_get_new_above(&dev->ctx_idr, NULL,
- DRM_RESERVED_CONTEXTS, &new_id);
- if (ret == -EAGAIN) {
- mutex_unlock(&dev->struct_mutex);
- goto again;
- }
+ ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
+ GFP_KERNEL);
mutex_unlock(&dev->struct_mutex);
- return new_id;
+ return ret;
}
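
The conversion above follows the tree-wide idr pattern: the idr_pre_get()/idr_get_new_above() retry loop collapses into one idr_alloc() call returning either the new id or a negative errno. A sketch with placeholder names:

static int my_alloc_id(struct idr *idr, void *obj)
{
	/* start at 1; end == 0 means no upper bound */
	int id = idr_alloc(idr, obj, 1, 0, GFP_KERNEL);

	return id;	/* >= 1 on success, -ENOMEM/-ENOSPC on failure */
}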
/**
@@ -117,7 +107,7 @@ int drm_ctxbitmap_init(struct drm_device * dev)
void drm_ctxbitmap_cleanup(struct drm_device * dev)
{
mutex_lock(&dev->struct_mutex);
- idr_remove_all(&dev->ctx_idr);
+ idr_destroy(&dev->ctx_idr);
mutex_unlock(&dev->struct_mutex);
}
@@ -154,8 +144,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
return -EINVAL;
}
- mutex_unlock(&dev->struct_mutex);
-
request->handle = NULL;
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
@@ -164,6 +152,9 @@ int drm_getsareactx(struct drm_device *dev, void *data,
break;
}
}
+
+ mutex_unlock(&dev->struct_mutex);
+
if (request->handle == NULL)
return -EINVAL;
@@ -260,7 +251,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
struct drm_file *file_priv, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
- dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
@@ -270,7 +260,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
when the kernel holds the lock, release
that lock here. */
clear_bit(0, &dev->context_flag);
- wake_up(&dev->context_wait);
return 0;
}
@@ -333,14 +322,6 @@ int drm_addctx(struct drm_device *dev, void *data,
return -ENOMEM;
}
- if (ctx->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_ctor)
- if (!dev->driver->context_ctor(dev, ctx->handle)) {
- DRM_DEBUG("Running out of ctxs or memory.\n");
- return -ENOMEM;
- }
- }
-
ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
if (!ctx_entry) {
DRM_DEBUG("out of memory\n");
@@ -353,18 +334,11 @@ int drm_addctx(struct drm_device *dev, void *data,
mutex_lock(&dev->ctxlist_mutex);
list_add(&ctx_entry->head, &dev->ctxlist);
- ++dev->ctx_count;
mutex_unlock(&dev->ctxlist_mutex);
return 0;
}
-int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- /* This does nothing */
- return 0;
-}
-
/**
* Get context.
*
@@ -457,7 +431,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
if (pos->handle == ctx->handle) {
list_del(&pos->head);
kfree(pos);
- --dev->ctx_count;
}
}
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5124401f266..fe94cc10cd3 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -29,19 +29,116 @@
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
+#include <linux/ctype.h>
#include <linux/list.h>
-#include "drm.h"
-#include "drmP.h"
-#include "drm_crtc.h"
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_lock.h>
-struct drm_prop_enum_list {
- int type;
- char *name;
-};
+#include "drm_crtc_internal.h"
+
+/**
+ * drm_modeset_lock_all - take all modeset locks
+ * @dev: drm device
+ *
+ * This function takes all modeset locks; it is suitable where a more
+ * fine-grained scheme isn't (yet) implemented. Locks must be dropped with
+ * drm_modeset_unlock_all().
+ */
+void drm_modeset_lock_all(struct drm_device *dev)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_modeset_acquire_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (WARN_ON(!ctx))
+ return;
+
+ mutex_lock(&config->mutex);
+
+ drm_modeset_acquire_init(ctx, 0);
+
+retry:
+ ret = drm_modeset_lock(&config->connection_mutex, ctx);
+ if (ret)
+ goto fail;
+ ret = drm_modeset_lock_all_crtcs(dev, ctx);
+ if (ret)
+ goto fail;
+
+ WARN_ON(config->acquire_ctx);
+
+ /* now we hold the locks, so it is safe to stash the
+ * ctx for drm_modeset_unlock_all():
+ */
+ config->acquire_ctx = ctx;
+
+ drm_warn_on_modeset_not_all_locked(dev);
+
+ return;
+
+fail:
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(ctx);
+ goto retry;
+ }
+}
+EXPORT_SYMBOL(drm_modeset_lock_all);
+
+/**
+ * drm_modeset_unlock_all - drop all modeset locks
+ * @dev: device
+ *
+ * This function drops all modeset locks taken by drm_modeset_lock_all().
+ */
+void drm_modeset_unlock_all(struct drm_device *dev)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
+
+ if (WARN_ON(!ctx))
+ return;
+
+ config->acquire_ctx = NULL;
+ drm_modeset_drop_locks(ctx);
+ drm_modeset_acquire_fini(ctx);
+
+ kfree(ctx);
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_modeset_unlock_all);
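
Typical use of the pair is a coarse bracket around code that touches global modeset state; a hypothetical restore helper might look like:

static void my_restore_modeset_state(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* ... reprogram each pipe ... */
	}

	drm_modeset_unlock_all(dev);
}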
+
+/**
+ * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
+ * @dev: device
+ *
+ * Useful as a debug assert.
+ */
+void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ /* Locking is currently fubar in the panic handler. */
+ if (oops_in_progress)
+ return;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+}
+EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
/* Avoid boilerplate. I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list) \
- char *fnname(int val) \
+ const char *fnname(int val) \
{ \
int i; \
for (i = 0; i < ARRAY_SIZE(list); i++) { \
@@ -54,7 +151,7 @@ struct drm_prop_enum_list {
/*
* Global properties
*/
-static struct drm_prop_enum_list drm_dpms_enum_list[] =
+static const struct drm_prop_enum_list drm_dpms_enum_list[] =
{ { DRM_MODE_DPMS_ON, "On" },
{ DRM_MODE_DPMS_STANDBY, "Standby" },
{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
@@ -63,10 +160,17 @@ static struct drm_prop_enum_list drm_dpms_enum_list[] =
DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+static const struct drm_prop_enum_list drm_plane_type_enum_list[] =
+{
+ { DRM_PLANE_TYPE_OVERLAY, "Overlay" },
+ { DRM_PLANE_TYPE_PRIMARY, "Primary" },
+ { DRM_PLANE_TYPE_CURSOR, "Cursor" },
+};
+
/*
* Optional properties
*/
-static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
+static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
{
{ DRM_MODE_SCALE_NONE, "None" },
{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
@@ -74,16 +178,10 @@ static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
};
-static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
-{
- { DRM_MODE_DITHERING_OFF, "Off" },
- { DRM_MODE_DITHERING_ON, "On" },
-};
-
/*
* Non-global properties, but "required" for certain connectors.
*/
-static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
+static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
{
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
@@ -92,7 +190,7 @@ static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
-static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
+static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
{
{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
@@ -102,7 +200,7 @@ static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
drm_dvi_i_subconnector_enum_list)
-static struct drm_prop_enum_list drm_tv_select_enum_list[] =
+static const struct drm_prop_enum_list drm_tv_select_enum_list[] =
{
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
@@ -113,7 +211,7 @@ static struct drm_prop_enum_list drm_tv_select_enum_list[] =
DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
-static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
+static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
{
{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
@@ -125,72 +223,86 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
drm_tv_subconnector_enum_list)
-static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
{ DRM_MODE_DIRTY_OFF, "Off" },
{ DRM_MODE_DIRTY_ON, "On" },
{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
};
-DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
- drm_dirty_info_enum_list)
-
struct drm_conn_prop_enum_list {
int type;
- char *name;
- int count;
+ const char *name;
+ struct ida ida;
};
/*
* Connector and encoder types.
*/
static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
-{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
- { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
- { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
- { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
- { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
- { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
- { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
- { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
- { DRM_MODE_CONNECTOR_Component, "Component", 0 },
- { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 },
- { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
- { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
- { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
- { DRM_MODE_CONNECTOR_TV, "TV", 0 },
+{ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
+ { DRM_MODE_CONNECTOR_VGA, "VGA" },
+ { DRM_MODE_CONNECTOR_DVII, "DVI-I" },
+ { DRM_MODE_CONNECTOR_DVID, "DVI-D" },
+ { DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
+ { DRM_MODE_CONNECTOR_Composite, "Composite" },
+ { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
+ { DRM_MODE_CONNECTOR_LVDS, "LVDS" },
+ { DRM_MODE_CONNECTOR_Component, "Component" },
+ { DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
+ { DRM_MODE_CONNECTOR_DisplayPort, "DP" },
+ { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
+ { DRM_MODE_CONNECTOR_TV, "TV" },
+ { DRM_MODE_CONNECTOR_eDP, "eDP" },
+ { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
+ { DRM_MODE_CONNECTOR_DSI, "DSI" },
};
-static struct drm_prop_enum_list drm_encoder_enum_list[] =
+static const struct drm_prop_enum_list drm_encoder_enum_list[] =
{ { DRM_MODE_ENCODER_NONE, "None" },
{ DRM_MODE_ENCODER_DAC, "DAC" },
{ DRM_MODE_ENCODER_TMDS, "TMDS" },
{ DRM_MODE_ENCODER_LVDS, "LVDS" },
{ DRM_MODE_ENCODER_TVDAC, "TV" },
+ { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+ { DRM_MODE_ENCODER_DSI, "DSI" },
+ { DRM_MODE_ENCODER_DPMST, "DP MST" },
};
-char *drm_get_encoder_name(struct drm_encoder *encoder)
+static const struct drm_prop_enum_list drm_subpixel_enum_list[] =
{
- static char buf[32];
+ { SubPixelUnknown, "Unknown" },
+ { SubPixelHorizontalRGB, "Horizontal RGB" },
+ { SubPixelHorizontalBGR, "Horizontal BGR" },
+ { SubPixelVerticalRGB, "Vertical RGB" },
+ { SubPixelVerticalBGR, "Vertical BGR" },
+ { SubPixelNone, "None" },
+};
- snprintf(buf, 32, "%s-%d",
- drm_encoder_enum_list[encoder->encoder_type].name,
- encoder->base.id);
- return buf;
+void drm_connector_ida_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+ ida_init(&drm_connector_enum_list[i].ida);
}
-EXPORT_SYMBOL(drm_get_encoder_name);
-char *drm_get_connector_name(struct drm_connector *connector)
+void drm_connector_ida_destroy(void)
{
- static char buf[32];
+ int i;
- snprintf(buf, 32, "%s-%d",
- drm_connector_enum_list[connector->connector_type].name,
- connector->connector_type_id);
- return buf;
+ for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+ ida_destroy(&drm_connector_enum_list[i].ida);
}
-EXPORT_SYMBOL(drm_get_connector_name);
-char *drm_get_connector_status_name(enum drm_connector_status status)
+/**
+ * drm_get_connector_status_name - return a string for connector status
+ * @status: connector status to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions, this one returns a
+ * const pointer and hence is threadsafe.
+ */
+const char *drm_get_connector_status_name(enum drm_connector_status status)
{
if (status == connector_status_connected)
return "connected";
@@ -199,89 +311,162 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
else
return "unknown";
}
+EXPORT_SYMBOL(drm_get_connector_status_name);
/**
- * drm_mode_object_get - allocate a new identifier
- * @dev: DRM device
- * @ptr: object pointer, used to generate unique ID
- * @type: object type
+ * drm_get_subpixel_order_name - return a string for a given subpixel enum
+ * @order: enum of subpixel_order
+ *
+ * Note you could abuse this and return something out of bounds, but that
+ * would be a caller error. No unscrubbed user data should make it here.
+ */
+const char *drm_get_subpixel_order_name(enum subpixel_order order)
+{
+ return drm_subpixel_enum_list[order].name;
+}
+EXPORT_SYMBOL(drm_get_subpixel_order_name);
+
+static char printable_char(int c)
+{
+ return isascii(c) && isprint(c) ? c : '?';
+}
+
+/**
+ * drm_get_format_name - return a string for drm fourcc format
+ * @format: format to compute name of
+ *
+ * Note that the buffer used by this function is globally shared and owned by
+ * the function itself.
*
- * LOCKING:
+ * FIXME: This isn't really multithreading safe.
+ */
+const char *drm_get_format_name(uint32_t format)
+{
+ static char buf[32];
+
+ snprintf(buf, sizeof(buf),
+ "%c%c%c%c %s-endian (0x%08x)",
+ printable_char(format & 0xff),
+ printable_char((format >> 8) & 0xff),
+ printable_char((format >> 16) & 0xff),
+ printable_char((format >> 24) & 0x7f),
+ format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
+ format);
+
+ return buf;
+}
+EXPORT_SYMBOL(drm_get_format_name);
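
Given the shared static buffer called out in the FIXME, the helper is only safe from effectively single-threaded paths such as probe-time debug output; a hypothetical caller:

static void my_log_format(uint32_t fourcc)
{
	/* NB: drm_get_format_name() returns a shared static buffer */
	DRM_DEBUG_KMS("scanout format: %s\n", drm_get_format_name(fourcc));
}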
+
+/**
+ * drm_mode_object_get - allocate a new modeset identifier
+ * @dev: DRM device
+ * @obj: object pointer, used to generate unique ID
+ * @obj_type: object type
*
* Create a unique identifier based on @obj in @dev's identifier space. Used
- * for tracking modes, CRTCs and connectors.
+ * for tracking modes, CRTCs and connectors. Note that despite the _get postfix
+ * modeset identifiers are _not_ reference counted. Hence don't use this for
+ * reference counted modeset objects like framebuffers.
*
- * RETURNS:
+ * Returns:
* New unique (relative to other objects in @dev) integer identifier for the
* object.
*/
-static int drm_mode_object_get(struct drm_device *dev,
- struct drm_mode_object *obj, uint32_t obj_type)
+int drm_mode_object_get(struct drm_device *dev,
+ struct drm_mode_object *obj, uint32_t obj_type)
{
- int new_id = 0;
int ret;
-again:
- if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Ran out memory getting a mode number\n");
- return -EINVAL;
- }
-
mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
+ ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL);
+ if (ret >= 0) {
+ /*
+ * Set up the object linking under the protection of the idr
+ * lock so that other users can't see inconsistent state.
+ */
+ obj->id = ret;
+ obj->type = obj_type;
+ }
mutex_unlock(&dev->mode_config.idr_mutex);
- if (ret == -EAGAIN)
- goto again;
- obj->id = new_id;
- obj->type = obj_type;
- return 0;
+ return ret < 0 ? ret : 0;
}
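
drm_mode_object_get() now reports idr_alloc() failures as a negative errno, so the (newly non-static) callers are expected to check it; a sketch with a hypothetical wrapper:

static int my_register_obj(struct drm_device *dev, struct drm_mode_object *obj)
{
	int ret = drm_mode_object_get(dev, obj, DRM_MODE_OBJECT_CRTC);

	if (ret)
		return ret;	/* no id was assigned */

	/* obj->id and obj->type are now visible to lookups */
	return 0;
}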
/**
- * drm_mode_object_put - free an identifer
+ * drm_mode_object_put - free a modeset identifier
* @dev: DRM device
- * @id: ID to free
+ * @object: object to free
*
- * LOCKING:
- * Caller must hold DRM mode_config lock.
- *
- * Free @id from @dev's unique identifier pool.
+ * Free the identifier of @object from @dev's unique identifier pool. Note that
+ * despite the _get postfix modeset identifiers are _not_ reference counted.
+ * Hence don't use this for reference counted modeset objects like framebuffers.
*/
-static void drm_mode_object_put(struct drm_device *dev,
- struct drm_mode_object *object)
+void drm_mode_object_put(struct drm_device *dev,
+ struct drm_mode_object *object)
{
mutex_lock(&dev->mode_config.idr_mutex);
idr_remove(&dev->mode_config.crtc_idr, object->id);
mutex_unlock(&dev->mode_config.idr_mutex);
}
-struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+static struct drm_mode_object *_object_find(struct drm_device *dev,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
- if (!obj || (obj->type != type) || (obj->id != id))
+ if (!obj || (type != DRM_MODE_OBJECT_ANY && obj->type != type) ||
+ (obj->id != id))
obj = NULL;
mutex_unlock(&dev->mode_config.idr_mutex);
return obj;
}
+
+/**
+ * drm_mode_object_find - look up a drm object with static lifetime
+ * @dev: drm device
+ * @id: id of the mode object
+ * @type: type of the mode object
+ *
+ * Note that framebuffers cannot be looked up with this function - since those
+ * are reference counted, they need special treatment. This holds even for
+ * lookups with DRM_MODE_OBJECT_ANY (although such a lookup will simply return
+ * NULL rather than WARN_ON()).
+ */
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type)
+{
+ struct drm_mode_object *obj = NULL;
+
+ /* Framebuffers are reference counted and need their own lookup
+ * function. */
+ WARN_ON(type == DRM_MODE_OBJECT_FB);
+ obj = _object_find(dev, id, type);
+ /* don't leak out unref'd fb's */
+ if (obj && (obj->type == DRM_MODE_OBJECT_FB))
+ obj = NULL;
+ return obj;
+}
EXPORT_SYMBOL(drm_mode_object_find);
/**
* drm_framebuffer_init - initialize a framebuffer
* @dev: DRM device
- *
- * LOCKING:
- * Caller must hold mode config lock.
+ * @fb: framebuffer to be initialized
+ * @funcs: ... with these functions
*
* Allocates an ID for the framebuffer's parent mode object, sets its mode
* functions & device file and adds it to the master fd list.
*
- * RETURNS:
+ * IMPORTANT:
+ * This function publishes the fb and makes it available for concurrent access
+ * by other users, which means that by this point the fb _must_ be fully set up -
+ * since all the fb attributes are invariant over its lifetime, no further
+ * locking but only correct reference counting is required.
+ *
+ * Returns:
* Zero on success, error code on failure.
*/
int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
@@ -289,100 +474,306 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
{
int ret;
- ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
- if (ret) {
- return ret;
- }
-
+ mutex_lock(&dev->mode_config.fb_lock);
+ kref_init(&fb->refcount);
+ INIT_LIST_HEAD(&fb->filp_head);
fb->dev = dev;
fb->funcs = funcs;
+
+ ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
+ if (ret)
+ goto out;
+
+ /* Grab the idr reference. */
+ drm_framebuffer_reference(fb);
+
dev->mode_config.num_fb++;
list_add(&fb->head, &dev->mode_config.fb_list);
+out:
+ mutex_unlock(&dev->mode_config.fb_lock);
return ret;
}
EXPORT_SYMBOL(drm_framebuffer_init);
+static void drm_framebuffer_free(struct kref *kref)
+{
+ struct drm_framebuffer *fb =
+ container_of(kref, struct drm_framebuffer, refcount);
+ fb->funcs->destroy(fb);
+}
+
+static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *obj = NULL;
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ obj = idr_find(&dev->mode_config.crtc_idr, id);
+ if (!obj || (obj->type != DRM_MODE_OBJECT_FB) || (obj->id != id))
+ fb = NULL;
+ else
+ fb = obj_to_fb(obj);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return fb;
+}
+
+/**
+ * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
+ * @dev: drm device
+ * @id: id of the fb object
+ *
+ * If successful, this grabs an additional reference to the framebuffer -
+ * callers need to make sure to eventually unreference the returned framebuffer
+ * again, using @drm_framebuffer_unreference.
+ */
+struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ fb = __drm_framebuffer_lookup(dev, id);
+ if (fb)
+ drm_framebuffer_reference(fb);
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return fb;
+}
+EXPORT_SYMBOL(drm_framebuffer_lookup);
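
Every successful drm_framebuffer_lookup() must be paired with drm_framebuffer_unreference(); a sketch, with fb_id standing in for a handle from userspace:

static int my_peek_fb(struct drm_device *dev, uint32_t fb_id)
{
	struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, fb_id);

	if (!fb)
		return -ENOENT;

	/* ... inspect fb->width, fb->height, ... */

	drm_framebuffer_unreference(fb);	/* drop the lookup reference */
	return 0;
}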
+
+/**
+ * drm_framebuffer_unreference - unref a framebuffer
+ * @fb: framebuffer to unref
+ *
+ * This function decrements the fb's refcount and frees it if it drops to zero.
+ */
+void drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+ DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
+ kref_put(&fb->refcount, drm_framebuffer_free);
+}
+EXPORT_SYMBOL(drm_framebuffer_unreference);
+
+/**
+ * drm_framebuffer_reference - incr the fb refcnt
+ * @fb: framebuffer
+ *
+ * This function increments the fb's refcount.
+ */
+void drm_framebuffer_reference(struct drm_framebuffer *fb)
+{
+ DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
+ kref_get(&fb->refcount);
+}
+EXPORT_SYMBOL(drm_framebuffer_reference);
+
+static void drm_framebuffer_free_bug(struct kref *kref)
+{
+ BUG();
+}
+
+static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+ DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
+ kref_put(&fb->refcount, drm_framebuffer_free_bug);
+}
+
+/* dev->mode_config.fb_lock must be held! */
+static void __drm_framebuffer_unregister(struct drm_device *dev,
+ struct drm_framebuffer *fb)
+{
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ fb->base.id = 0;
+
+ __drm_framebuffer_unreference(fb);
+}
+
+/**
+ * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
+ * @fb: fb to unregister
+ *
+ * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
+ * those used for fbdev. Note that the caller must hold a reference of its own,
+ * i.e. the object may not be destroyed through this call (since it'll lead to a
+ * locking inversion).
+ */
+void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ /* Mark fb as reaped and drop idr ref. */
+ __drm_framebuffer_unregister(dev, fb);
+ mutex_unlock(&dev->mode_config.fb_lock);
+}
+EXPORT_SYMBOL(drm_framebuffer_unregister_private);
+
/**
* drm_framebuffer_cleanup - remove a framebuffer object
* @fb: framebuffer to remove
*
- * LOCKING:
- * Caller must hold mode config lock.
+ * Cleanup framebuffer. This function is intended to be used from the driver's
+ * ->destroy callback. It can also be used to clean up driver private
+ * framebuffers embedded into a larger structure.
*
- * Scans all the CRTCs in @dev's mode_config. If they're using @fb, removes
- * it, setting it to NULL.
+ * Note that this function does not remove the fb from active usage - if it is
+ * still used anywhere, hilarity can ensue since userspace could call getfb on
+ * the id and get back -EINVAL. Obviously no concern at driver unload time.
+ *
+ * Also, the framebuffer will not be removed from the lookup idr - for
+ * user-created framebuffers this will happen in the rmfb ioctl. For
+ * driver-private objects (e.g. for fbdev) drivers need to explicitly call
+ * drm_framebuffer_unregister_private.
*/
void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ list_del(&fb->head);
+ dev->mode_config.num_fb--;
+ mutex_unlock(&dev->mode_config.fb_lock);
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+/**
+ * drm_framebuffer_remove - remove and unreference a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * Scans all the CRTCs and planes in @dev's mode_config. If they're
+ * using @fb, removes it, setting it to NULL. Then drops the reference to the
+ * passed-in framebuffer. Might take the modeset locks.
+ *
+ * Note that this function optimizes the cleanup away if the caller holds the
+ * last reference to the framebuffer. It is also guaranteed to not take the
+ * modeset locks in this case.
+ */
+void drm_framebuffer_remove(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
struct drm_crtc *crtc;
+ struct drm_plane *plane;
struct drm_mode_set set;
int ret;
- /* remove from any CRTC */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->fb == fb) {
- /* should turn off the crtc */
- memset(&set, 0, sizeof(struct drm_mode_set));
- set.crtc = crtc;
- set.fb = NULL;
- ret = crtc->funcs->set_config(&set);
- if (ret)
- DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+ WARN_ON(!list_empty(&fb->filp_head));
+
+ /*
+ * drm ABI mandates that we remove any deleted framebuffers from active
+ * usage. But since most sane clients only remove framebuffers they no
+ * longer need, try to optimize this away.
+ *
+ * Since we're holding a reference ourselves, observing a refcount of 1
+ * means that we're the last holder and can skip it. Also, the refcount
+ * can never increase from 1 again, so we don't need any barriers or
+ * locks.
+ *
+ * Note that userspace could try to race with us and instate a new
+ * usage _after_ we've cleared all current ones. End result will be an
+ * in-use fb with fb-id == 0. Userspace is allowed to shoot itself in the
+ * foot this way.
+ */
+ if (atomic_read(&fb->refcount.refcount) > 1) {
+ drm_modeset_lock_all(dev);
+ /* remove from any CRTC */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->primary->fb == fb) {
+ /* should turn off the crtc */
+ memset(&set, 0, sizeof(struct drm_mode_set));
+ set.crtc = crtc;
+ set.fb = NULL;
+ ret = drm_mode_set_config_internal(&set);
+ if (ret)
+ DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+ }
+ }
+
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ if (plane->fb == fb)
+ drm_plane_force_disable(plane);
}
+ drm_modeset_unlock_all(dev);
}
- drm_mode_object_put(dev, &fb->base);
- list_del(&fb->head);
- dev->mode_config.num_fb--;
+ drm_framebuffer_unreference(fb);
}
-EXPORT_SYMBOL(drm_framebuffer_cleanup);
+EXPORT_SYMBOL(drm_framebuffer_remove);
+
+DEFINE_WW_CLASS(crtc_ww_class);
/**
- * drm_crtc_init - Initialise a new CRTC object
+ * drm_crtc_init_with_planes - Initialise a new CRTC object with
+ * specified primary and cursor planes.
* @dev: DRM device
* @crtc: CRTC object to init
+ * @primary: Primary plane for CRTC
+ * @cursor: Cursor plane for CRTC
* @funcs: callbacks for the new CRTC
*
- * LOCKING:
- * Caller must hold mode config lock.
+ * Inits a new object created as base part of a driver crtc object.
*
- * Inits a new object created as base part of an driver crtc object.
+ * Returns:
+ * Zero on success, error code on failure.
*/
-void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
- const struct drm_crtc_funcs *funcs)
+int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ void *cursor,
+ const struct drm_crtc_funcs *funcs)
{
+ struct drm_mode_config *config = &dev->mode_config;
+ int ret;
+
crtc->dev = dev;
crtc->funcs = funcs;
+ crtc->invert_dimensions = false;
- mutex_lock(&dev->mode_config.mutex);
- drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
+ drm_modeset_lock_all(dev);
+ drm_modeset_lock_init(&crtc->mutex);
+ /* dropped by _unlock_all(): */
+ drm_modeset_lock(&crtc->mutex, config->acquire_ctx);
- list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
- dev->mode_config.num_crtc++;
- mutex_unlock(&dev->mode_config.mutex);
+ ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
+ if (ret)
+ goto out;
+
+ crtc->base.properties = &crtc->properties;
+
+ list_add_tail(&crtc->head, &config->crtc_list);
+ config->num_crtc++;
+
+ crtc->primary = primary;
+ if (primary)
+ primary->possible_crtcs = 1 << drm_crtc_index(crtc);
+
+ out:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
}
-EXPORT_SYMBOL(drm_crtc_init);
+EXPORT_SYMBOL(drm_crtc_init_with_planes);
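
A sketch of a driver wiring a primary plane to a CRTC through the new initializer; my_crtc_funcs is hypothetical, and note that the cursor argument is still a void * placeholder at this stage:

static const struct drm_crtc_funcs my_crtc_funcs = {
	/* .set_config, .page_flip, .destroy, ... */
};

static int my_pipe_init(struct drm_device *dev, struct drm_crtc *crtc,
			struct drm_plane *primary)
{
	return drm_crtc_init_with_planes(dev, crtc, primary,
					 NULL /* no cursor plane */,
					 &my_crtc_funcs);
}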
/**
- * drm_crtc_cleanup - Cleans up the core crtc usage.
+ * drm_crtc_cleanup - Clean up the core crtc usage
* @crtc: CRTC to cleanup
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Cleanup @crtc. Removes from drm modesetting space
- * does NOT free object, caller does that.
+ * This function cleans up @crtc and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the crtc structure itself,
+ * this is the responsibility of the caller.
*/
void drm_crtc_cleanup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- if (crtc->gamma_store) {
- kfree(crtc->gamma_store);
- crtc->gamma_store = NULL;
- }
+ kfree(crtc->gamma_store);
+ crtc->gamma_store = NULL;
+
+ drm_modeset_lock_fini(&crtc->mutex);
drm_mode_object_put(dev, &crtc->base);
list_del(&crtc->head);
@@ -391,81 +782,113 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_cleanup);
/**
- * drm_mode_probed_add - add a mode to a connector's probed mode list
- * @connector: connector the new mode
- * @mode: mode data
- *
- * LOCKING:
- * Caller must hold mode config lock.
+ * drm_crtc_index - find the index of a registered CRTC
+ * @crtc: CRTC to find index for
*
- * Add @mode to @connector's mode list for later use.
+ * Given a registered CRTC, return the index of that CRTC within a DRM
+ * device's list of CRTCs.
*/
-void drm_mode_probed_add(struct drm_connector *connector,
- struct drm_display_mode *mode)
+unsigned int drm_crtc_index(struct drm_crtc *crtc)
{
- list_add(&mode->head, &connector->probed_modes);
+ unsigned int index = 0;
+ struct drm_crtc *tmp;
+
+ list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+ if (tmp == crtc)
+ return index;
+
+ index++;
+ }
+
+ BUG();
}
-EXPORT_SYMBOL(drm_mode_probed_add);
+EXPORT_SYMBOL(drm_crtc_index);
-/**
+/*
* drm_mode_remove - remove and free a mode
* @connector: connector list to modify
* @mode: mode to remove
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Remove @mode from @connector's mode list, then free it.
*/
-void drm_mode_remove(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static void drm_mode_remove(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
list_del(&mode->head);
- kfree(mode);
+ drm_mode_destroy(connector->dev, mode);
}
-EXPORT_SYMBOL(drm_mode_remove);
/**
* drm_connector_init - Init a preallocated connector
* @dev: DRM device
* @connector: the connector to init
* @funcs: callbacks for this connector
- * @name: user visible name of the connector
- *
- * LOCKING:
- * Caller must hold @dev's mode_config lock.
+ * @connector_type: user visible type of the connector
*
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
*/
-void drm_connector_init(struct drm_device *dev,
- struct drm_connector *connector,
- const struct drm_connector_funcs *funcs,
- int connector_type)
+int drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type)
{
- mutex_lock(&dev->mode_config.mutex);
+ int ret;
+ struct ida *connector_ida =
+ &drm_connector_enum_list[connector_type].ida;
+ drm_modeset_lock_all(dev);
+
+ ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+ if (ret)
+ goto out_unlock;
+
+ connector->base.properties = &connector->properties;
connector->dev = dev;
connector->funcs = funcs;
- drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
connector->connector_type = connector_type;
connector->connector_type_id =
- ++drm_connector_enum_list[connector_type].count; /* TODO */
- INIT_LIST_HEAD(&connector->user_modes);
+ ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
+ if (connector->connector_type_id < 0) {
+ ret = connector->connector_type_id;
+ goto out_put;
+ }
+ connector->name =
+ kasprintf(GFP_KERNEL, "%s-%d",
+ drm_connector_enum_list[connector_type].name,
+ connector->connector_type_id);
+ if (!connector->name) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
connector->edid_blob_ptr = NULL;
+ connector->status = connector_status_unknown;
list_add_tail(&connector->head, &dev->mode_config.connector_list);
dev->mode_config.num_connector++;
- drm_connector_attach_property(connector,
- dev->mode_config.edid_property, 0);
+ if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.edid_property,
+ 0);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dpms_property, 0);
- mutex_unlock(&dev->mode_config.mutex);
+out_put:
+ if (ret)
+ drm_mode_object_put(dev, &connector->base);
+
+out_unlock:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
}
EXPORT_SYMBOL(drm_connector_init);
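
Since drm_connector_init() now allocates an id, an ida-based instance number and a kasprintf'd name, any of which can fail, callers must check its return value; a sketch with hypothetical funcs:

static const struct drm_connector_funcs my_connector_funcs = {
	/* .detect, .fill_modes, .destroy, ... */
};

static int my_connector_create(struct drm_device *dev,
			       struct drm_connector *connector)
{
	int ret = drm_connector_init(dev, connector, &my_connector_funcs,
				     DRM_MODE_CONNECTOR_HDMIA);

	if (ret)
		return ret;	/* idr, ida or name allocation failed */

	/* connector->name is now e.g. "HDMI-A-1" */
	return 0;
}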
@@ -473,9 +896,6 @@ EXPORT_SYMBOL(drm_connector_init);
* drm_connector_cleanup - cleans up an initialised connector
* @connector: connector to cleanup
*
- * LOCKING:
- * Caller must hold @dev's mode_config lock.
- *
* Cleans up the connector but doesn't free the object.
*/
void drm_connector_cleanup(struct drm_connector *connector)
@@ -489,95 +909,311 @@ void drm_connector_cleanup(struct drm_connector *connector)
list_for_each_entry_safe(mode, t, &connector->modes, head)
drm_mode_remove(connector, mode);
- list_for_each_entry_safe(mode, t, &connector->user_modes, head)
- drm_mode_remove(connector, mode);
+ ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
+ connector->connector_type_id);
- kfree(connector->fb_helper_private);
- mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &connector->base);
+ kfree(connector->name);
+ connector->name = NULL;
list_del(&connector->head);
- mutex_unlock(&dev->mode_config.mutex);
+ dev->mode_config.num_connector--;
}
EXPORT_SYMBOL(drm_connector_cleanup);
-void drm_encoder_init(struct drm_device *dev,
+/**
+ * drm_connector_unplug_all - unregister connector userspace interfaces
+ * @dev: drm device
+ *
+ * This function unregisters all connector userspace interfaces in sysfs. It
+ * should be called when the device is disconnected, e.g. from a USB driver's
+ * ->disconnect callback.
+ */
+void drm_connector_unplug_all(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+
+ /* taking the mode config mutex ends up in a clash with sysfs */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_sysfs_connector_remove(connector);
+
+}
+EXPORT_SYMBOL(drm_connector_unplug_all);
+
+/**
+ * drm_bridge_init - initialize a drm transcoder/bridge
+ * @dev: drm device
+ * @bridge: transcoder/bridge to set up
+ * @funcs: bridge function table
+ *
+ * Initialises a preallocated bridge. Bridges should be
+ * subclassed as part of driver bridge objects.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
+ const struct drm_bridge_funcs *funcs)
+{
+ int ret;
+
+ drm_modeset_lock_all(dev);
+
+ ret = drm_mode_object_get(dev, &bridge->base, DRM_MODE_OBJECT_BRIDGE);
+ if (ret)
+ goto out;
+
+ bridge->dev = dev;
+ bridge->funcs = funcs;
+
+ list_add_tail(&bridge->head, &dev->mode_config.bridge_list);
+ dev->mode_config.num_bridge++;
+
+ out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+EXPORT_SYMBOL(drm_bridge_init);
+
+/**
+ * drm_bridge_cleanup - cleans up an initialised bridge
+ * @bridge: bridge to cleanup
+ *
+ * Cleans up the bridge but doesn't free the object.
+ */
+void drm_bridge_cleanup(struct drm_bridge *bridge)
+{
+ struct drm_device *dev = bridge->dev;
+
+ drm_modeset_lock_all(dev);
+ drm_mode_object_put(dev, &bridge->base);
+ list_del(&bridge->head);
+ dev->mode_config.num_bridge--;
+ drm_modeset_unlock_all(dev);
+}
+EXPORT_SYMBOL(drm_bridge_cleanup);
+
+/**
+ * drm_encoder_init - Init a preallocated encoder
+ * @dev: drm device
+ * @encoder: the encoder to init
+ * @funcs: callbacks for this encoder
+ * @encoder_type: user visible type of the encoder
+ *
+ * Initialises a preallocated encoder. Encoders should be
+ * subclassed as part of driver encoder objects.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type)
{
- mutex_lock(&dev->mode_config.mutex);
+ int ret;
- encoder->dev = dev;
+ drm_modeset_lock_all(dev);
+
+ ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+ if (ret)
+ goto out_unlock;
- drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+ encoder->dev = dev;
encoder->encoder_type = encoder_type;
encoder->funcs = funcs;
+ encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
+ drm_encoder_enum_list[encoder_type].name,
+ encoder->base.id);
+ if (!encoder->name) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
dev->mode_config.num_encoder++;
- mutex_unlock(&dev->mode_config.mutex);
+out_put:
+ if (ret)
+ drm_mode_object_put(dev, &encoder->base);
+
+out_unlock:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
}
EXPORT_SYMBOL(drm_encoder_init);
+/**
+ * drm_encoder_cleanup - cleans up an initialised encoder
+ * @encoder: encoder to cleanup
+ *
+ * Cleans up the encoder but doesn't free the object.
+ */
void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &encoder->base);
+ kfree(encoder->name);
+ encoder->name = NULL;
list_del(&encoder->head);
- mutex_unlock(&dev->mode_config.mutex);
+ dev->mode_config.num_encoder--;
+ drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_encoder_cleanup);
/**
- * drm_mode_create - create a new display mode
+ * drm_universal_plane_init - Initialize a new universal plane object
* @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @format_count: number of elements in @formats
+ * @type: type of plane (overlay, primary, cursor)
*
- * LOCKING:
- * Caller must hold DRM mode_config lock.
+ * Initializes a plane object of type @type.
*
- * Create a new drm_display_mode, give it an ID, and return it.
- *
- * RETURNS:
- * Pointer to new mode on success, NULL on error.
+ * Returns:
+ * Zero on success, error code on failure.
*/
-struct drm_display_mode *drm_mode_create(struct drm_device *dev)
+int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ enum drm_plane_type type)
{
- struct drm_display_mode *nmode;
+ int ret;
- nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
- if (!nmode)
- return NULL;
+ drm_modeset_lock_all(dev);
+
+ ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+ if (ret)
+ goto out;
+
+ plane->base.properties = &plane->properties;
+ plane->dev = dev;
+ plane->funcs = funcs;
+ plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
+ GFP_KERNEL);
+ if (!plane->format_types) {
+ DRM_DEBUG_KMS("out of memory when allocating plane\n");
+ drm_mode_object_put(dev, &plane->base);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+ plane->format_count = format_count;
+ plane->possible_crtcs = possible_crtcs;
+ plane->type = type;
+
+ list_add_tail(&plane->head, &dev->mode_config.plane_list);
+ dev->mode_config.num_total_plane++;
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+ dev->mode_config.num_overlay_plane++;
- drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE);
- return nmode;
+ drm_object_attach_property(&plane->base,
+ dev->mode_config.plane_type_property,
+ plane->type);
+
+ out:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
}
-EXPORT_SYMBOL(drm_mode_create);
+EXPORT_SYMBOL(drm_universal_plane_init);
/**
- * drm_mode_destroy - remove a mode
+ * drm_plane_init - Initialize a legacy plane
* @dev: DRM device
- * @mode: mode to remove
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @format_count: number of elements in @formats
+ * @is_primary: plane type (primary vs overlay)
+ *
+ * Legacy API to initialize a DRM plane.
+ *
+ * New drivers should call drm_universal_plane_init() instead.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ bool is_primary)
+{
+ enum drm_plane_type type;
+
+ type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+ return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+ formats, format_count, type);
+}
+EXPORT_SYMBOL(drm_plane_init);
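
A sketch of the relationship between the two entry points: the legacy wrapper only distinguishes primary from overlay, while new code passes an explicit type to drm_universal_plane_init(). my_formats and my_plane_funcs are placeholders:

static const uint32_t my_formats[] = { DRM_FORMAT_XRGB8888 };

static int my_overlay_init(struct drm_device *dev, struct drm_plane *plane,
			   const struct drm_plane_funcs *my_plane_funcs)
{
	/* same as drm_universal_plane_init(..., DRM_PLANE_TYPE_OVERLAY) */
	return drm_plane_init(dev, plane, 0x1 /* CRTC 0 only */,
			      my_plane_funcs, my_formats,
			      ARRAY_SIZE(my_formats),
			      false /* overlay, not primary */);
}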
+
+/**
+ * drm_plane_cleanup - Clean up the core plane usage
+ * @plane: plane to cleanup
+ *
+ * This function cleans up @plane and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the plane structure itself,
+ * this is the responsibility of the caller.
+ */
+void drm_plane_cleanup(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+
+ drm_modeset_lock_all(dev);
+ kfree(plane->format_types);
+ drm_mode_object_put(dev, &plane->base);
+
+ BUG_ON(list_empty(&plane->head));
+
+ list_del(&plane->head);
+ dev->mode_config.num_total_plane--;
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+ dev->mode_config.num_overlay_plane--;
+ drm_modeset_unlock_all(dev);
+}
+EXPORT_SYMBOL(drm_plane_cleanup);
+
+/**
+ * drm_plane_force_disable - Forcibly disable a plane
+ * @plane: plane to disable
*
- * LOCKING:
- * Caller must hold mode config lock.
+ * Forces the plane to be disabled.
*
- * Free @mode's unique identifier, then free it.
+ * Used when the plane's current framebuffer is destroyed,
+ * and when restoring fbdev mode.
*/
-void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+void drm_plane_force_disable(struct drm_plane *plane)
{
- drm_mode_object_put(dev, &mode->base);
+ struct drm_framebuffer *old_fb = plane->fb;
+ int ret;
- kfree(mode);
+ if (!old_fb)
+ return;
+
+ ret = plane->funcs->disable_plane(plane);
+ if (ret) {
+ DRM_ERROR("failed to disable plane with busy fb\n");
+ return;
+ }
+ /* disconnect the plane from the fb and crtc: */
+ __drm_framebuffer_unreference(old_fb);
+ plane->fb = NULL;
+ plane->crtc = NULL;
}
-EXPORT_SYMBOL(drm_mode_destroy);
+EXPORT_SYMBOL(drm_plane_force_disable);
static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
{
struct drm_property *edid;
struct drm_property *dpms;
- int i;
/*
* Standard properties (apply to all connectors)
@@ -587,16 +1223,29 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
"EDID", 0);
dev->mode_config.edid_property = edid;
- dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "DPMS", ARRAY_SIZE(drm_dpms_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++)
- drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type,
- drm_dpms_enum_list[i].name);
+ dpms = drm_property_create_enum(dev, 0,
+ "DPMS", drm_dpms_enum_list,
+ ARRAY_SIZE(drm_dpms_enum_list));
dev->mode_config.dpms_property = dpms;
return 0;
}
+static int drm_mode_create_standard_plane_properties(struct drm_device *dev)
+{
+ struct drm_property *type;
+
+ /*
+ * Standard properties (apply to all planes)
+ */
+ type = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "type", drm_plane_type_enum_list,
+ ARRAY_SIZE(drm_plane_type_enum_list));
+ dev->mode_config.plane_type_property = type;
+
+ return 0;
+}
+
/**
* drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
* @dev: DRM device
@@ -607,30 +1256,21 @@ int drm_mode_create_dvi_i_properties(struct drm_device *dev)
{
struct drm_property *dvi_i_selector;
struct drm_property *dvi_i_subconnector;
- int i;
if (dev->mode_config.dvi_i_select_subconnector_property)
return 0;
dvi_i_selector =
- drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ drm_property_create_enum(dev, 0,
"select subconnector",
+ drm_dvi_i_select_enum_list,
ARRAY_SIZE(drm_dvi_i_select_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++)
- drm_property_add_enum(dvi_i_selector, i,
- drm_dvi_i_select_enum_list[i].type,
- drm_dvi_i_select_enum_list[i].name);
dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
- dvi_i_subconnector =
- drm_property_create(dev, DRM_MODE_PROP_ENUM |
- DRM_MODE_PROP_IMMUTABLE,
+ dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"subconnector",
+ drm_dvi_i_subconnector_enum_list,
ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++)
- drm_property_add_enum(dvi_i_subconnector, i,
- drm_dvi_i_subconnector_enum_list[i].type,
- drm_dvi_i_subconnector_enum_list[i].name);
dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
return 0;
@@ -661,51 +1301,33 @@ int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
/*
* Basic connector properties
*/
- tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ tv_selector = drm_property_create_enum(dev, 0,
"select subconnector",
+ drm_tv_select_enum_list,
ARRAY_SIZE(drm_tv_select_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++)
- drm_property_add_enum(tv_selector, i,
- drm_tv_select_enum_list[i].type,
- drm_tv_select_enum_list[i].name);
dev->mode_config.tv_select_subconnector_property = tv_selector;
tv_subconnector =
- drm_property_create(dev, DRM_MODE_PROP_ENUM |
- DRM_MODE_PROP_IMMUTABLE, "subconnector",
+ drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "subconnector",
+ drm_tv_subconnector_enum_list,
ARRAY_SIZE(drm_tv_subconnector_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++)
- drm_property_add_enum(tv_subconnector, i,
- drm_tv_subconnector_enum_list[i].type,
- drm_tv_subconnector_enum_list[i].name);
dev->mode_config.tv_subconnector_property = tv_subconnector;
/*
* Other, TV specific properties: margins & TV modes.
*/
dev->mode_config.tv_left_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "left margin", 2);
- dev->mode_config.tv_left_margin_property->values[0] = 0;
- dev->mode_config.tv_left_margin_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "left margin", 0, 100);
dev->mode_config.tv_right_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "right margin", 2);
- dev->mode_config.tv_right_margin_property->values[0] = 0;
- dev->mode_config.tv_right_margin_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "right margin", 0, 100);
dev->mode_config.tv_top_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "top margin", 2);
- dev->mode_config.tv_top_margin_property->values[0] = 0;
- dev->mode_config.tv_top_margin_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "top margin", 0, 100);
dev->mode_config.tv_bottom_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "bottom margin", 2);
- dev->mode_config.tv_bottom_margin_property->values[0] = 0;
- dev->mode_config.tv_bottom_margin_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "bottom margin", 0, 100);
dev->mode_config.tv_mode_property =
drm_property_create(dev, DRM_MODE_PROP_ENUM,
@@ -715,40 +1337,22 @@ int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
i, modes[i]);
dev->mode_config.tv_brightness_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "brightness", 2);
- dev->mode_config.tv_brightness_property->values[0] = 0;
- dev->mode_config.tv_brightness_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "brightness", 0, 100);
dev->mode_config.tv_contrast_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "contrast", 2);
- dev->mode_config.tv_contrast_property->values[0] = 0;
- dev->mode_config.tv_contrast_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "contrast", 0, 100);
dev->mode_config.tv_flicker_reduction_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "flicker reduction", 2);
- dev->mode_config.tv_flicker_reduction_property->values[0] = 0;
- dev->mode_config.tv_flicker_reduction_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
dev->mode_config.tv_overscan_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "overscan", 2);
- dev->mode_config.tv_overscan_property->values[0] = 0;
- dev->mode_config.tv_overscan_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "overscan", 0, 100);
dev->mode_config.tv_saturation_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "saturation", 2);
- dev->mode_config.tv_saturation_property->values[0] = 0;
- dev->mode_config.tv_saturation_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "saturation", 0, 100);
dev->mode_config.tv_hue_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "hue", 2);
- dev->mode_config.tv_hue_property->values[0] = 0;
- dev->mode_config.tv_hue_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "hue", 0, 100);
return 0;
}
@@ -764,18 +1368,14 @@ EXPORT_SYMBOL(drm_mode_create_tv_properties);
int drm_mode_create_scaling_mode_property(struct drm_device *dev)
{
struct drm_property *scaling_mode;
- int i;
if (dev->mode_config.scaling_mode_property)
return 0;
scaling_mode =
- drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
+ drm_property_create_enum(dev, 0, "scaling mode",
+ drm_scaling_mode_enum_list,
ARRAY_SIZE(drm_scaling_mode_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++)
- drm_property_add_enum(scaling_mode, i,
- drm_scaling_mode_enum_list[i].type,
- drm_scaling_mode_enum_list[i].name);
dev->mode_config.scaling_mode_property = scaling_mode;
@@ -784,34 +1384,6 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
/**
- * drm_mode_create_dithering_property - create dithering property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_dithering_property(struct drm_device *dev)
-{
- struct drm_property *dithering_mode;
- int i;
-
- if (dev->mode_config.dithering_mode_property)
- return 0;
-
- dithering_mode =
- drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering",
- ARRAY_SIZE(drm_dithering_mode_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++)
- drm_property_add_enum(dithering_mode, i,
- drm_dithering_mode_enum_list[i].type,
- drm_dithering_mode_enum_list[i].name);
- dev->mode_config.dithering_mode_property = dithering_mode;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dithering_property);
-
-/**
* drm_mode_create_dirty_info_property - create dirty info property
* @dev: DRM device
*
@@ -821,71 +1393,29 @@ EXPORT_SYMBOL(drm_mode_create_dithering_property);
int drm_mode_create_dirty_info_property(struct drm_device *dev)
{
struct drm_property *dirty_info;
- int i;
if (dev->mode_config.dirty_info_property)
return 0;
dirty_info =
- drm_property_create(dev, DRM_MODE_PROP_ENUM |
- DRM_MODE_PROP_IMMUTABLE,
+ drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"dirty",
+ drm_dirty_info_enum_list,
ARRAY_SIZE(drm_dirty_info_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
- drm_property_add_enum(dirty_info, i,
- drm_dirty_info_enum_list[i].type,
- drm_dirty_info_enum_list[i].name);
dev->mode_config.dirty_info_property = dirty_info;
return 0;
}
EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
-/**
- * drm_mode_config_init - initialize DRM mode_configuration structure
- * @dev: DRM device
- *
- * LOCKING:
- * None, should happen single threaded at init time.
- *
- * Initialize @dev's mode_config structure, used for tracking the graphics
- * configuration of @dev.
- */
-void drm_mode_config_init(struct drm_device *dev)
-{
- mutex_init(&dev->mode_config.mutex);
- mutex_init(&dev->mode_config.idr_mutex);
- INIT_LIST_HEAD(&dev->mode_config.fb_list);
- INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
- INIT_LIST_HEAD(&dev->mode_config.crtc_list);
- INIT_LIST_HEAD(&dev->mode_config.connector_list);
- INIT_LIST_HEAD(&dev->mode_config.encoder_list);
- INIT_LIST_HEAD(&dev->mode_config.property_list);
- INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
- idr_init(&dev->mode_config.crtc_idr);
-
- mutex_lock(&dev->mode_config.mutex);
- drm_mode_create_standard_connector_properties(dev);
- mutex_unlock(&dev->mode_config.mutex);
-
- /* Just to be sure */
- dev->mode_config.num_fb = 0;
- dev->mode_config.num_connector = 0;
- dev->mode_config.num_crtc = 0;
- dev->mode_config.num_encoder = 0;
-}
-EXPORT_SYMBOL(drm_mode_config_init);
-
-int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
{
uint32_t total_objects = 0;
total_objects += dev->mode_config.num_crtc;
total_objects += dev->mode_config.num_connector;
total_objects += dev->mode_config.num_encoder;
-
- if (total_objects == 0)
- return -EINVAL;
+ total_objects += dev->mode_config.num_bridge;
group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
if (!group->id_list)
@@ -894,15 +1424,27 @@ int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
group->num_crtcs = 0;
group->num_connectors = 0;
group->num_encoders = 0;
+ group->num_bridges = 0;
return 0;
}
+void drm_mode_group_destroy(struct drm_mode_group *group)
+{
+ kfree(group->id_list);
+ group->id_list = NULL;
+}
+
+/*
+ * NOTE: Drivers shouldn't ever call drm_mode_group_init_legacy_group - it is
+ * the drm core's responsibility to set up mode control groups.
+ */
int drm_mode_group_init_legacy_group(struct drm_device *dev,
struct drm_mode_group *group)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
+ struct drm_bridge *bridge;
int ret;
if ((ret = drm_mode_group_init(dev, group)))
@@ -919,69 +1461,33 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
group->id_list[group->num_crtcs + group->num_encoders +
group->num_connectors++] = connector->base.id;
- return 0;
-}
-
-/**
- * drm_mode_config_cleanup - free up DRM mode_config info
- * @dev: DRM device
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Free up all the connectors and CRTCs associated with this DRM device, then
- * free up the framebuffers and associated buffer objects.
- *
- * FIXME: cleanup any dangling user buffer objects too
- */
-void drm_mode_config_cleanup(struct drm_device *dev)
-{
- struct drm_connector *connector, *ot;
- struct drm_crtc *crtc, *ct;
- struct drm_encoder *encoder, *enct;
- struct drm_framebuffer *fb, *fbt;
- struct drm_property *property, *pt;
-
- list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
- head) {
- encoder->funcs->destroy(encoder);
- }
-
- list_for_each_entry_safe(connector, ot,
- &dev->mode_config.connector_list, head) {
- connector->funcs->destroy(connector);
- }
-
- list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
- head) {
- drm_property_destroy(dev, property);
- }
-
- list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
- fb->funcs->destroy(fb);
- }
-
- list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
- crtc->funcs->destroy(crtc);
- }
+ list_for_each_entry(bridge, &dev->mode_config.bridge_list, head)
+ group->id_list[group->num_crtcs + group->num_encoders +
+ group->num_connectors + group->num_bridges++] =
+ bridge->base.id;
+ return 0;
}
-EXPORT_SYMBOL(drm_mode_config_cleanup);
+EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
/**
* drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
* @out: drm_mode_modeinfo struct to return to the user
* @in: drm_display_mode to use
*
- * LOCKING:
- * None.
- *
* Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
* the user.
*/
-void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
- struct drm_display_mode *in)
+static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
+ const struct drm_display_mode *in)
{
+ WARN(in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX ||
+ in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX ||
+ in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX ||
+ in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX ||
+ in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX,
+ "timing values too large for mode info\n");
+
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
@@ -1001,19 +1507,25 @@ void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
}
/**
- * drm_crtc_convert_to_umode - convert a modeinfo into a drm_display_mode
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
* @out: drm_display_mode to return to the user
* @in: drm_mode_modeinfo to use
*
- * LOCKING:
- * None.
- *
* Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
* the caller.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
*/
-void drm_crtc_convert_umode(struct drm_display_mode *out,
- struct drm_mode_modeinfo *in)
+static int drm_crtc_convert_umode(struct drm_display_mode *out,
+ const struct drm_mode_modeinfo *in)
{
+ if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
+ return -ERANGE;
+
+ if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
+ return -EINVAL;
+
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
@@ -1030,24 +1542,22 @@ void drm_crtc_convert_umode(struct drm_display_mode *out,
out->type = in->type;
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+
+ return 0;
}
/**
* drm_mode_getresources - get graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Construct a set of configuration description structures and return
* them to the user, including CRTC, connector and framebuffer configuration.
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getresources(struct drm_device *dev, void *data,
@@ -1071,8 +1581,11 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
uint32_t __user *encoder_id;
struct drm_mode_group *mode_group;
- mutex_lock(&dev->mode_config.mutex);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&file_priv->fbs_lock);
/*
* For the non-control nodes we need to limit the list of resources
* by IDs in the group list for this node
@@ -1080,9 +1593,26 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
list_for_each(lh, &file_priv->fbs)
fb_count++;
- mode_group = &file_priv->master->minor->mode_group;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+ /* handle this in 4 parts */
+ /* FBs */
+ if (card_res->count_fbs >= fb_count) {
+ copied = 0;
+ fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+ list_for_each_entry(fb, &file_priv->fbs, filp_head) {
+ if (put_user(fb->base.id, fb_id + copied)) {
+ mutex_unlock(&file_priv->fbs_lock);
+ return -EFAULT;
+ }
+ copied++;
+ }
+ }
+ card_res->count_fbs = fb_count;
+ mutex_unlock(&file_priv->fbs_lock);
+
+ drm_modeset_lock_all(dev);
+ if (!drm_is_primary_client(file_priv)) {
+ mode_group = NULL;
list_for_each(lh, &dev->mode_config.crtc_list)
crtc_count++;
@@ -1093,6 +1623,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
encoder_count++;
} else {
+ mode_group = &file_priv->master->minor->mode_group;
crtc_count = mode_group->num_crtcs;
connector_count = mode_group->num_connectors;
encoder_count = mode_group->num_encoders;
@@ -1103,29 +1634,14 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
card_res->max_width = dev->mode_config.max_width;
card_res->min_width = dev->mode_config.min_width;
- /* handle this in 4 parts */
- /* FBs */
- if (card_res->count_fbs >= fb_count) {
- copied = 0;
- fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
- list_for_each_entry(fb, &file_priv->fbs, head) {
- if (put_user(fb->base.id, fb_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_fbs = fb_count;
-
/* CRTCs */
if (card_res->count_crtcs >= crtc_count) {
copied = 0;
crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+ if (!mode_group) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
head) {
- DRM_DEBUG_KMS("CRTC ID is %d\n", crtc->base.id);
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (put_user(crtc->base.id, crtc_id + copied)) {
ret = -EFAULT;
goto out;
@@ -1149,12 +1665,12 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (card_res->count_encoders >= encoder_count) {
copied = 0;
encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+ if (!mode_group) {
list_for_each_entry(encoder,
&dev->mode_config.encoder_list,
head) {
- DRM_DEBUG_KMS("ENCODER ID is %d\n",
- encoder->base.id);
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+ encoder->name);
if (put_user(encoder->base.id, encoder_id +
copied)) {
ret = -EFAULT;
@@ -1180,12 +1696,13 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (card_res->count_connectors >= connector_count) {
copied = 0;
connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+ if (!mode_group) {
list_for_each_entry(connector,
&dev->mode_config.connector_list,
head) {
- DRM_DEBUG_KMS("CONNECTOR ID is %d\n",
- connector->base.id);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
if (put_user(connector->base.id,
connector_id + copied)) {
ret = -EFAULT;
@@ -1208,29 +1725,25 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
}
card_res->count_connectors = connector_count;
- DRM_DEBUG_KMS("Counted %d %d %d\n", card_res->count_crtcs,
+ DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
card_res->count_connectors, card_res->count_encoders);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
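
From userspace, libdrm's drmModeGetResources() wraps the two-pass pattern this ioctl implements (the first call sizes the ID arrays, the second fills them). A minimal sketch, assuming a card node at /dev/dri/card0:

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int main(void)
{
        int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
        drmModeRes *res;

        if (fd < 0)
                return 1;

        /* DRM_IOCTL_MODE_GETRESOURCES is issued twice under the hood:
         * once to learn the counts, once to copy the ID arrays. */
        res = drmModeGetResources(fd);
        if (!res)
                return 1;

        printf("fbs=%d crtcs=%d connectors=%d encoders=%d\n",
               res->count_fbs, res->count_crtcs,
               res->count_connectors, res->count_encoders);

        drmModeFreeResources(res);
        return 0;
}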
/**
* drm_mode_getcrtc - get CRTC configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Caller? (FIXME)
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Construct a CRTC configuration structure to return to the user.
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getcrtc(struct drm_device *dev,
@@ -1238,24 +1751,24 @@ int drm_mode_getcrtc(struct drm_device *dev,
{
struct drm_mode_crtc *crtc_resp = data;
struct drm_crtc *crtc;
- struct drm_mode_object *obj;
int ret = 0;
- mutex_lock(&dev->mode_config.mutex);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
- obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- ret = -EINVAL;
+ drm_modeset_lock_all(dev);
+
+ crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
+ if (!crtc) {
+ ret = -ENOENT;
goto out;
}
- crtc = obj_to_crtc(obj);
crtc_resp->x = crtc->x;
crtc_resp->y = crtc->y;
crtc_resp->gamma_size = crtc->gamma_size;
- if (crtc->fb)
- crtc_resp->fb_id = crtc->fb->base.id;
+ if (crtc->primary->fb)
+ crtc_resp->fb_id = crtc->primary->fb->base.id;
else
crtc_resp->fb_id = 0;
@@ -1269,32 +1782,40 @@ int drm_mode_getcrtc(struct drm_device *dev,
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
+static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+ const struct drm_file *file_priv)
+{
+ /*
+ * If user-space hasn't configured the driver to expose the stereo 3D
+ * modes, don't expose them.
+ */
+ if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
+ return false;
+
+ return true;
+}
+
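The stereo_allowed flag checked above is set per-file via the client cap ioctl. A minimal sketch of the userspace side, using libdrm:

#include <xf86drm.h>

static int enable_stereo_modes(int fd)
{
        /* Sets file_priv->stereo_allowed, so stereo 3D modes pass
         * drm_mode_expose_to_userspace() in later getconnector calls. */
        return drmSetClientCap(fd, DRM_CLIENT_CAP_STEREO_3D, 1);
}
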
/**
* drm_mode_getconnector - get connector configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Caller? (FIXME)
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Construct a connector configuration structure to return to the user.
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getconnector(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_connector *out_resp = data;
- struct drm_mode_object *obj;
struct drm_connector *connector;
struct drm_display_mode *mode;
int mode_count = 0;
@@ -1309,25 +1830,22 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
uint64_t __user *prop_values;
uint32_t __user *encoder_ptr;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
- DRM_DEBUG_KMS("connector id %d:\n", out_resp->connector_id);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, out_resp->connector_id,
- DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- ret = -EINVAL;
+ connector = drm_connector_find(dev, out_resp->connector_id);
+ if (!connector) {
+ ret = -ENOENT;
goto out;
}
- connector = obj_to_connector(obj);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- props_count++;
- }
- }
+ props_count = connector->properties.count;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
@@ -1343,7 +1861,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
/* delayed so we get modes regardless of pre-fill_modes state */
list_for_each_entry(mode, &connector->modes, head)
- mode_count++;
+ if (drm_mode_expose_to_userspace(mode, file_priv))
+ mode_count++;
out_resp->connector_id = connector->base.id;
out_resp->connector_type = connector->connector_type;
@@ -1352,10 +1871,12 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out_resp->mm_height = connector->display_info.height_mm;
out_resp->subpixel = connector->display_info.subpixel_order;
out_resp->connection = connector->status;
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
if (connector->encoder)
out_resp->encoder_id = connector->encoder->base.id;
else
out_resp->encoder_id = 0;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
/*
* This ioctl is called twice, once to determine how much space is
@@ -1363,8 +1884,11 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
*/
if ((out_resp->count_modes >= mode_count) && mode_count) {
copied = 0;
- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
list_for_each_entry(mode, &connector->modes, head) {
+ if (!drm_mode_expose_to_userspace(mode, file_priv))
+ continue;
+
drm_crtc_convert_to_umode(&u_mode, mode);
if (copy_to_user(mode_ptr + copied,
&u_mode, sizeof(u_mode))) {
@@ -1378,30 +1902,28 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if ((out_resp->count_props >= props_count) && props_count) {
copied = 0;
- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- if (put_user(connector->property_ids[i],
- prop_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
+ for (i = 0; i < connector->properties.count; i++) {
+ if (put_user(connector->properties.ids[i],
+ prop_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
- if (put_user(connector->property_values[i],
- prop_values + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
+ if (put_user(connector->properties.values[i],
+ prop_values + copied)) {
+ ret = -EFAULT;
+ goto out;
}
+ copied++;
}
}
out_resp->count_props = props_count;
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
if (put_user(connector->encoder_ids[i],
@@ -1417,25 +1939,39 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out:
mutex_unlock(&dev->mode_config.mutex);
+
return ret;
}
+/**
+ * drm_mode_getencoder - get encoder configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Construct an encoder configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_getencoder(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_encoder *enc_resp = data;
- struct drm_mode_object *obj;
struct drm_encoder *encoder;
int ret = 0;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, enc_resp->encoder_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj) {
- ret = -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ encoder = drm_encoder_find(dev, enc_resp->encoder_id);
+ if (!encoder) {
+ ret = -ENOENT;
goto out;
}
- encoder = obj_to_encoder(obj);
if (encoder->crtc)
enc_resp->crtc_id = encoder->crtc->base.id;
@@ -1447,25 +1983,386 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
enc_resp->possible_clones = encoder->possible_clones;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
/**
- * drm_mode_setcrtc - set CRTC configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
+ * drm_mode_getplane_res - enumerate all plane resources
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
*
- * LOCKING:
- * Caller? (FIXME)
+ * Construct a list of plane ids to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_plane_res *plane_resp = data;
+ struct drm_mode_config *config;
+ struct drm_plane *plane;
+ uint32_t __user *plane_ptr;
+ int copied = 0, ret = 0;
+ unsigned num_planes;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ config = &dev->mode_config;
+
+ if (file_priv->universal_planes)
+ num_planes = config->num_total_plane;
+ else
+ num_planes = config->num_overlay_plane;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (num_planes &&
+ (plane_resp->count_planes >= num_planes)) {
+ plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
+
+ list_for_each_entry(plane, &config->plane_list, head) {
+ /*
+ * Unless userspace set the 'universal planes'
+ * capability bit, only advertise overlays.
+ */
+ if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
+ !file_priv->universal_planes)
+ continue;
+
+ if (put_user(plane->base.id, plane_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ plane_resp->count_planes = num_planes;
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
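The universal_planes gate above has a matching client cap; without it the kernel only lists overlay planes. A sketch of enumerating planes from userspace with libdrm:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int main(void)
{
        int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
        drmModePlaneRes *pres;
        uint32_t i;

        if (fd < 0)
                return 1;

        /* Opt in to seeing primary (and cursor) planes too. */
        drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);

        pres = drmModeGetPlaneResources(fd);
        if (!pres)
                return 1;

        for (i = 0; i < pres->count_planes; i++)
                printf("plane %u\n", pres->planes[i]);

        drmModeFreePlaneResources(pres);
        return 0;
}
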
+/**
+ * drm_mode_getplane - get plane configuration
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Construct a plane configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_plane *plane_resp = data;
+ struct drm_plane *plane;
+ uint32_t __user *format_ptr;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ plane = drm_plane_find(dev, plane_resp->plane_id);
+ if (!plane) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ if (plane->crtc)
+ plane_resp->crtc_id = plane->crtc->base.id;
+ else
+ plane_resp->crtc_id = 0;
+
+ if (plane->fb)
+ plane_resp->fb_id = plane->fb->base.id;
+ else
+ plane_resp->fb_id = 0;
+
+ plane_resp->plane_id = plane->base.id;
+ plane_resp->possible_crtcs = plane->possible_crtcs;
+ plane_resp->gamma_size = 0;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (plane->format_count &&
+ (plane_resp->count_format_types >= plane->format_count)) {
+ format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
+ if (copy_to_user(format_ptr,
+ plane->format_types,
+ sizeof(uint32_t) * plane->format_count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ plane_resp->count_format_types = plane->format_count;
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/**
+ * drm_mode_setplane - configure a plane's configuration
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set plane configuration, including placement, fb, scaling, and other
+ * factors; passing a zero fb_id disables the plane.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_setplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_set_plane *plane_req = data;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+ int ret = 0;
+ unsigned int fb_width, fb_height;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ /*
+ * First, find the plane, crtc, and fb objects. If not available,
+ * we don't bother to call the driver.
+ */
+ plane = drm_plane_find(dev, plane_req->plane_id);
+ if (!plane) {
+ DRM_DEBUG_KMS("Unknown plane ID %d\n",
+ plane_req->plane_id);
+ return -ENOENT;
+ }
+
+ /* No fb means shut it down */
+ if (!plane_req->fb_id) {
+ drm_modeset_lock_all(dev);
+ old_fb = plane->fb;
+ ret = plane->funcs->disable_plane(plane);
+ if (!ret) {
+ plane->crtc = NULL;
+ plane->fb = NULL;
+ } else {
+ old_fb = NULL;
+ }
+ drm_modeset_unlock_all(dev);
+ goto out;
+ }
+
+ crtc = drm_crtc_find(dev, plane_req->crtc_id);
+ if (!crtc) {
+ DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+ plane_req->crtc_id);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ /* Check whether this plane is usable on this CRTC */
+ if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
+ DRM_DEBUG_KMS("Invalid crtc for plane\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+ if (!fb) {
+ DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+ plane_req->fb_id);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ /* Check whether this plane supports the fb pixel format. */
+ for (i = 0; i < plane->format_count; i++)
+ if (fb->pixel_format == plane->format_types[i])
+ break;
+ if (i == plane->format_count) {
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(fb->pixel_format));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fb_width = fb->width << 16;
+ fb_height = fb->height << 16;
+
+ /* Make sure source coordinates are inside the fb. */
+ if (plane_req->src_w > fb_width ||
+ plane_req->src_x > fb_width - plane_req->src_w ||
+ plane_req->src_h > fb_height ||
+ plane_req->src_y > fb_height - plane_req->src_h) {
+ DRM_DEBUG_KMS("Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+ plane_req->src_w >> 16,
+ ((plane_req->src_w & 0xffff) * 15625) >> 10,
+ plane_req->src_h >> 16,
+ ((plane_req->src_h & 0xffff) * 15625) >> 10,
+ plane_req->src_x >> 16,
+ ((plane_req->src_x & 0xffff) * 15625) >> 10,
+ plane_req->src_y >> 16,
+ ((plane_req->src_y & 0xffff) * 15625) >> 10);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /* Give drivers some help against integer overflows */
+ if (plane_req->crtc_w > INT_MAX ||
+ plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+ plane_req->crtc_h > INT_MAX ||
+ plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->crtc_x, plane_req->crtc_y);
+ ret = -ERANGE;
+ goto out;
+ }
+
+ drm_modeset_lock_all(dev);
+ old_fb = plane->fb;
+ ret = plane->funcs->update_plane(plane, crtc, fb,
+ plane_req->crtc_x, plane_req->crtc_y,
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->src_x, plane_req->src_y,
+ plane_req->src_w, plane_req->src_h);
+ if (!ret) {
+ plane->crtc = crtc;
+ plane->fb = fb;
+ fb = NULL;
+ } else {
+ old_fb = NULL;
+ }
+ drm_modeset_unlock_all(dev);
+
+out:
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ if (old_fb)
+ drm_framebuffer_unreference(old_fb);
+
+ return ret;
+}
+
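The source rectangle validated above is in 16.16 fixed point, while the CRTC rectangle uses plain integers, which is why callers shift by 16. A sketch of the libdrm side, where plane_id, crtc_id and fb_id are assumed to come from prior enumeration:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int show_full_fb(int fd, uint32_t plane_id, uint32_t crtc_id,
                        uint32_t fb_id, uint32_t w, uint32_t h)
{
        return drmModeSetPlane(fd, plane_id, crtc_id, fb_id, 0,
                               0, 0, w, h,              /* crtc rect, int */
                               0, 0, w << 16, h << 16); /* src rect, 16.16 */
}
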
+/**
+ * drm_mode_set_config_internal - helper to call ->set_config
+ * @set: modeset config to set
+ *
+ * This is a little helper to wrap internal calls to the ->set_config driver
+ * interface. The only thing it adds is the correct framebuffer refcounting
+ * dance.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_set_config_internal(struct drm_mode_set *set)
+{
+ struct drm_crtc *crtc = set->crtc;
+ struct drm_framebuffer *fb;
+ struct drm_crtc *tmp;
+ int ret;
+
+ /*
+ * NOTE: ->set_config can also disable other crtcs (if we steal all
+ * connectors from it), hence we need to refcount the fbs across all
+ * crtcs. Atomic modeset will have saner semantics ...
+ */
+ list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
+ tmp->old_fb = tmp->primary->fb;
+
+ fb = set->fb;
+
+ ret = crtc->funcs->set_config(set);
+ if (ret == 0) {
+ crtc->primary->crtc = crtc;
+ crtc->primary->fb = fb;
+ }
+
+ list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+ if (tmp->primary->fb)
+ drm_framebuffer_reference(tmp->primary->fb);
+ if (tmp->old_fb)
+ drm_framebuffer_unreference(tmp->old_fb);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_set_config_internal);
+
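A hypothetical in-kernel caller (for instance fbdev emulation restoring a configuration) would fill a struct drm_mode_set and let the helper handle the refcounting; a minimal sketch:

static int restore_crtc(struct drm_crtc *crtc,
                        struct drm_display_mode *mode,
                        struct drm_framebuffer *fb,
                        struct drm_connector *connector)
{
        struct drm_mode_set set = {
                .crtc = crtc,
                .mode = mode,
                .fb = fb,
                .connectors = &connector,
                .num_connectors = 1,
        };

        /* Helper takes care of referencing/unreferencing the fbs of
         * every crtc the set_config call may touch. */
        return drm_mode_set_config_internal(&set);
}
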
+/**
+ * drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
+ * CRTC viewport
+ * @crtc: CRTC that framebuffer will be displayed on
+ * @x: x panning
+ * @y: y panning
+ * @mode: mode that framebuffer will be displayed under
+ * @fb: framebuffer to check size of
+ */
+int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+ int x, int y,
+ const struct drm_display_mode *mode,
+ const struct drm_framebuffer *fb)
+
+{
+ int hdisplay, vdisplay;
+
+ hdisplay = mode->hdisplay;
+ vdisplay = mode->vdisplay;
+
+ if (drm_mode_is_stereo(mode)) {
+ struct drm_display_mode adjusted = *mode;
+
+ drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
+ hdisplay = adjusted.crtc_hdisplay;
+ vdisplay = adjusted.crtc_vdisplay;
+ }
+
+ if (crtc->invert_dimensions)
+ swap(hdisplay, vdisplay);
+
+ if (hdisplay > fb->width ||
+ vdisplay > fb->height ||
+ x > fb->width - hdisplay ||
+ y > fb->height - vdisplay) {
+ DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+ fb->width, fb->height, hdisplay, vdisplay, x, y,
+ crtc->invert_dimensions ? " (inverted)" : "");
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_crtc_check_viewport);
+
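As a worked example: with a 1920x1080 framebuffer and a 1280x720 mode, panning is limited to x <= 1920 - 1280 = 640 and y <= 1080 - 720 = 360; larger offsets would scan out past the framebuffer and fail with -ENOSPC. For stereo modes the doubled crtc_hdisplay/crtc_vdisplay values are checked instead.
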
+/**
+ * drm_mode_setcrtc - set CRTC configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Build a new CRTC configuration based on user request.
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_setcrtc(struct drm_device *dev, void *data,
@@ -1473,53 +2370,72 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_mode_crtc *crtc_req = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc, *crtcfb;
+ struct drm_crtc *crtc;
struct drm_connector **connector_set = NULL, *connector;
struct drm_framebuffer *fb = NULL;
struct drm_display_mode *mode = NULL;
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
- int ret = 0;
+ int ret;
int i;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, crtc_req->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ /* For some reason crtc x/y offsets are signed internally. */
+ if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
+ return -ERANGE;
+
+ drm_modeset_lock_all(dev);
+ crtc = drm_crtc_find(dev, crtc_req->crtc_id);
+ if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
- ret = -EINVAL;
+ ret = -ENOENT;
goto out;
}
- crtc = obj_to_crtc(obj);
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (crtc_req->mode_valid) {
/* If we have a mode we need a framebuffer. */
/* If we pass -1, set the mode with the currently bound fb */
if (crtc_req->fb_id == -1) {
- list_for_each_entry(crtcfb,
- &dev->mode_config.crtc_list, head) {
- if (crtcfb == crtc) {
- DRM_DEBUG_KMS("Using current fb for "
- "setmode\n");
- fb = crtc->fb;
- }
+ if (!crtc->primary->fb) {
+ DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
+ ret = -EINVAL;
+ goto out;
}
+ fb = crtc->primary->fb;
+ /* Make refcounting symmetric with the lookup path. */
+ drm_framebuffer_reference(fb);
} else {
- obj = drm_mode_object_find(dev, crtc_req->fb_id,
- DRM_MODE_OBJECT_FB);
- if (!obj) {
+ fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
+ if (!fb) {
DRM_DEBUG_KMS("Unknown FB ID%d\n",
crtc_req->fb_id);
- ret = -EINVAL;
+ ret = -ENOENT;
goto out;
}
- fb = obj_to_fb(obj);
}
mode = drm_mode_create(dev);
- drm_crtc_convert_umode(mode, &crtc_req->mode);
+ if (!mode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = drm_crtc_convert_umode(mode, &crtc_req->mode);
+ if (ret) {
+ DRM_DEBUG_KMS("Invalid mode\n");
+ goto out;
+ }
+
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+
+ ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
+ mode, fb);
+ if (ret)
+ goto out;
+
}
if (crtc_req->count_connectors == 0 && mode) {
@@ -1553,21 +2469,22 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
}
for (i = 0; i < crtc_req->count_connectors; i++) {
- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
if (get_user(out_id, &set_connectors_ptr[i])) {
ret = -EFAULT;
goto out;
}
- obj = drm_mode_object_find(dev, out_id,
- DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
+ connector = drm_connector_find(dev, out_id);
+ if (!connector) {
DRM_DEBUG_KMS("Connector id %d unknown\n",
out_id);
- ret = -EINVAL;
+ ret = -ENOENT;
goto out;
}
- connector = obj_to_connector(obj);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
connector_set[i] = connector;
}
@@ -1580,242 +2497,546 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
set.connectors = connector_set;
set.num_connectors = crtc_req->count_connectors;
set.fb = fb;
- ret = crtc->funcs->set_config(&set);
+ ret = drm_mode_set_config_internal(&set);
out:
+ if (fb)
+ drm_framebuffer_unreference(fb);
+
kfree(connector_set);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_mode_destroy(dev, mode);
+ drm_modeset_unlock_all(dev);
return ret;
}
-int drm_mode_cursor_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+static int drm_mode_cursor_common(struct drm_device *dev,
+ struct drm_mode_cursor2 *req,
+ struct drm_file *file_priv)
{
- struct drm_mode_cursor *req = data;
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
int ret = 0;
- if (!req->flags) {
- DRM_ERROR("no operation set\n");
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- }
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
+ return -EINVAL;
+
+ crtc = drm_crtc_find(dev, req->crtc_id);
+ if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
- ret = -EINVAL;
- goto out;
+ return -ENOENT;
}
- crtc = obj_to_crtc(obj);
+ drm_modeset_lock(&crtc->mutex, NULL);
if (req->flags & DRM_MODE_CURSOR_BO) {
- if (!crtc->funcs->cursor_set) {
- DRM_ERROR("crtc does not support cursor\n");
+ if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
ret = -ENXIO;
goto out;
}
/* Turns off the cursor if handle is 0 */
- ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
- req->width, req->height);
+ if (crtc->funcs->cursor_set2)
+ ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
+ req->width, req->height, req->hot_x, req->hot_y);
+ else
+ ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+ req->width, req->height);
}
if (req->flags & DRM_MODE_CURSOR_MOVE) {
if (crtc->funcs->cursor_move) {
ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
} else {
- DRM_ERROR("crtc does not support cursor\n");
ret = -EFAULT;
goto out;
}
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock(&crtc->mutex);
+
return ret;
+
}
+
/**
- * drm_mode_addfb - add an FB to the graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
+ * drm_mode_cursor_ioctl - set CRTC's cursor configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
- * LOCKING:
- * Takes mode config lock.
+ * Set the cursor configuration based on user request.
*
- * Add a new FB to the specified CRTC, given a user request.
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_cursor *req = data;
+ struct drm_mode_cursor2 new_req;
+
+ memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
+ new_req.hot_x = new_req.hot_y = 0;
+
+ return drm_mode_cursor_common(dev, &new_req, file_priv);
+}
+
+/**
+ * drm_mode_cursor2_ioctl - set CRTC's cursor configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Set the cursor configuration based on user request. This implements the 2nd
+ * version of the cursor ioctl, which allows userspace to additionally specify
+ * the hotspot of the pointer.
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_cursor2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_cursor2 *req = data;
+ return drm_mode_cursor_common(dev, req, file_priv);
+}
+
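From userspace the hotspot-aware path maps to drmModeSetCursor2(). A minimal sketch, where bo_handle is assumed to name a 64x64 ARGB buffer created beforehand:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_pointer(int fd, uint32_t crtc_id, uint32_t bo_handle,
                       int32_t hot_x, int32_t hot_y)
{
        /* Falls back to DRM_IOCTL_MODE_CURSOR on kernels without
         * cursor2 support. */
        return drmModeSetCursor2(fd, crtc_id, bo_handle, 64, 64,
                                 hot_x, hot_y);
}
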
+/**
+ * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
+ * @bpp: bits per pixels
+ * @depth: bit depth per pixel
+ *
+ * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
+ * Useful in fbdev emulation code, since that deals in those values.
+ */
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+ uint32_t fmt;
+
+ switch (bpp) {
+ case 8:
+ fmt = DRM_FORMAT_C8;
+ break;
+ case 16:
+ if (depth == 15)
+ fmt = DRM_FORMAT_XRGB1555;
+ else
+ fmt = DRM_FORMAT_RGB565;
+ break;
+ case 24:
+ fmt = DRM_FORMAT_RGB888;
+ break;
+ case 32:
+ if (depth == 24)
+ fmt = DRM_FORMAT_XRGB8888;
+ else if (depth == 30)
+ fmt = DRM_FORMAT_XRGB2101010;
+ else
+ fmt = DRM_FORMAT_ARGB8888;
+ break;
+ default:
+ DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
+ fmt = DRM_FORMAT_XRGB8888;
+ break;
+ }
+
+ return fmt;
+}
+EXPORT_SYMBOL(drm_mode_legacy_fb_format);
+
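For example, the common bpp=32/depth=24 fbdev configuration maps to DRM_FORMAT_XRGB8888, bpp=16/depth=15 to DRM_FORMAT_XRGB1555, and bpp=16 with any other depth to DRM_FORMAT_RGB565.
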
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Add a new FB to the specified CRTC, given a user request. This is the
+ * original addfb ioctl, which only supported RGB formats.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_addfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_fb_cmd *r = data;
+ struct drm_mode_fb_cmd *or = data;
+ struct drm_mode_fb_cmd2 r = {};
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
int ret = 0;
+ /* Use new struct with format internally */
+ r.fb_id = or->fb_id;
+ r.width = or->width;
+ r.height = or->height;
+ r.pitches[0] = or->pitch;
+ r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+ r.handles[0] = or->handle;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if ((config->min_width > r.width) || (r.width > config->max_width))
+ return -EINVAL;
+
+ if ((config->min_height > r.height) || (r.height > config->max_height))
+ return -EINVAL;
+
+ fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
+ if (IS_ERR(fb)) {
+ DRM_DEBUG_KMS("could not create framebuffer\n");
+ return PTR_ERR(fb);
+ }
+
+ mutex_lock(&file_priv->fbs_lock);
+ or->fb_id = fb->base.id;
+ list_add(&fb->filp_head, &file_priv->fbs);
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ mutex_unlock(&file_priv->fbs_lock);
+
+ return ret;
+}
+
+static int format_check(const struct drm_mode_fb_cmd2 *r)
+{
+ uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_XBGR4444:
+ case DRM_FORMAT_RGBX4444:
+ case DRM_FORMAT_BGRX4444:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_ABGR4444:
+ case DRM_FORMAT_RGBA4444:
+ case DRM_FORMAT_BGRA4444:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_AYUV:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 0;
+ default:
+ DRM_DEBUG_KMS("invalid pixel format %s\n",
+ drm_get_format_name(r->pixel_format));
+ return -EINVAL;
+ }
+}
+
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
+{
+ int ret, hsub, vsub, num_planes, i;
+
+ ret = format_check(r);
+ if (ret) {
+ DRM_DEBUG_KMS("bad framebuffer format %s\n",
+ drm_get_format_name(r->pixel_format));
+ return ret;
+ }
+
+ hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
+ num_planes = drm_format_num_planes(r->pixel_format);
+
+ if (r->width == 0 || r->width % hsub) {
+ DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
+ return -EINVAL;
+ }
+
+ if (r->height == 0 || r->height % vsub) {
+ DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_planes; i++) {
+ unsigned int width = r->width / (i != 0 ? hsub : 1);
+ unsigned int height = r->height / (i != 0 ? vsub : 1);
+ unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
+
+ if (!r->handles[i]) {
+ DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+ return -EINVAL;
+ }
+
+ if ((uint64_t) width * cpp > UINT_MAX)
+ return -ERANGE;
+
+ if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+ return -ERANGE;
+
+ if (r->pitches[i] < width * cpp) {
+ DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
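As a concrete example: NV12 has hsub = vsub = 2, with 1 byte per pixel on the Y plane and 2 bytes per (subsampled) pixel on the interleaved CbCr plane, so a 1920x1080 NV12 framebuffer must have even width and height, pitches[0] >= 1920 and pitches[1] >= (1920 / 2) * 2 = 1920.
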
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Add a new FB to the specified CRTC, given a user request with format. This is
+ * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
+ * and uses fourcc codes as pixel format specifiers.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd2 *r = data;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+ DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+ return -EINVAL;
+ }
+
if ((config->min_width > r->width) || (r->width > config->max_width)) {
- DRM_ERROR("mode new framebuffer width not within limits\n");
+ DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
+ r->width, config->min_width, config->max_width);
return -EINVAL;
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
- DRM_ERROR("mode new framebuffer height not within limits\n");
+ DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
+ r->height, config->min_height, config->max_height);
return -EINVAL;
}
- mutex_lock(&dev->mode_config.mutex);
-
- /* TODO check buffer is sufficently large */
- /* TODO setup destructor callback */
+ ret = framebuffer_check(r);
+ if (ret)
+ return ret;
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
- if (!fb) {
- DRM_ERROR("could not create framebuffer\n");
- ret = -EINVAL;
- goto out;
+ if (IS_ERR(fb)) {
+ DRM_DEBUG_KMS("could not create framebuffer\n");
+ return PTR_ERR(fb);
}
+ mutex_lock(&file_priv->fbs_lock);
r->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ mutex_unlock(&file_priv->fbs_lock);
+
-out:
- mutex_unlock(&dev->mode_config.mutex);
return ret;
}
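
A sketch of the userspace counterpart for a multi-planar format, with bo_handle, pitch and the CbCr plane offset assumed to come from the buffer allocator:

#include <drm_fourcc.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int add_nv12_fb(int fd, uint32_t w, uint32_t h,
                       uint32_t bo_handle, uint32_t pitch,
                       uint32_t cbcr_offset, uint32_t *fb_id)
{
        uint32_t handles[4] = { bo_handle, bo_handle };
        uint32_t pitches[4] = { pitch, pitch };
        uint32_t offsets[4] = { 0, cbcr_offset };

        /* Both planes live in one buffer object here; drivers may also
         * accept one handle per plane. */
        return drmModeAddFB2(fd, w, h, DRM_FORMAT_NV12,
                             handles, pitches, offsets, fb_id, 0);
}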
/**
* drm_mode_rmfb - remove an FB from the configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Remove the FB specified by the user.
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_rmfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_object *obj;
struct drm_framebuffer *fb = NULL;
struct drm_framebuffer *fbl = NULL;
uint32_t *id = data;
- int ret = 0;
int found = 0;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
- /* TODO check that we realy get a framebuffer back. */
- if (!obj) {
- DRM_ERROR("mode invalid framebuffer id\n");
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&file_priv->fbs_lock);
+ mutex_lock(&dev->mode_config.fb_lock);
+ fb = __drm_framebuffer_lookup(dev, *id);
+ if (!fb)
+ goto fail_lookup;
list_for_each_entry(fbl, &file_priv->fbs, filp_head)
if (fb == fbl)
found = 1;
+ if (!found)
+ goto fail_lookup;
- if (!found) {
- DRM_ERROR("tried to remove a fb that we didn't own\n");
- ret = -EINVAL;
- goto out;
- }
+ /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
+ __drm_framebuffer_unregister(dev, fb);
- /* TODO release all crtc connected to the framebuffer */
- /* TODO unhock the destructor from the buffer object */
+ list_del_init(&fb->filp_head);
+ mutex_unlock(&dev->mode_config.fb_lock);
+ mutex_unlock(&file_priv->fbs_lock);
- list_del(&fb->filp_head);
- fb->funcs->destroy(fb);
+ drm_framebuffer_remove(fb);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
+ return 0;
+
+fail_lookup:
+ mutex_unlock(&dev->mode_config.fb_lock);
+ mutex_unlock(&file_priv->fbs_lock);
+
+ return -ENOENT;
}
/**
* drm_mode_getfb - get FB info
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Caller? (FIXME)
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Lookup the FB given its ID and return info about it.
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd *r = data;
- struct drm_mode_object *obj;
struct drm_framebuffer *fb;
- int ret = 0;
+ int ret;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- DRM_ERROR("invalid framebuffer id\n");
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, r->fb_id);
+ if (!fb)
+ return -ENOENT;
r->height = fb->height;
r->width = fb->width;
r->depth = fb->depth;
r->bpp = fb->bits_per_pixel;
- r->pitch = fb->pitch;
- fb->funcs->create_handle(fb, file_priv, &r->handle);
+ r->pitch = fb->pitches[0];
+ if (fb->funcs->create_handle) {
+ if (file_priv->is_master || capable(CAP_SYS_ADMIN) ||
+ drm_is_control_client(file_priv)) {
+ ret = fb->funcs->create_handle(fb, file_priv,
+ &r->handle);
+ } else {
+ /* GET_FB() is an unprivileged ioctl so we must not
+ * return a buffer-handle to non-master processes! For
+ * backwards-compatibility reasons, we cannot make
+ * GET_FB() privileged, so just return an invalid handle
+ * for non-masters. */
+ r->handle = 0;
+ ret = 0;
+ }
+ } else {
+ ret = -ENODEV;
+ }
+
+ drm_framebuffer_unreference(fb);
-out:
- mutex_unlock(&dev->mode_config.mutex);
return ret;
}
+/**
+ * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB and flush out the damaged area supplied by userspace as a clip
+ * rectangle list. Generic userspace which does frontbuffer rendering must call
+ * this ioctl to flush out the changes on manual-update display outputs, e.g.
+ * USB display-link devices, MIPI manual-update panels or eDP panel self-refresh modes.
+ *
+ * Modesetting drivers which always update the frontbuffer do not need to
+ * implement the corresponding ->dirty framebuffer callback.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_clip_rect __user *clips_ptr;
struct drm_clip_rect *clips = NULL;
struct drm_mode_fb_dirty_cmd *r = data;
- struct drm_mode_object *obj;
struct drm_framebuffer *fb;
unsigned flags;
int num_clips;
- int ret = 0;
+ int ret;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- DRM_ERROR("invalid framebuffer id\n");
- ret = -EINVAL;
- goto out_err1;
- }
- fb = obj_to_fb(obj);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, r->fb_id);
+ if (!fb)
+ return -ENOENT;
num_clips = r->num_clips;
- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
if (!num_clips != !clips_ptr) {
ret = -EINVAL;
@@ -1831,6 +3052,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
}
if (num_clips && clips_ptr) {
+ if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
if (!clips) {
ret = -ENOMEM;
@@ -1839,37 +3064,37 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
ret = copy_from_user(clips, clips_ptr,
num_clips * sizeof(*clips));
- if (ret)
+ if (ret) {
+ ret = -EFAULT;
goto out_err2;
+ }
}
if (fb->funcs->dirty) {
- ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+ ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
+ clips, num_clips);
} else {
ret = -ENOSYS;
- goto out_err2;
}
out_err2:
kfree(clips);
out_err1:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_framebuffer_unreference(fb);
+
return ret;
}
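
A sketch of the userspace side, flushing one damaged rectangle through libdrm:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int flush_damage(int fd, uint32_t fb_id,
                        uint16_t x1, uint16_t y1,
                        uint16_t x2, uint16_t y2)
{
        drmModeClip clip = { .x1 = x1, .y1 = y1, .x2 = x2, .y2 = y2 };

        return drmModeDirtyFB(fd, fb_id, &clip, 1);
}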
/**
* drm_fb_release - remove and free the FBs on this file
- * @filp: file * from the ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @priv: drm file for the ioctl
*
* Destroy all the FBs associated with @priv.
*
* Called by the user via ioctl.
*
- * RETURNS:
+ * Returns:
* Zero on success, errno on failure.
*/
void drm_fb_release(struct drm_file *priv)
@@ -1877,213 +3102,270 @@ void drm_fb_release(struct drm_file *priv)
struct drm_device *dev = priv->minor->dev;
struct drm_framebuffer *fb, *tfb;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&priv->fbs_lock);
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
- list_del(&fb->filp_head);
- fb->funcs->destroy(fb);
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
+ __drm_framebuffer_unregister(dev, fb);
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ list_del_init(&fb->filp_head);
+
+ /* This will also drop the fpriv->fbs reference. */
+ drm_framebuffer_remove(fb);
}
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&priv->fbs_lock);
}
/**
- * drm_mode_attachmode - add a mode to the user mode list
- * @dev: DRM device
- * @connector: connector to add the mode to
- * @mode: mode to add
+ * drm_property_create - create a new property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
*
- * Add @mode to @connector's user mode list.
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
*/
-static int drm_mode_attachmode(struct drm_device *dev,
- struct drm_connector *connector,
- struct drm_display_mode *mode)
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ const char *name, int num_values)
{
- int ret = 0;
+ struct drm_property *property = NULL;
+ int ret;
- list_add_tail(&mode->head, &connector->user_modes);
- return ret;
-}
+ property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
+ if (!property)
+ return NULL;
-int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_connector *connector;
- int ret = 0;
- struct drm_display_mode *dup_mode;
- int need_dup = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (!connector->encoder)
- break;
- if (connector->encoder->crtc == crtc) {
- if (need_dup)
- dup_mode = drm_mode_duplicate(dev, mode);
- else
- dup_mode = mode;
- ret = drm_mode_attachmode(dev, connector, dup_mode);
- if (ret)
- return ret;
- need_dup = 1;
- }
+ property->dev = dev;
+
+ if (num_values) {
+ property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
+ if (!property->values)
+ goto fail;
}
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_attachmode_crtc);
-static int drm_mode_detachmode(struct drm_device *dev,
- struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- int found = 0;
- int ret = 0;
- struct drm_display_mode *match_mode, *t;
+ ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+ if (ret)
+ goto fail;
- list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
- if (drm_mode_equal(match_mode, mode)) {
- list_del(&match_mode->head);
- drm_mode_destroy(dev, match_mode);
- found = 1;
- break;
- }
- }
+ property->flags = flags;
+ property->num_values = num_values;
+ INIT_LIST_HEAD(&property->enum_blob_list);
- if (!found)
- ret = -EINVAL;
+ if (name) {
+ strncpy(property->name, name, DRM_PROP_NAME_LEN);
+ property->name[DRM_PROP_NAME_LEN-1] = '\0';
+ }
- return ret;
-}
+ list_add_tail(&property->head, &dev->mode_config.property_list);
-int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
-{
- struct drm_connector *connector;
+ WARN_ON(!drm_property_type_valid(property));
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_mode_detachmode(dev, connector, mode);
- }
- return 0;
+ return property;
+fail:
+ kfree(property->values);
+ kfree(property);
+ return NULL;
}
-EXPORT_SYMBOL(drm_mode_detachmode_crtc);
+EXPORT_SYMBOL(drm_property_create);
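For illustration, a minimal sketch of how a driver might use this API at init time; the property name "demo", its single value and the connector pointer are placeholders, not part of this patch:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/* Sketch only: create a one-value generic property and attach it to a
 * connector with initial value 0. Name and value are illustrative. */
static int demo_init_property(struct drm_device *dev,
			      struct drm_connector *connector)
{
	struct drm_property *prop;

	prop = drm_property_create(dev, 0, "demo", 1);
	if (!prop)
		return -ENOMEM;

	prop->values[0] = 0;	/* the single value userspace may set */

	drm_object_attach_property(&connector->base, prop, 0);
	return 0;
}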
/**
- * drm_fb_attachmode - Attach a user mode to an connector
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
+ * drm_property_create_enum - create a new enumeration property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @props: enumeration lists with property values
+ * @num_values: number of pre-defined values
*
- * This attaches a user specified mode to an connector.
- * Called by the user via ioctl.
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
*
- * RETURNS:
- * Zero on success, errno on failure.
+ * Userspace is only allowed to set one of the predefined values for enumeration
+ * properties.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
*/
-int drm_mode_attachmode_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+ const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values)
{
- struct drm_mode_mode_cmd *mode_cmd = data;
- struct drm_connector *connector;
- struct drm_display_mode *mode;
- struct drm_mode_object *obj;
- struct drm_mode_modeinfo *umode = &mode_cmd->mode;
- int ret = 0;
+ struct drm_property *property;
+ int i, ret;
- mutex_lock(&dev->mode_config.mutex);
+ flags |= DRM_MODE_PROP_ENUM;
- obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- connector = obj_to_connector(obj);
+ property = drm_property_create(dev, flags, name, num_values);
+ if (!property)
+ return NULL;
- mode = drm_mode_create(dev);
- if (!mode) {
- ret = -ENOMEM;
- goto out;
+ for (i = 0; i < num_values; i++) {
+ ret = drm_property_add_enum(property, i,
+ props[i].type,
+ props[i].name);
+ if (ret) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
}
- drm_crtc_convert_umode(mode, umode);
-
- ret = drm_mode_attachmode(dev, connector, mode);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
+ return property;
}
-
+EXPORT_SYMBOL(drm_property_create_enum);
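A hedged sketch of the enum variant; the mode names and values below are invented. Each struct drm_prop_enum_list entry pairs a value (.type) with its symbolic name, exactly as consumed by drm_property_add_enum():

static const struct drm_prop_enum_list demo_scaling_modes[] = {
	{ 0, "Full" },
	{ 1, "Center" },
	{ 2, "Full aspect" },
};

/* Userspace may set exactly one of the three values above. */
prop = drm_property_create_enum(dev, 0, "demo scaling mode",
				demo_scaling_modes,
				ARRAY_SIZE(demo_scaling_modes));
if (!prop)
	return -ENOMEM;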
/**
- * drm_fb_detachmode - Detach a user specified mode from an connector
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
+ * drm_property_create_bitmask - create a new bitmask property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @props: enumeration lists with property bitflags
+ * @num_values: number of pre-defined values
*
- * Called by the user via ioctl.
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
*
- * RETURNS:
- * Zero on success, errno on failure.
+ * Compared to plain enumeration properties, userspace is allowed to set any
+ * or'ed together combination of the predefined property bitflag values.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
*/
-int drm_mode_detachmode_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+ int flags, const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values)
{
- struct drm_mode_object *obj;
- struct drm_mode_mode_cmd *mode_cmd = data;
- struct drm_connector *connector;
- struct drm_display_mode mode;
- struct drm_mode_modeinfo *umode = &mode_cmd->mode;
- int ret = 0;
+ struct drm_property *property;
+ int i, ret;
- mutex_lock(&dev->mode_config.mutex);
+ flags |= DRM_MODE_PROP_BITMASK;
- obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- ret = -EINVAL;
- goto out;
+ property = drm_property_create(dev, flags, name, num_values);
+ if (!property)
+ return NULL;
+
+ for (i = 0; i < num_values; i++) {
+ ret = drm_property_add_enum(property, i,
+ props[i].type,
+ props[i].name);
+ if (ret) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
}
- connector = obj_to_connector(obj);
- drm_crtc_convert_umode(&mode, umode);
- ret = drm_mode_detachmode(dev, connector, &mode);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
+ return property;
}
+EXPORT_SYMBOL(drm_property_create_bitmask);
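The bitmask variant, sketched with invented rotation flags; note that each value is a bit position and must stay below 64, per the extra check added to drm_property_add_enum() further down:

static const struct drm_prop_enum_list demo_rotations[] = {
	{ 0, "rotate-0" },	/* bit 0 */
	{ 1, "rotate-90" },	/* bit 1 */
	{ 2, "rotate-180" },	/* bit 2 */
};

/* Userspace may set any OR'ed combination, e.g. BIT(0) | BIT(2). */
prop = drm_property_create_bitmask(dev, 0, "demo rotation",
				   demo_rotations,
				   ARRAY_SIZE(demo_rotations));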
-struct drm_property *drm_property_create(struct drm_device *dev, int flags,
- const char *name, int num_values)
+static struct drm_property *property_create_range(struct drm_device *dev,
+ int flags, const char *name,
+ uint64_t min, uint64_t max)
{
- struct drm_property *property = NULL;
+ struct drm_property *property;
- property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
+ property = drm_property_create(dev, flags, name, 2);
if (!property)
return NULL;
- if (num_values) {
- property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
- if (!property->values)
- goto fail;
- }
+ property->values[0] = min;
+ property->values[1] = max;
- drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
- property->flags = flags;
- property->num_values = num_values;
- INIT_LIST_HEAD(&property->enum_blob_list);
+ return property;
+}
- if (name)
- strncpy(property->name, name, DRM_PROP_NAME_LEN);
+/**
+ * drm_property_create_range - create a new ranged property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @min: minimum value of the property
+ * @max: maximum value of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy.
+ *
+ * Userspace is allowed to set any integer value in the [min, max] range,
+ * inclusive.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+ const char *name,
+ uint64_t min, uint64_t max)
+{
+ return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
+ name, min, max);
+}
+EXPORT_SYMBOL(drm_property_create_range);
+
+struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
+ int flags, const char *name,
+ int64_t min, int64_t max)
+{
+ return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
+ name, I642U64(min), I642U64(max));
+}
+EXPORT_SYMBOL(drm_property_create_signed_range);
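Both range flavours in one sketch; names and bounds are illustrative and error unwinding is omitted. The signed variant round-trips negative bounds through the I642U64()/U642I64() helpers used above:

struct drm_property *brightness, *hue;

brightness = drm_property_create_range(dev, 0, "demo brightness", 0, 255);
hue = drm_property_create_signed_range(dev, 0, "demo hue", -90, 90);
if (!brightness || !hue)
	return -ENOMEM;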
+
+struct drm_property *drm_property_create_object(struct drm_device *dev,
+ int flags, const char *name, uint32_t type)
+{
+ struct drm_property *property;
+
+ flags |= DRM_MODE_PROP_OBJECT;
+
+ property = drm_property_create(dev, flags, name, 1);
+ if (!property)
+ return NULL;
+
+ property->values[0] = type;
- list_add_tail(&property->head, &dev->mode_config.property_list);
return property;
-fail:
- kfree(property);
- return NULL;
}
-EXPORT_SYMBOL(drm_property_create);
+EXPORT_SYMBOL(drm_property_create_object);
+/**
+ * drm_property_add_enum - add a possible value to an enumeration property
+ * @property: enumeration property to change
+ * @index: index of the new enumeration
+ * @value: value of the new enumeration
+ * @name: symbolic name of the new enumeration
+ *
+ * This function adds enumerations to a property.
+ *
+ * Its use is deprecated; drivers should use one of the more specific helpers
+ * to directly create the property with all enumerations already attached.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name)
{
struct drm_property_enum *prop_enum;
- if (!(property->flags & DRM_MODE_PROP_ENUM))
+ if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
+ return -EINVAL;
+
+ /*
+ * Bitmask enum properties have the additional constraint of values
+ * from 0 to 63
+ */
+ if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
+ (value > 63))
return -EINVAL;
if (!list_empty(&property->enum_blob_list)) {
@@ -2110,6 +3392,14 @@ int drm_property_add_enum(struct drm_property *property, int index,
}
EXPORT_SYMBOL(drm_property_add_enum);
+/**
+ * drm_property_destroy - destroy a drm property
+ * @dev: drm device
+ * @property: property to destroy
+ *
+ * This function frees a property including any attached resources like
+ * enumeration values.
+ */
void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
{
struct drm_property_enum *prop_enum, *pt;
@@ -2127,65 +3417,111 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
}
EXPORT_SYMBOL(drm_property_destroy);
-int drm_connector_attach_property(struct drm_connector *connector,
- struct drm_property *property, uint64_t init_val)
+/**
+ * drm_object_attach_property - attach a property to a modeset object
+ * @obj: drm modeset object
+ * @property: property to attach
+ * @init_val: initial value of the property
+ *
+ * This attaches the given property to the modeset object with the given initial
+ * value. Currently this function cannot fail since the properties are stored in
+ * a statically sized array.
+ */
+void drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val)
{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == 0) {
- connector->property_ids[i] = property->base.id;
- connector->property_values[i] = init_val;
- break;
- }
+ int count = obj->properties->count;
+
+ if (count == DRM_OBJECT_MAX_PROPERTY) {
+ WARN(1, "Failed to attach object property (type: 0x%x). Please "
+ "increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
+ "you see this message on the same object type.\n",
+ obj->type);
+ return;
}
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
+ obj->properties->ids[count] = property->base.id;
+ obj->properties->values[count] = init_val;
+ obj->properties->count++;
}
-EXPORT_SYMBOL(drm_connector_attach_property);
+EXPORT_SYMBOL(drm_object_attach_property);
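Since any modeset object with a properties table is now a valid target, the same property can be attached across object types; a sketch with placeholder pointers (note the call WARNs rather than fails when the static table overflows):

drm_object_attach_property(&connector->base, prop, 0);
drm_object_attach_property(&crtc->base, prop, 0);
drm_object_attach_property(&plane->base, prop, 0);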
-int drm_connector_property_set_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t value)
+/**
+ * drm_object_property_set_value - set the value of a property
+ * @obj: drm mode object to set property value for
+ * @property: property to set
+ * @val: value the property should be set to
+ *
+ * This function sets a given property on a given object. It only changes the
+ * software state of the property; it does not call into the driver's
+ * ->set_property callback.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t val)
{
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == property->base.id) {
- connector->property_values[i] = value;
- break;
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->ids[i] == property->base.id) {
+ obj->properties->values[i] = val;
+ return 0;
}
}
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
+ return -EINVAL;
}
-EXPORT_SYMBOL(drm_connector_property_set_value);
+EXPORT_SYMBOL(drm_object_property_set_value);
-int drm_connector_property_get_value(struct drm_connector *connector,
+/**
+ * drm_object_property_get_value - retrieve the value of a property
+ * @obj: drm mode object to get property value from
+ * @property: property to retrieve
+ * @val: storage for the property value
+ *
+ * This function retrieves the software state of the given property for the
+ * given object. Since there is no driver callback to retrieve the current
+ * property value, this might be out of sync with the hardware, depending upon
+ * the driver and property.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == property->base.id) {
- *val = connector->property_values[i];
- break;
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->ids[i] == property->base.id) {
+ *val = obj->properties->values[i];
+ return 0;
}
}
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
+ return -EINVAL;
}
-EXPORT_SYMBOL(drm_connector_property_get_value);
+EXPORT_SYMBOL(drm_object_property_get_value);
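A short sketch of the get/set pair; as the comment above notes, the value read back is only the software state and may lag the hardware:

uint64_t val;

if (drm_object_property_set_value(&connector->base, prop, 1))
	return -EINVAL;		/* property not attached to this object */

if (drm_object_property_get_value(&connector->base, prop, &val))
	return -EINVAL;
/* val == 1 here: the getter returns whatever the setter stored. */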
+/**
+ * drm_mode_getproperty_ioctl - get information about a property
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function retrieves information about a property, including its
+ * pre-defined values and any enumeration or blob entries attached to it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_object *obj;
struct drm_mode_get_property *out_resp = data;
struct drm_property *property;
int enum_count = 0;
@@ -2196,22 +3532,25 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
struct drm_property_enum *prop_enum;
struct drm_mode_property_enum __user *enum_ptr;
struct drm_property_blob *prop_blob;
- uint32_t *blob_id_ptr;
+ uint32_t __user *blob_id_ptr;
uint64_t __user *values_ptr;
uint32_t __user *blob_length_ptr;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
- if (!obj) {
- ret = -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ property = drm_property_find(dev, out_resp->prop_id);
+ if (!property) {
+ ret = -ENOENT;
goto done;
}
- property = obj_to_property(obj);
- if (property->flags & DRM_MODE_PROP_ENUM) {
+ if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head)
enum_count++;
- } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
list_for_each_entry(prop_blob, &property->enum_blob_list, head)
blob_count++;
}
@@ -2223,7 +3562,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
out_resp->flags = property->flags;
if ((out_resp->count_values >= value_count) && value_count) {
- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
for (i = 0; i < value_count; i++) {
if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
ret = -EFAULT;
@@ -2233,10 +3572,11 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
}
out_resp->count_values = value_count;
- if (property->flags & DRM_MODE_PROP_ENUM) {
+ if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
@@ -2255,11 +3595,11 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
out_resp->count_enum_blobs = enum_count;
}
- if (property->flags & DRM_MODE_PROP_BLOB) {
+ if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
copied = 0;
- blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
+ blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
@@ -2278,7 +3618,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
out_resp->count_enum_blobs = blob_count;
}
done:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -2286,6 +3626,7 @@ static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev
void *data)
{
struct drm_property_blob *blob;
+ int ret;
if (!length || !data)
return NULL;
@@ -2294,13 +3635,16 @@ static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev
if (!blob)
return NULL;
- blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob));
+ ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
+ if (ret) {
+ kfree(blob);
+ return NULL;
+ }
+
blob->length = length;
memcpy(blob->data, data, length);
- drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
-
list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
return blob;
}
@@ -2313,25 +3657,40 @@ static void drm_property_destroy_blob(struct drm_device *dev,
kfree(blob);
}
+/**
+ * drm_mode_getblob_ioctl - get the contents of a blob property value
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function retrieves the contents of a blob property. The value stored in
+ * an object's blob property is just a normal modeset object id.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_object *obj;
struct drm_mode_get_blob *out_resp = data;
struct drm_property_blob *blob;
int ret = 0;
- void *blob_ptr;
+ void __user *blob_ptr;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
- if (!obj) {
- ret = -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ blob = drm_property_blob_find(dev, out_resp->blob_id);
+ if (!blob) {
+ ret = -ENOENT;
goto done;
}
- blob = obj_to_blob(obj);
if (out_resp->length == blob->length) {
- blob_ptr = (void *)(unsigned long)out_resp->data;
+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
if (copy_to_user(blob_ptr, blob->data, blob->length)){
ret = -EFAULT;
goto done;
@@ -2340,15 +3699,26 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
out_resp->length = blob->length;
done:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
+/**
+ * drm_mode_connector_update_edid_property - update the edid property of a connector
+ * @connector: drm connector
+ * @edid: new value of the edid property
+ *
+ * This function creates a new blob modeset object and assigns its id to the
+ * connector's edid property.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid)
{
struct drm_device *dev = connector->dev;
- int ret = 0;
+ int ret, size;
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -2356,13 +3726,17 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
/* Delete edid, when there is none. */
if (!edid) {
connector->edid_blob_ptr = NULL;
- ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+ ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
return ret;
}
- connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
+ size = EDID_LENGTH * (1 + edid->extensions);
+ connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
+ size, edid);
+ if (!connector->edid_blob_ptr)
+ return -EINVAL;
- ret = drm_connector_property_set_value(connector,
+ ret = drm_object_property_set_value(&connector->base,
dev->mode_config.edid_property,
connector->edid_blob_ptr->base.id);
@@ -2370,77 +3744,292 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
+static bool drm_property_change_is_valid(struct drm_property *property,
+ uint64_t value)
+{
+ if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ return false;
+
+ if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
+ if (value < property->values[0] || value > property->values[1])
+ return false;
+ return true;
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
+ int64_t svalue = U642I64(value);
+ if (svalue < U642I64(property->values[0]) ||
+ svalue > U642I64(property->values[1]))
+ return false;
+ return true;
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
+ int i;
+ uint64_t valid_mask = 0;
+ for (i = 0; i < property->num_values; i++)
+ valid_mask |= (1ULL << property->values[i]);
+ return !(value & ~valid_mask);
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
+ /* Only the driver knows */
+ return true;
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
+ struct drm_mode_object *obj;
+ /* a zero value for an object property translates to null: */
+ if (value == 0)
+ return true;
+ /*
+ * NOTE: use _object_find() directly to bypass restriction on
+ * looking up refcnt'd objects (ie. fb's). For a refcnt'd
+ * object this could race against object finalization, so it
+ * simply tells us that the object *was* valid. Which is good
+ * enough.
+ */
+ obj = _object_find(property->dev, value, property->values[0]);
+ return obj != NULL;
+ } else {
+ int i;
+ for (i = 0; i < property->num_values; i++)
+ if (property->values[i] == value)
+ return true;
+ return false;
+ }
+}
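As a worked example of the bitmask branch above: a property registered with values {0, 1, 2} yields valid_mask = 0x7, so a requested value of 0x5 (bits 0 and 2) passes while 0x9 (bit 3) is rejected. The same check in isolation:

uint64_t valid_mask = (1ULL << 0) | (1ULL << 1) | (1ULL << 2);	/* 0x7 */

bool ok  = !(0x5 & ~valid_mask);	/* true: only registered bits set */
bool bad = !(0x9 & ~valid_mask);	/* false: bit 3 was never added */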
+
+/**
+ * drm_mode_connector_property_set_ioctl - set the current value of a connector property
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function sets the current value for a connector's property. It also
+ * calls into a driver's ->set_property callback to update the hardware state.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_connector_set_property *out_resp = data;
- struct drm_mode_object *obj;
- struct drm_property *property;
- struct drm_connector *connector;
+ struct drm_mode_connector_set_property *conn_set_prop = data;
+ struct drm_mode_obj_set_property obj_set_prop = {
+ .value = conn_set_prop->value,
+ .prop_id = conn_set_prop->prop_id,
+ .obj_id = conn_set_prop->connector_id,
+ .obj_type = DRM_MODE_OBJECT_CONNECTOR
+ };
+
+ /* It does all the locking and checking we need */
+ return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
+}
+
+static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_connector *connector = obj_to_connector(obj);
+
+ /* Do DPMS ourselves */
+ if (property == connector->dev->mode_config.dpms_property) {
+ if (connector->funcs->dpms)
+ (*connector->funcs->dpms)(connector, (int)value);
+ ret = 0;
+ } else if (connector->funcs->set_property)
+ ret = connector->funcs->set_property(connector, property, value);
+
+ /* store the property value if successful */
+ if (!ret)
+ drm_object_property_set_value(&connector->base, property, value);
+ return ret;
+}
+
+static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_crtc *crtc = obj_to_crtc(obj);
+
+ if (crtc->funcs->set_property)
+ ret = crtc->funcs->set_property(crtc, property, value);
+ if (!ret)
+ drm_object_property_set_value(obj, property, value);
+
+ return ret;
+}
+
+static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
int ret = -EINVAL;
+ struct drm_plane *plane = obj_to_plane(obj);
+
+ if (plane->funcs->set_property)
+ ret = plane->funcs->set_property(plane, property, value);
+ if (!ret)
+ drm_object_property_set_value(obj, property, value);
+
+ return ret;
+}
+
+/**
+ * drm_mode_obj_get_properties_ioctl - get the current values of an object's properties
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function retrieves the current values of all of an object's properties.
+ * Compared to the connector specific ioctl this one is extended to also work
+ * on crtc and plane objects.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_obj_get_properties *arg = data;
+ struct drm_mode_object *obj;
+ int ret = 0;
int i;
+ int copied = 0;
+ int props_count = 0;
+ uint32_t __user *props_ptr;
+ uint64_t __user *prop_values_ptr;
- mutex_lock(&dev->mode_config.mutex);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+ obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!obj) {
+ ret = -ENOENT;
+ goto out;
+ }
+ if (!obj->properties) {
+ ret = -EINVAL;
goto out;
}
- connector = obj_to_connector(obj);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == out_resp->prop_id)
- break;
+ props_count = obj->properties->count;
+
+ /* This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it. */
+ if ((arg->count_props >= props_count) && props_count) {
+ copied = 0;
+ props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
+ prop_values_ptr = (uint64_t __user *)(unsigned long)
+ (arg->prop_values_ptr);
+ for (i = 0; i < props_count; i++) {
+ if (put_user(obj->properties->ids[i],
+ props_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (put_user(obj->properties->values[i],
+ prop_values_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
}
+ arg->count_props = props_count;
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/**
+ * drm_mode_obj_set_property_ioctl - set the current value of an object's property
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function sets the current value for an object's property. It also calls
+ * into a driver's ->set_property callback to update the hardware state.
+ * Compared to the connector specific ioctl this one is extended to also work on
+ * crtc and plane objects.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_obj_set_property *arg = data;
+ struct drm_mode_object *arg_obj;
+ struct drm_mode_object *prop_obj;
+ struct drm_property *property;
+ int ret = -EINVAL;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
- if (i == DRM_CONNECTOR_MAX_PROPERTY) {
+ arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ if (!arg_obj) {
+ ret = -ENOENT;
goto out;
}
+ if (!arg_obj->properties)
+ goto out;
- obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
- if (!obj) {
+ for (i = 0; i < arg_obj->properties->count; i++)
+ if (arg_obj->properties->ids[i] == arg->prop_id)
+ break;
+
+ if (i == arg_obj->properties->count)
goto out;
- }
- property = obj_to_property(obj);
- if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ prop_obj = drm_mode_object_find(dev, arg->prop_id,
+ DRM_MODE_OBJECT_PROPERTY);
+ if (!prop_obj) {
+ ret = -ENOENT;
goto out;
+ }
+ property = obj_to_property(prop_obj);
- if (property->flags & DRM_MODE_PROP_RANGE) {
- if (out_resp->value < property->values[0])
- goto out;
+ if (!drm_property_change_is_valid(property, arg->value))
+ goto out;
- if (out_resp->value > property->values[1])
- goto out;
- } else {
- int found = 0;
- for (i = 0; i < property->num_values; i++) {
- if (property->values[i] == out_resp->value) {
- found = 1;
- break;
- }
- }
- if (!found) {
- goto out;
- }
+ switch (arg_obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR:
+ ret = drm_mode_connector_set_obj_prop(arg_obj, property,
+ arg->value);
+ break;
+ case DRM_MODE_OBJECT_CRTC:
+ ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+ break;
+ case DRM_MODE_OBJECT_PLANE:
+ ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
+ break;
}
- /* Do DPMS ourselves */
- if (property == connector->dev->mode_config.dpms_property) {
- if (connector->funcs->dpms)
- (*connector->funcs->dpms)(connector, (int) out_resp->value);
- ret = 0;
- } else if (connector->funcs->set_property)
- ret = connector->funcs->set_property(connector, property, out_resp->value);
-
- /* store the property value if successful */
- if (!ret)
- drm_connector_property_set_value(connector, property, out_resp->value);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
+/**
+ * drm_mode_connector_attach_encoder - attach a connector to an encoder
+ * @connector: connector to attach
+ * @encoder: encoder to attach @connector to
+ *
+ * This function links up a connector to an encoder. Note that the routing
+ * restrictions between encoders and crtcs are exposed to userspace through the
+ * possible_clones and possible_crtcs bitmasks.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
@@ -2456,53 +4045,70 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
-void drm_mode_connector_detach_encoder(struct drm_connector *connector,
- struct drm_encoder *encoder)
-{
- int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == encoder->base.id) {
- connector->encoder_ids[i] = 0;
- if (connector->encoder == encoder)
- connector->encoder = NULL;
- break;
- }
- }
-}
-EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
-
-bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
- int gamma_size)
+/**
+ * drm_mode_crtc_set_gamma_size - set the gamma table size
+ * @crtc: CRTC to set the gamma table size for
+ * @gamma_size: size of the gamma table
+ *
+ * Drivers which support gamma tables should set this to the supported gamma
+ * table size when initializing the CRTC. Currently the drm core only supports a
+ * fixed gamma table size.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ int gamma_size)
{
crtc->gamma_size = gamma_size;
crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
if (!crtc->gamma_store) {
crtc->gamma_size = 0;
- return false;
+ return -ENOMEM;
}
- return true;
+ return 0;
}
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
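Typical driver-side use, sketched; 256 entries is the common legacy LUT size and only an example:

/* In the driver's CRTC init path, after drm_crtc_init(): */
ret = drm_mode_crtc_set_gamma_size(crtc, 256);
if (ret)
	return ret;	/* -ENOMEM: the gamma store could not be allocated */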
+/**
+ * drm_mode_gamma_set_ioctl - set the gamma table
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set the gamma table of a CRTC to the one passed in by the user. Userspace
+ * can query the required gamma table size through drm_mode_gamma_get_ioctl.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_lut *crtc_lut = data;
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
void *r_base, *g_base, *b_base;
int size;
int ret = 0;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- ret = -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+ if (!crtc) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ if (crtc->funcs->gamma_set == NULL) {
+ ret = -ENOSYS;
goto out;
}
- crtc = obj_to_crtc(obj);
/* memcpy into gamma store */
if (crtc_lut->gamma_size != crtc->gamma_size) {
@@ -2529,31 +4135,47 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
goto out;
}
- crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
+/**
+ * drm_mode_gamma_get_ioctl - get the gamma table
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Copy the current gamma table into the storage provided. This also provides
+ * the gamma table size the driver expects, which can be used to size the
+ * allocated storage.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_gamma_get_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_lut *crtc_lut = data;
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
void *r_base, *g_base, *b_base;
int size;
int ret = 0;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- ret = -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+ if (!crtc) {
+ ret = -ENOENT;
goto out;
}
- crtc = obj_to_crtc(obj);
/* memcpy into gamma store */
if (crtc_lut->gamma_size != crtc->gamma_size) {
@@ -2580,17 +4202,34 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
goto out;
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
+/**
+ * drm_mode_page_flip_ioctl - schedule an asynchronous fb update
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This schedules an asynchronous update on a given CRTC, called a page flip.
+ * Optionally a drm event is generated to signal the completion of the flip.
+ * Generic drivers cannot assume that a pageflip with changed framebuffer
+ * properties (including driver specific metadata like tiling layout) will work,
+ * but some drivers support e.g. pixel format changes through the pageflip
+ * ioctl.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
int drm_mode_page_flip_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_page_flip *page_flip = data;
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
+ struct drm_framebuffer *fb = NULL, *old_fb = NULL;
struct drm_pending_vblank_event *e = NULL;
unsigned long flags;
int ret = -EINVAL;
@@ -2599,19 +4238,41 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
page_flip->reserved != 0)
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj)
+ if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
+ return -EINVAL;
+
+ crtc = drm_crtc_find(dev, page_flip->crtc_id);
+ if (!crtc)
+ return -ENOENT;
+
+ drm_modeset_lock(&crtc->mutex, NULL);
+ if (crtc->primary->fb == NULL) {
+ /* The framebuffer is currently unbound, presumably
+		 * due to a hotplug event that userspace has not
+ * yet discovered.
+ */
+ ret = -EBUSY;
goto out;
- crtc = obj_to_crtc(obj);
+ }
if (crtc->funcs->page_flip == NULL)
goto out;
- obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj)
+ fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
+ if (!fb) {
+ ret = -ENOENT;
goto out;
- fb = obj_to_fb(obj);
+ }
+
+ ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+ if (ret)
+ goto out;
+
+ if (crtc->primary->fb->pixel_format != fb->pixel_format) {
+ DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
+ ret = -EINVAL;
+ goto out;
+ }
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
ret = -ENOMEM;
@@ -2640,15 +4301,499 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
(void (*) (struct drm_pending_event *)) kfree;
}
- ret = crtc->funcs->page_flip(crtc, fb, e);
+ old_fb = crtc->primary->fb;
+ ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
if (ret) {
- spin_lock_irqsave(&dev->event_lock, flags);
- file_priv->event_space += sizeof e->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- kfree(e);
+ if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ file_priv->event_space += sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e);
+ }
+ /* Keep the old fb, don't unref it. */
+ old_fb = NULL;
+ } else {
+ /*
+	 * Warn if the driver hasn't properly updated crtc->primary->fb
+	 * to reflect that the new framebuffer is now used.
+ * Failing to do so will screw with the reference counting
+ * on framebuffers.
+ */
+ WARN_ON(crtc->primary->fb != fb);
+ /* Unref only the old framebuffer. */
+ fb = NULL;
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ if (old_fb)
+ drm_framebuffer_unreference(old_fb);
+ drm_modeset_unlock(&crtc->mutex);
+
return ret;
}
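From userspace this ioctl is normally reached through libdrm; a hedged sketch, assuming fd, crtc_id and fb_id come from earlier modeset calls:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Request a flip to fb_id and ask for a completion event, later read
 * back from fd and dispatched via drmHandleEvent(). */
static int demo_page_flip(int fd, uint32_t crtc_id, uint32_t fb_id,
			  void *user_data)
{
	return drmModePageFlip(fd, crtc_id, fb_id,
			       DRM_MODE_PAGE_FLIP_EVENT, user_data);
}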
+
+/**
+ * drm_mode_config_reset - call ->reset callbacks
+ * @dev: drm device
+ *
+ * This functions calls all the crtc's, encoder's and connector's ->reset
+ * callback. Drivers can use this in e.g. their driver load or resume code to
+ * reset hardware and software state.
+ */
+void drm_mode_config_reset(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ if (crtc->funcs->reset)
+ crtc->funcs->reset(crtc);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->funcs->reset)
+ encoder->funcs->reset(encoder);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ connector->status = connector_status_unknown;
+
+ if (connector->funcs->reset)
+ connector->funcs->reset(connector);
+ }
+}
+EXPORT_SYMBOL(drm_mode_config_reset);
+
+/**
+ * drm_mode_create_dumb_ioctl - create a dumb backing storage buffer
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This creates a new dumb buffer in the driver's backing storage manager (GEM,
+ * TTM or something else entirely) and returns the resulting buffer handle. This
+ * handle can then be wrapped up into a framebuffer modeset object.
+ *
+ * Note that userspace is not allowed to use such objects for render
+ * acceleration - drivers must create their own private ioctls for such a use
+ * case.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_create_dumb *args = data;
+ u32 cpp, stride, size;
+
+ if (!dev->driver->dumb_create)
+ return -ENOSYS;
+ if (!args->width || !args->height || !args->bpp)
+ return -EINVAL;
+
+ /* overflow checks for 32bit size calculations */
+ cpp = DIV_ROUND_UP(args->bpp, 8);
+ if (cpp > 0xffffffffU / args->width)
+ return -EINVAL;
+ stride = cpp * args->width;
+ if (args->height > 0xffffffffU / stride)
+ return -EINVAL;
+
+ /* test for wrap-around */
+ size = args->height * stride;
+ if (PAGE_ALIGN(size) == 0)
+ return -EINVAL;
+
+ return dev->driver->dumb_create(file_priv, dev, args);
+}
+
+/**
+ * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Allocate an offset in the drm device node's address space to be able to
+ * memory map a dumb buffer.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_map_dumb *args = data;
+
+ /* call driver ioctl to get mmap offset */
+ if (!dev->driver->dumb_map_offset)
+ return -ENOSYS;
+
+ return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
+}
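The create and map ioctls combine into the usual userspace allocation pattern; a sketch with illustrative parameters (error unwinding and handle cleanup omitted for brevity):

#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>

/* Allocate a 32bpp dumb buffer and map it; returns NULL on failure. */
static void *demo_map_dumb(int fd, uint32_t w, uint32_t h, uint32_t *pitch)
{
	struct drm_mode_create_dumb create = {
		.width = w, .height = h, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	void *mem;

	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return NULL;

	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return NULL;

	*pitch = create.pitch;
	mem = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, map.offset);
	return mem == MAP_FAILED ? NULL : mem;
}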
+
+/**
+ * drm_mode_destroy_dumb_ioctl - destroy a dumb backing storage buffer
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This destroys the userspace handle for the given dumb backing storage buffer.
+ * Since buffer objects must be reference counted in the kernel a buffer object
+ * won't be immediately freed if a framebuffer modeset object still uses it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_destroy_dumb *args = data;
+
+ if (!dev->driver->dumb_destroy)
+ return -ENOSYS;
+
+ return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+}
+
+/**
+ * drm_fb_get_bpp_depth - get the bpp/depth values for format
+ * @format: pixel format (DRM_FORMAT_*)
+ * @depth: storage for the depth value
+ * @bpp: storage for the bpp value
+ *
+ * This only supports RGB formats here for compat with code that doesn't use
+ * pixel formats directly yet.
+ */
+void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp)
+{
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ *depth = 8;
+ *bpp = 8;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ *depth = 15;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ *depth = 16;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ *depth = 24;
+ *bpp = 24;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ *depth = 24;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ *depth = 30;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ *depth = 32;
+ *bpp = 32;
+ break;
+ default:
+ DRM_DEBUG_KMS("unsupported pixel format %s\n",
+ drm_get_format_name(format));
+ *depth = 0;
+ *bpp = 0;
+ break;
+ }
+}
+EXPORT_SYMBOL(drm_fb_get_bpp_depth);
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * Returns:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 3;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_num_planes);
+
+/**
+ * drm_format_plane_cpp - determine the bytes per pixel value
+ * @format: pixel format (DRM_FORMAT_*)
+ * @plane: plane index
+ *
+ * Returns:
+ * The bytes per pixel value for the specified plane.
+ */
+int drm_format_plane_cpp(uint32_t format, int plane)
+{
+ unsigned int depth;
+ int bpp;
+
+ if (plane >= drm_format_num_planes(format))
+ return 0;
+
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ return 2;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ return plane ? 2 : 1;
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 1;
+ default:
+ drm_fb_get_bpp_depth(format, &depth, &bpp);
+ return bpp >> 3;
+ }
+}
+EXPORT_SYMBOL(drm_format_plane_cpp);
+
+/**
+ * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * Returns:
+ * The horizontal chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_horz_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
+
+/**
+ * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * Returns:
+ * The vertical chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_vert_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
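Taken together, the four format helpers let generic code size multi-planar buffers. A sketch for NV12 (two planes, 2x2 chroma subsampling); the 1920x1080 geometry is arbitrary:

#include <linux/printk.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>

static void demo_nv12_geometry(void)
{
	const uint32_t format = DRM_FORMAT_NV12;
	const int width = 1920, height = 1080;
	int hsub = drm_format_horz_chroma_subsampling(format);	/* 2 */
	int vsub = drm_format_vert_chroma_subsampling(format);	/* 2 */
	int i;

	for (i = 0; i < drm_format_num_planes(format); i++) {	/* 2 planes */
		int w = i ? width / hsub : width;
		int h = i ? height / vsub : height;
		int pitch = w * drm_format_plane_cpp(format, i);

		/* plane 0 (Y):    1920x1080, cpp 1 -> pitch 1920
		 * plane 1 (CbCr):  960x540,  cpp 2 -> pitch 1920 */
		pr_info("plane %d: %dx%d, pitch %d\n", i, w, h, pitch);
	}
}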
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ *
+ * Since this initializes the modeset locks, no locking is possible here. That
+ * is no problem, since this should happen single-threaded at init time. It is
+ * the driver's job to ensure this guarantee.
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+ mutex_init(&dev->mode_config.mutex);
+ drm_modeset_lock_init(&dev->mode_config.connection_mutex);
+ mutex_init(&dev->mode_config.idr_mutex);
+ mutex_init(&dev->mode_config.fb_lock);
+ INIT_LIST_HEAD(&dev->mode_config.fb_list);
+ INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+ INIT_LIST_HEAD(&dev->mode_config.connector_list);
+ INIT_LIST_HEAD(&dev->mode_config.bridge_list);
+ INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+ INIT_LIST_HEAD(&dev->mode_config.plane_list);
+ idr_init(&dev->mode_config.crtc_idr);
+
+ drm_modeset_lock_all(dev);
+ drm_mode_create_standard_connector_properties(dev);
+ drm_mode_create_standard_plane_properties(dev);
+ drm_modeset_unlock_all(dev);
+
+ /* Just to be sure */
+ dev->mode_config.num_fb = 0;
+ dev->mode_config.num_connector = 0;
+ dev->mode_config.num_crtc = 0;
+ dev->mode_config.num_encoder = 0;
+ dev->mode_config.num_overlay_plane = 0;
+ dev->mode_config.num_total_plane = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
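A sketch of the matching driver load path; the size limits and the funcs table are driver-specific placeholders:

drm_mode_config_init(dev);

dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.max_width = 8192;	/* example hardware limit */
dev->mode_config.max_height = 8192;
dev->mode_config.funcs = &demo_mode_config_funcs;	/* hypothetical table */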
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+ struct drm_connector *connector, *ot;
+ struct drm_crtc *crtc, *ct;
+ struct drm_encoder *encoder, *enct;
+ struct drm_bridge *bridge, *brt;
+ struct drm_framebuffer *fb, *fbt;
+ struct drm_property *property, *pt;
+ struct drm_property_blob *blob, *bt;
+ struct drm_plane *plane, *plt;
+
+ list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+ head) {
+ encoder->funcs->destroy(encoder);
+ }
+
+ list_for_each_entry_safe(bridge, brt,
+ &dev->mode_config.bridge_list, head) {
+ bridge->funcs->destroy(bridge);
+ }
+
+ list_for_each_entry_safe(connector, ot,
+ &dev->mode_config.connector_list, head) {
+ connector->funcs->destroy(connector);
+ }
+
+ list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+ head) {
+ drm_property_destroy(dev, property);
+ }
+
+ list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
+ head) {
+ drm_property_destroy_blob(dev, blob);
+ }
+
+ /*
+ * Single-threaded teardown context, so it's not required to grab the
+	 * fb_lock to protect against concurrent fb_list access. On the
+	 * contrary, it would actually deadlock with drm_framebuffer_cleanup.
+ *
+ * Also, if there are any framebuffers left, that's a driver leak now,
+ * so politely WARN about this.
+ */
+ WARN_ON(!list_empty(&dev->mode_config.fb_list));
+ list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+ drm_framebuffer_remove(fb);
+ }
+
+ list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+ head) {
+ plane->funcs->destroy(plane);
+ }
+
+ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+ crtc->funcs->destroy(crtc);
+ }
+
+ idr_destroy(&dev->mode_config.crtc_idr);
+ drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 4231d6db72e..78b37f3febd 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -29,161 +29,75 @@
* Jesse Barnes <jesse.barnes@intel.com>
*/
-#include "drmP.h"
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
-#include "drm_fb_helper.h"
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/moduleparam.h>
-static void drm_mode_validate_flag(struct drm_connector *connector,
- int flags)
-{
- struct drm_display_mode *mode, *t;
-
- if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
- return;
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_edid.h>
- list_for_each_entry_safe(mode, t, &connector->modes, head) {
- if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
- !(flags & DRM_MODE_FLAG_INTERLACE))
- mode->status = MODE_NO_INTERLACE;
- if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
- !(flags & DRM_MODE_FLAG_DBLSCAN))
- mode->status = MODE_NO_DBLESCAN;
- }
-
- return;
-}
+MODULE_AUTHOR("David Airlie, Jesse Barnes");
+MODULE_DESCRIPTION("DRM KMS helper");
+MODULE_LICENSE("GPL and additional rights");
/**
- * drm_helper_probe_connector_modes - get complete set of display modes
- * @dev: DRM device
- * @maxX: max width for modes
- * @maxY: max height for modes
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Based on @dev's mode_config layout, scan all the connectors and try to detect
- * modes on them. Modes will first be added to the connector's probed_modes
- * list, then culled (based on validity and the @maxX, @maxY parameters) and
- * put into the normal modes list.
- *
- * Intended to be used either at bootup time or when major configuration
- * changes have occurred.
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front of the
+ * connector list
+ * @dev: drm device to operate on
*
- * FIXME: take into account monitor limits
- *
- * RETURNS:
- * Number of modes found on @connector.
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
*/
-int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
- uint32_t maxX, uint32_t maxY)
+void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
{
- struct drm_device *dev = connector->dev;
- struct drm_display_mode *mode, *t;
- struct drm_connector_helper_funcs *connector_funcs =
- connector->helper_private;
- int count = 0;
- int mode_flags = 0;
-
- DRM_DEBUG_KMS("%s\n", drm_get_connector_name(connector));
- /* set all modes to the unverified state */
- list_for_each_entry_safe(mode, t, &connector->modes, head)
- mode->status = MODE_UNVERIFIED;
-
- if (connector->force) {
- if (connector->force == DRM_FORCE_ON)
- connector->status = connector_status_connected;
- else
- connector->status = connector_status_disconnected;
- if (connector->funcs->force)
- connector->funcs->force(connector);
- } else
- connector->status = connector->funcs->detect(connector);
-
- if (connector->status == connector_status_disconnected) {
- DRM_DEBUG_KMS("%s is disconnected\n",
- drm_get_connector_name(connector));
- goto prune;
- }
-
- count = (*connector_funcs->get_modes)(connector);
- if (!count) {
- count = drm_add_modes_noedid(connector, 1024, 768);
- if (!count)
- return 0;
- }
-
- drm_mode_connector_list_update(connector);
-
- if (maxX && maxY)
- drm_mode_validate_size(dev, &connector->modes, maxX,
- maxY, 0);
-
- if (connector->interlace_allowed)
- mode_flags |= DRM_MODE_FLAG_INTERLACE;
- if (connector->doublescan_allowed)
- mode_flags |= DRM_MODE_FLAG_DBLSCAN;
- drm_mode_validate_flag(connector, mode_flags);
-
- list_for_each_entry_safe(mode, t, &connector->modes, head) {
- if (mode->status == MODE_OK)
- mode->status = connector_funcs->mode_valid(connector,
- mode);
- }
+ struct drm_connector *connector, *tmp;
+ struct list_head panel_list;
-prune:
- drm_mode_prune_invalid(dev, &connector->modes, true);
+ INIT_LIST_HEAD(&panel_list);
- if (list_empty(&connector->modes))
- return 0;
-
- drm_mode_sort(&connector->modes);
-
- DRM_DEBUG_KMS("Probed modes for %s\n",
- drm_get_connector_name(connector));
- list_for_each_entry_safe(mode, t, &connector->modes, head) {
- mode->vrefresh = drm_mode_vrefresh(mode);
-
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- drm_mode_debug_printmodeline(mode);
+ list_for_each_entry_safe(connector, tmp,
+ &dev->mode_config.connector_list, head) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ list_move_tail(&connector->head, &panel_list);
}
- return count;
+ list_splice(&panel_list, &dev->mode_config.connector_list);
}
-EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
-
-int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
- uint32_t maxY)
-{
- struct drm_connector *connector;
- int count = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- count += drm_helper_probe_single_connector_modes(connector,
- maxX, maxY);
- }
-
- return count;
-}
-EXPORT_SYMBOL(drm_helper_probe_connector_modes);
+EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
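As a usage illustration (not part of this patch), a driver might call this at the end of its load path, before fbdev setup; foo_driver_load() and foo_modeset_init() are hypothetical names:

static int foo_driver_load(struct drm_device *dev)
{
        int ret;

        ret = foo_modeset_init(dev);    /* hypothetical: registers all connectors */
        if (ret)
                return ret;

        /* sort eDP/LVDS panels to the front before userspace/fbdev sees them */
        drm_helper_move_panel_connectors_to_head(dev);
        return 0;
}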
/**
* drm_helper_encoder_in_use - check if a given encoder is in use
* @encoder: encoder to check
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Walk @encoders's DRM device's mode_config and see if it's in use.
+ * Checks whether @encoder is used by any connector in the current mode
+ * setting output configuration. This doesn't mean that it is actually
+ * enabled, since the DPMS state is tracked separately.
*
- * RETURNS:
- * True if @encoder is part of the mode_config, false otherwise.
+ * Returns:
+ * True if @encoder is used, false otherwise.
*/
bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
+
+ /*
+ * We can expect this mutex to be locked if we are not panicking.
+ * Locking is currently fubar in the panic handler.
+ */
+ if (!oops_in_progress) {
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ }
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder)
return true;
@@ -195,19 +109,25 @@ EXPORT_SYMBOL(drm_helper_encoder_in_use);
* drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
* @crtc: CRTC to check
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * Walk @crtc's DRM device's mode_config and see if it's in use.
+ * Checks whether @crtc is with the current mode setting output configuration
+ * in use by any connector. This doesn't mean that it is actually enabled since
+ * the DPMS state is tracked separately.
*
- * RETURNS:
- * True if @crtc is part of the mode_config, false otherwise.
+ * Returns:
+ * True if @crtc is used, false otherwise.
*/
bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
- /* FIXME: Locking around list access? */
+
+ /*
+ * We can expect this mutex to be locked if we are not panicking.
+ * Locking is currently fubar in the panic handler.
+ */
+ if (!oops_in_progress)
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
return true;
@@ -215,38 +135,34 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_helper_crtc_in_use);
-/**
- * drm_disable_unused_functions - disable unused objects
- * @dev: DRM device
- *
- * LOCKING:
- * Caller must hold mode config lock.
- *
- * If an connector or CRTC isn't part of @dev's mode_config, it can be disabled
- * by calling its dpms function, which should power it off.
- */
-void drm_helper_disable_unused_functions(struct drm_device *dev)
+static void
+drm_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
+ if (encoder->bridge)
+ encoder->bridge->funcs->disable(encoder->bridge);
+
+ if (encoder_funcs->disable)
+ (*encoder_funcs->disable)(encoder);
+ else
+ (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+
+ if (encoder->bridge)
+ encoder->bridge->funcs->post_disable(encoder->bridge);
+}
+
+static void __drm_helper_disable_unused_functions(struct drm_device *dev)
{
struct drm_encoder *encoder;
- struct drm_connector *connector;
- struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_crtc *crtc;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (!connector->encoder)
- continue;
- if (connector->status == connector_status_disconnected)
- connector->encoder = NULL;
- }
+ drm_warn_on_modeset_not_all_locked(dev);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- encoder_funcs = encoder->helper_private;
if (!drm_helper_encoder_in_use(encoder)) {
- if (encoder_funcs->disable)
- (*encoder_funcs->disable)(encoder);
- else
- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
- /* disconnector encoder from any connector */
+ drm_encoder_disable(encoder);
+ /* disconnect encoder from any connector */
encoder->crtc = NULL;
}
}
@@ -255,337 +171,32 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled) {
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
- crtc->fb = NULL;
- }
- }
-}
-EXPORT_SYMBOL(drm_helper_disable_unused_functions);
-
-static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
-{
- struct drm_display_mode *mode;
-
- list_for_each_entry(mode, &connector->modes, head) {
- if (drm_mode_width(mode) > width ||
- drm_mode_height(mode) > height)
- continue;
- if (mode->type & DRM_MODE_TYPE_PREFERRED)
- return mode;
- }
- return NULL;
-}
-
-static bool drm_has_cmdline_mode(struct drm_connector *connector)
-{
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
-
- if (!fb_help_conn)
- return false;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
- return cmdline_mode->specified;
-}
-
-static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
-{
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode = NULL;
-
- if (!fb_help_conn)
- return mode;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
- if (cmdline_mode->specified == false)
- return mode;
-
- /* attempt to find a matching mode in the list of modes
- * we have gotten so far, if not add a CVT mode that conforms
- */
- if (cmdline_mode->rb || cmdline_mode->margins)
- goto create_mode;
-
- list_for_each_entry(mode, &connector->modes, head) {
- /* check width/height */
- if (mode->hdisplay != cmdline_mode->xres ||
- mode->vdisplay != cmdline_mode->yres)
- continue;
-
- if (cmdline_mode->refresh_specified) {
- if (mode->vrefresh != cmdline_mode->refresh)
- continue;
- }
-
- if (cmdline_mode->interlace) {
- if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
- continue;
- }
- return mode;
- }
-
-create_mode:
- mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
- cmdline_mode->yres,
- cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
- cmdline_mode->rb, cmdline_mode->interlace,
- cmdline_mode->margins);
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- list_add(&mode->head, &connector->modes);
- return mode;
-}
-
-static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
-{
- bool enable;
-
- if (strict) {
- enable = connector->status == connector_status_connected;
- } else {
- enable = connector->status != connector_status_disconnected;
- }
- return enable;
-}
-
-static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
-{
- bool any_enabled = false;
- struct drm_connector *connector;
- int i = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- enabled[i] = drm_connector_enabled(connector, true);
- DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- enabled[i] ? "yes" : "no");
- any_enabled |= enabled[i];
- i++;
- }
-
- if (any_enabled)
- return;
-
- i = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- enabled[i] = drm_connector_enabled(connector, false);
- i++;
- }
-}
-
-static bool drm_target_preferred(struct drm_device *dev,
- struct drm_display_mode **modes,
- bool *enabled, int width, int height)
-{
- struct drm_connector *connector;
- int i = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-
- if (enabled[i] == false) {
- i++;
- continue;
- }
-
- DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
- connector->base.id);
-
- /* got for command line mode first */
- modes[i] = drm_pick_cmdline_mode(connector, width, height);
- if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
- connector->base.id);
- modes[i] = drm_has_preferred_mode(connector, width, height);
- }
- /* No preferred modes, pick one off the list */
- if (!modes[i] && !list_empty(&connector->modes)) {
- list_for_each_entry(modes[i], &connector->modes, head)
- break;
- }
- DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
- "none");
- i++;
- }
- return true;
-}
-
-static int drm_pick_crtcs(struct drm_device *dev,
- struct drm_crtc **best_crtcs,
- struct drm_display_mode **modes,
- int n, int width, int height)
-{
- int c, o;
- struct drm_connector *connector;
- struct drm_connector_helper_funcs *connector_funcs;
- struct drm_encoder *encoder;
- struct drm_crtc *best_crtc;
- int my_score, best_score, score;
- struct drm_crtc **crtcs, *crtc;
-
- if (n == dev->mode_config.num_connector)
- return 0;
- c = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (c == n)
- break;
- c++;
- }
-
- best_crtcs[n] = NULL;
- best_crtc = NULL;
- best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
- if (modes[n] == NULL)
- return best_score;
-
- crtcs = kmalloc(dev->mode_config.num_connector *
- sizeof(struct drm_crtc *), GFP_KERNEL);
- if (!crtcs)
- return best_score;
-
- my_score = 1;
- if (connector->status == connector_status_connected)
- my_score++;
- if (drm_has_cmdline_mode(connector))
- my_score++;
- if (drm_has_preferred_mode(connector, width, height))
- my_score++;
-
- connector_funcs = connector->helper_private;
- encoder = connector_funcs->best_encoder(connector);
- if (!encoder)
- goto out;
-
- connector->encoder = encoder;
-
- /* select a crtc for this connector and then attempt to configure
- remaining connectors */
- c = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
- if ((encoder->possible_crtcs & (1 << c)) == 0) {
- c++;
- continue;
- }
-
- for (o = 0; o < n; o++)
- if (best_crtcs[o] == crtc)
- break;
-
- if (o < n) {
- /* ignore cloning for now */
- c++;
- continue;
- }
-
- crtcs[n] = crtc;
- memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
- score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1,
- width, height);
- if (score > best_score) {
- best_crtc = crtc;
- best_score = score;
- memcpy(best_crtcs, crtcs,
- dev->mode_config.num_connector *
- sizeof(struct drm_crtc *));
- }
- c++;
- }
-out:
- kfree(crtcs);
- return best_score;
-}
-
-static void drm_setup_crtcs(struct drm_device *dev)
-{
- struct drm_crtc **crtcs;
- struct drm_display_mode **modes;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- bool *enabled;
- int width, height;
- int i, ret;
-
- DRM_DEBUG_KMS("\n");
-
- width = dev->mode_config.max_width;
- height = dev->mode_config.max_height;
-
- /* clean out all the encoder/crtc combos */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- encoder->crtc = NULL;
- }
-
- crtcs = kcalloc(dev->mode_config.num_connector,
- sizeof(struct drm_crtc *), GFP_KERNEL);
- modes = kcalloc(dev->mode_config.num_connector,
- sizeof(struct drm_display_mode *), GFP_KERNEL);
- enabled = kcalloc(dev->mode_config.num_connector,
- sizeof(bool), GFP_KERNEL);
-
- drm_enable_connectors(dev, enabled);
-
- ret = drm_target_preferred(dev, modes, enabled, width, height);
- if (!ret)
- DRM_ERROR("Unable to find initial modes\n");
-
- DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
-
- drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
-
- i = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_display_mode *mode = modes[i];
- struct drm_crtc *crtc = crtcs[i];
-
- if (connector->encoder == NULL) {
- i++;
- continue;
- }
-
- if (mode && crtc) {
- DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
- mode->name, crtc->base.id);
- crtc->desired_mode = mode;
- connector->encoder->crtc = crtc;
- } else {
- connector->encoder->crtc = NULL;
- connector->encoder = NULL;
+ if (crtc_funcs->disable)
+ (*crtc_funcs->disable)(crtc);
+ else
+ (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
+ crtc->primary->fb = NULL;
}
- i++;
}
-
- kfree(crtcs);
- kfree(modes);
- kfree(enabled);
}
/**
- * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
- * @encoder: encoder to test
- * @crtc: crtc to test
+ * drm_helper_disable_unused_functions - disable unused objects
+ * @dev: DRM device
*
- * Return false if @encoder can't be driven by @crtc, true otherwise.
+ * This function walks through the entire mode setting configuration of @dev. It
+ * will remove any crtc links of unused encoders and encoder links of
+ * disconnected connectors. Then it will disable all unused encoders and crtcs
+ * either by calling their disable callback if available or by calling their
+ * dpms callback with DRM_MODE_DPMS_OFF.
*/
-static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
- struct drm_crtc *crtc)
+void drm_helper_disable_unused_functions(struct drm_device *dev)
{
- struct drm_device *dev;
- struct drm_crtc *tmp;
- int crtc_mask = 1;
-
- WARN(!crtc, "checking null crtc?");
-
- dev = crtc->dev;
-
- list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
- if (tmp == crtc)
- break;
- crtc_mask <<= 1;
- }
-
- if (encoder->possible_crtcs & crtc_mask)
- return true;
- return false;
+ drm_modeset_lock_all(dev);
+ __drm_helper_disable_unused_functions(dev);
+ drm_modeset_unlock_all(dev);
}
+EXPORT_SYMBOL(drm_helper_disable_unused_functions);
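Note the split introduced here: the exported function takes all modeset locks itself, while __drm_helper_disable_unused_functions() assumes the caller already holds them. Since the double-underscore variant is static, the second form below only applies to call sites within this file, such as drm_crtc_helper_set_config() further down; a sketch of the two calling conventions:

/* from driver code, with no modeset locks held: */
drm_helper_disable_unused_functions(dev);

/* inside this file, where the caller already holds the locks: */
drm_modeset_lock_all(dev);
/* ... rearrange encoders/crtcs ... */
__drm_helper_disable_unused_functions(dev);
drm_modeset_unlock_all(dev);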
/*
* Check the CRTC we're going to map each output to vs. its current
@@ -602,29 +213,33 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
encoder_funcs = encoder->helper_private;
/* Disable unused encoders */
if (encoder->crtc == NULL)
- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+ drm_encoder_disable(encoder);
/* Disable encoders whose CRTC is about to change */
if (encoder_funcs->get_crtc &&
encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+ drm_encoder_disable(encoder);
}
}
/**
- * drm_crtc_set_mode - set a mode
+ * drm_crtc_helper_set_mode - internal helper to set a mode
* @crtc: CRTC to program
* @mode: mode to use
- * @x: width of mode
- * @y: height of mode
- *
- * LOCKING:
- * Caller must hold mode config lock.
+ * @x: horizontal offset into the surface
+ * @y: vertical offset into the surface
+ * @old_fb: old framebuffer, for cleanup
*
* Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
- * to fixup or reject the mode prior to trying to set it.
+ * to fixup or reject the mode prior to trying to set it. This is an internal
+ * helper that drivers could e.g. use to update properties that require the
+ * entire output pipe to be disabled and re-enabled in a new configuration. For
+ * example for changing whether audio is enabled on a hdmi link or for changing
+ * panel fitter or dither attributes. It is also called by the
+ * drm_crtc_helper_set_config() helper function to drive the mode setting
+ * sequence.
*
- * RETURNS:
- * True if the mode was set successfully, or false otherwise.
+ * Returns:
+ * True if the mode was set successfully, false otherwise.
*/
bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
@@ -636,16 +251,23 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
+ bool saved_enabled;
struct drm_encoder *encoder;
bool ret = true;
- adjusted_mode = drm_mode_duplicate(dev, mode);
+ drm_warn_on_modeset_not_all_locked(dev);
+ saved_enabled = crtc->enabled;
crtc->enabled = drm_helper_crtc_in_use(crtc);
-
if (!crtc->enabled)
return true;
+ adjusted_mode = drm_mode_duplicate(dev, mode);
+ if (!adjusted_mode) {
+ crtc->enabled = saved_enabled;
+ return false;
+ }
+
saved_mode = crtc->mode;
saved_x = crtc->x;
saved_y = crtc->y;
@@ -665,25 +287,45 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
+
+ if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
+ ret = encoder->bridge->funcs->mode_fixup(
+ encoder->bridge, mode, adjusted_mode);
+ if (!ret) {
+ DRM_DEBUG_KMS("Bridge fixup failed\n");
+ goto done;
+ }
+ }
+
encoder_funcs = encoder->helper_private;
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
+ DRM_DEBUG_KMS("Encoder fixup failed\n");
goto done;
}
}
if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
+ DRM_DEBUG_KMS("CRTC fixup failed\n");
goto done;
}
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
/* Prepare the encoders and CRTCs before setting the mode. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
+
+ if (encoder->bridge)
+ encoder->bridge->funcs->disable(encoder->bridge);
+
encoder_funcs = encoder->helper_private;
/* Disable the encoders as the first thing we do. */
encoder_funcs->prepare(encoder);
+
+ if (encoder->bridge)
+ encoder->bridge->funcs->post_disable(encoder->bridge);
}
drm_crtc_prepare_encoders(dev);
@@ -702,10 +344,15 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
- DRM_INFO("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
- mode->name, mode->base.id);
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.id, encoder->name,
+ mode->base.id, mode->name);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+
+ if (encoder->bridge && encoder->bridge->funcs->mode_set)
+ encoder->bridge->funcs->mode_set(encoder->bridge, mode,
+ adjusted_mode);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -716,16 +363,30 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
+ if (encoder->bridge)
+ encoder->bridge->funcs->pre_enable(encoder->bridge);
+
encoder_funcs = encoder->helper_private;
encoder_funcs->commit(encoder);
+ if (encoder->bridge)
+ encoder->bridge->funcs->enable(encoder->bridge);
}
- /* XXX free adjustedmode */
- drm_mode_destroy(dev, adjusted_mode);
+ /* Store real post-adjustment hardware mode. */
+ crtc->hwmode = *adjusted_mode;
+
+ /* Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc, &crtc->hwmode);
+
/* FIXME: add subpixel order */
done:
+ drm_mode_destroy(dev, adjusted_mode);
if (!ret) {
+ crtc->enabled = saved_enabled;
crtc->mode = saved_mode;
crtc->x = saved_x;
crtc->y = saved_y;
@@ -735,89 +396,115 @@ done:
}
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
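Per the kernel-doc above, a driver can rebuild the whole output pipe by re-setting the current mode; a minimal sketch, mirroring the call in drm_helper_resume_force_mode() further down:

/* sketch: apply a property change that needs a full pipe rebuild */
if (!drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
                              crtc->primary->fb))
        DRM_ERROR("failed to re-set mode on crtc %p\n", crtc);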
+static void
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ /* Decouple all encoders and their attached connectors from this crtc */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ connector->encoder = NULL;
+
+ /*
+ * drm_helper_disable_unused_functions() ought to be
+ * doing this, but since we've decoupled the encoder
+ * from the connector above, the required connection
+ * between them is henceforth no longer available.
+ */
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ }
+ }
+
+ __drm_helper_disable_unused_functions(dev);
+}
/**
* drm_crtc_helper_set_config - set a new config from userspace
- * @crtc: CRTC to setup
- * @crtc_info: user provided configuration
- * @new_mode: new mode to set
- * @connector_set: set of connectors for the new config
- * @fb: new framebuffer
- *
- * LOCKING:
- * Caller must hold mode config lock.
+ * @set: mode set configuration
*
- * Setup a new configuration, provided by the user in @crtc_info, and enable
- * it.
+ * Setup a new configuration, provided by the upper layers (either an ioctl call
+ * from userspace or internally e.g. from the fbdev support code) in @set, and
+ * enable it. This is the main helper function for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->mode_set() and ->commit() helper callbacks.
*
- * RETURNS:
- * Zero. (FIXME)
+ * Returns:
+ * 0 on success, negative errno numbers on failure.
*/
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
- struct drm_crtc *save_crtcs, *new_crtc, *crtc;
+ struct drm_crtc *new_crtc;
struct drm_encoder *save_encoders, *new_encoder, *encoder;
- struct drm_framebuffer *old_fb = NULL;
bool mode_changed = false; /* if true do a full mode set */
bool fb_changed = false; /* if true and !mode_changed just do a flip */
struct drm_connector *save_connectors, *connector;
int count = 0, ro, fail = 0;
struct drm_crtc_helper_funcs *crtc_funcs;
- int ret = 0;
+ struct drm_mode_set save_set;
+ int ret;
+ int i;
DRM_DEBUG_KMS("\n");
- if (!set)
- return -EINVAL;
-
- if (!set->crtc)
- return -EINVAL;
+ BUG_ON(!set);
+ BUG_ON(!set->crtc);
+ BUG_ON(!set->crtc->helper_private);
- if (!set->crtc->helper_private)
- return -EINVAL;
+ /* Enforce sane interface api - has been abused by the fb helper. */
+ BUG_ON(!set->mode && set->fb);
+ BUG_ON(set->fb && set->num_connectors == 0);
crtc_funcs = set->crtc->helper_private;
- DRM_DEBUG_KMS("crtc: %p %d fb: %p connectors: %p num_connectors:"
- " %d (x, y) (%i, %i)\n",
- set->crtc, set->crtc->base.id, set->fb, set->connectors,
- (int)set->num_connectors, set->x, set->y);
+ if (!set->mode)
+ set->fb = NULL;
+
+ if (set->fb) {
+ DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
+ } else {
+ DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+ drm_crtc_helper_disable(set->crtc);
+ return 0;
+ }
dev = set->crtc->dev;
- /* Allocate space for the backup of all (non-pointer) crtc, encoder and
- * connector data. */
- save_crtcs = kzalloc(dev->mode_config.num_crtc *
- sizeof(struct drm_crtc), GFP_KERNEL);
- if (!save_crtcs)
- return -ENOMEM;
+ drm_warn_on_modeset_not_all_locked(dev);
+ /*
+ * Allocate space for the backup of all (non-pointer) encoder and
+ * connector data.
+ */
save_encoders = kzalloc(dev->mode_config.num_encoder *
sizeof(struct drm_encoder), GFP_KERNEL);
- if (!save_encoders) {
- kfree(save_crtcs);
+ if (!save_encoders)
return -ENOMEM;
- }
save_connectors = kzalloc(dev->mode_config.num_connector *
sizeof(struct drm_connector), GFP_KERNEL);
if (!save_connectors) {
- kfree(save_crtcs);
kfree(save_encoders);
return -ENOMEM;
}
- /* Copy data. Note that driver private data is not affected.
+ /*
+ * Copy data. Note that driver private data is not affected.
* Should anything bad happen only the expected state is
* restored, not the drivers personal bookkeeping.
*/
count = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- save_crtcs[count++] = *crtc;
- }
-
- count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
save_encoders[count++] = *encoder;
}
@@ -827,20 +514,25 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
save_connectors[count++] = *connector;
}
+ save_set.crtc = set->crtc;
+ save_set.mode = &set->crtc->mode;
+ save_set.x = set->crtc->x;
+ save_set.y = set->crtc->y;
+ save_set.fb = set->crtc->primary->fb;
+
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
- if (set->crtc->fb != set->fb) {
+ if (set->crtc->primary->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
- if (set->crtc->fb == NULL) {
+ if (set->crtc->primary->fb == NULL) {
DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
mode_changed = true;
} else if (set->fb == NULL) {
mode_changed = true;
- } else if ((set->fb->bits_per_pixel !=
- set->crtc->fb->bits_per_pixel) ||
- set->fb->depth != set->crtc->fb->depth)
- fb_changed = true;
- else
+ } else if (set->fb->pixel_format !=
+ set->crtc->primary->fb->pixel_format) {
+ mode_changed = true;
+ } else
fb_changed = true;
}
@@ -868,6 +560,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
if (new_encoder == NULL)
/* don't break so the fail path works correctly */
fail = 1;
+
+ if (connector->dpms != DRM_MODE_DPMS_ON) {
+ DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
+ mode_changed = true;
+ }
+
break;
}
}
@@ -915,8 +613,14 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
mode_changed = true;
connector->encoder->crtc = new_crtc;
}
- DRM_DEBUG_KMS("setting connector %d crtc to %p\n",
- connector->base.id, new_crtc);
+ if (new_crtc) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+ connector->base.id, connector->name,
+ new_crtc->base.id);
+ } else {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+ connector->base.id, connector->name);
+ }
}
/* mode_set_base is not a required function */
@@ -924,53 +628,49 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
mode_changed = true;
if (mode_changed) {
- old_fb = set->crtc->fb;
- set->crtc->fb = set->fb;
- set->crtc->enabled = (set->mode != NULL);
- if (set->mode != NULL) {
+ if (drm_helper_crtc_in_use(set->crtc)) {
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
+ set->crtc->primary->fb = set->fb;
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
- old_fb)) {
- DRM_ERROR("failed to set mode on crtc %p\n",
- set->crtc);
+ save_set.fb)) {
+ DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+ set->crtc->base.id);
+ set->crtc->primary->fb = save_set.fb;
ret = -EINVAL;
goto fail;
}
- /* TODO are these needed? */
- set->crtc->desired_x = set->x;
- set->crtc->desired_y = set->y;
- set->crtc->desired_mode = set->mode;
+ DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+ for (i = 0; i < set->num_connectors; i++) {
+ DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+ set->connectors[i]->name);
+ set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
+ }
}
- drm_helper_disable_unused_functions(dev);
+ __drm_helper_disable_unused_functions(dev);
} else if (fb_changed) {
set->crtc->x = set->x;
set->crtc->y = set->y;
-
- old_fb = set->crtc->fb;
- if (set->crtc->fb != set->fb)
- set->crtc->fb = set->fb;
+ set->crtc->primary->fb = set->fb;
ret = crtc_funcs->mode_set_base(set->crtc,
- set->x, set->y, old_fb);
- if (ret != 0)
+ set->x, set->y, save_set.fb);
+ if (ret != 0) {
+ set->crtc->x = save_set.x;
+ set->crtc->y = save_set.y;
+ set->crtc->primary->fb = save_set.fb;
goto fail;
+ }
}
kfree(save_connectors);
kfree(save_encoders);
- kfree(save_crtcs);
return 0;
fail:
/* Restore all previous data. */
count = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- *crtc = save_crtcs[count++];
- }
-
- count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
*encoder = save_encoders[count++];
}
@@ -980,69 +680,18 @@ fail:
*connector = save_connectors[count++];
}
+ /* Try to restore the config */
+ if (mode_changed &&
+ !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
+ save_set.y, save_set.fb))
+ DRM_ERROR("failed to restore config after modeset failure\n");
+
kfree(save_connectors);
kfree(save_encoders);
- kfree(save_crtcs);
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
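Drivers built on the crtc helpers typically wire this in directly as their ->set_config() hook; a sketch with hypothetical foo_* callbacks:

static const struct drm_crtc_funcs foo_crtc_funcs = {
        .set_config = drm_crtc_helper_set_config,
        .destroy    = foo_crtc_destroy,     /* hypothetical driver cleanup */
};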
-bool drm_helper_plugged_event(struct drm_device *dev)
-{
- DRM_DEBUG_KMS("\n");
-
- drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
- dev->mode_config.max_height);
-
- drm_setup_crtcs(dev);
-
- /* alert the driver fb layer */
- dev->mode_config.funcs->fb_changed(dev);
-
- /* FIXME: send hotplug event */
- return true;
-}
-/**
- * drm_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
- *
- * LOCKING:
- * Called at init time, must take mode config lock.
- *
- * Scan the CRTCs and connectors and try to put together an initial setup.
- * At the moment, this is a cloned configuration across all heads with
- * a new framebuffer object as the backing store.
- *
- * RETURNS:
- * Zero if everything went ok, nonzero otherwise.
- */
-bool drm_helper_initial_config(struct drm_device *dev)
-{
- int count = 0;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
- drm_fb_helper_parse_command_line(dev);
-
- count = drm_helper_probe_connector_modes(dev,
- dev->mode_config.max_width,
- dev->mode_config.max_height);
-
- /*
- * we shouldn't end up with no modes here.
- */
- WARN(!count, "No connectors reported connected with modes\n");
-
- drm_setup_crtcs(dev);
-
- /* alert the driver fb layer */
- dev->mode_config.funcs->fb_changed(dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_helper_initial_config);
-
static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
{
int dpms = DRM_MODE_DPMS_OFF;
@@ -1056,6 +705,31 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
return dpms;
}
+/* Helper which handles bridge ordering around encoder dpms */
+static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_bridge *bridge = encoder->bridge;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+
+ if (bridge) {
+ if (mode == DRM_MODE_DPMS_ON)
+ bridge->funcs->pre_enable(bridge);
+ else
+ bridge->funcs->disable(bridge);
+ }
+
+ encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ encoder_funcs->dpms(encoder, mode);
+
+ if (bridge) {
+ if (mode == DRM_MODE_DPMS_ON)
+ bridge->funcs->enable(bridge);
+ else
+ bridge->funcs->post_disable(bridge);
+ }
+}
+
static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
{
int dpms = DRM_MODE_DPMS_OFF;
@@ -1070,18 +744,20 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
}
/**
- * drm_helper_connector_dpms
- * @connector affected connector
- * @mode DPMS mode
+ * drm_helper_connector_dpms() - connector dpms helper implementation
+ * @connector: affected connector
+ * @mode: DPMS mode
*
- * Calls the low-level connector DPMS function, then
- * calls appropriate encoder and crtc DPMS functions as well
+ * This is the main helper function provided by the crtc helper framework for
+ * implementing the DPMS connector attribute. It computes the new desired DPMS
+ * state for all encoders and crtcs in the output mesh and calls the ->dpms()
+ * callback provided by the driver appropriately.
*/
void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
struct drm_encoder *encoder = connector->encoder;
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
- int old_dpms;
+ int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
if (mode == connector->dpms)
return;
@@ -1089,6 +765,9 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
old_dpms = connector->dpms;
connector->dpms = mode;
+ if (encoder)
+ encoder_dpms = drm_helper_choose_encoder_dpms(encoder);
+
/* from off to on, do crtc then encoder */
if (mode < old_dpms) {
if (crtc) {
@@ -1097,22 +776,14 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
}
- if (encoder) {
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- if (encoder_funcs->dpms)
- (*encoder_funcs->dpms) (encoder,
- drm_helper_choose_encoder_dpms(encoder));
- }
+ if (encoder)
+ drm_helper_encoder_dpms(encoder, encoder_dpms);
}
/* from on to off, do encoder then crtc */
if (mode > old_dpms) {
- if (encoder) {
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- if (encoder_funcs->dpms)
- (*encoder_funcs->dpms) (encoder,
- drm_helper_choose_encoder_dpms(encoder));
- }
+ if (encoder)
+ drm_helper_encoder_dpms(encoder, encoder_dpms);
if (crtc) {
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
@@ -1126,57 +797,93 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
EXPORT_SYMBOL(drm_helper_connector_dpms);
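Like set_config above, this helper is meant to be plugged straight into the connector funcs; a sketch (the foo_* names are placeholders):

static const struct drm_connector_funcs foo_connector_funcs = {
        .dpms       = drm_helper_connector_dpms,
        .detect     = foo_connector_detect,     /* hypothetical */
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy    = foo_connector_destroy,    /* hypothetical */
};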
/**
- * drm_hotplug_stage_two
- * @dev DRM device
- * @connector hotpluged connector
- *
- * LOCKING.
- * Caller must hold mode config lock, function might grab struct lock.
- *
- * Stage two of a hotplug.
+ * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
+ * @fb: drm_framebuffer object to fill out
+ * @mode_cmd: metadata from the userspace fb creation request
*
- * RETURNS:
- * Zero on success, errno on failure.
+ * This helper can be used in a driver's ->fb_create() callback to pre-fill the
+ * fb's
+ * metadata fields.
*/
-int drm_helper_hotplug_stage_two(struct drm_device *dev)
+void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
- drm_helper_plugged_event(dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
+ int i;
-int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
- struct drm_mode_fb_cmd *mode_cmd)
-{
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
- fb->pitch = mode_cmd->pitch;
- fb->bits_per_pixel = mode_cmd->bpp;
- fb->depth = mode_cmd->depth;
-
- return 0;
+ for (i = 0; i < 4; i++) {
+ fb->pitches[i] = mode_cmd->pitches[i];
+ fb->offsets[i] = mode_cmd->offsets[i];
+ }
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+ &fb->bits_per_pixel);
+ fb->pixel_format = mode_cmd->pixel_format;
}
EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
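A sketch of the intended call site, inside a hypothetical ->fb_create() implementation; foo_framebuffer is assumed to embed a struct drm_framebuffer as its base member:

struct foo_framebuffer *foo_fb;

foo_fb = kzalloc(sizeof(*foo_fb), GFP_KERNEL);
if (!foo_fb)
        return ERR_PTR(-ENOMEM);

drm_helper_mode_fill_fb_struct(&foo_fb->base, mode_cmd);
/* ... driver-specific setup and drm_framebuffer_init() follow ... */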
-int drm_helper_resume_force_mode(struct drm_device *dev)
+/**
+ * drm_helper_resume_force_mode - force-restore mode setting configuration
+ * @dev: drm_device which should be restored
+ *
+ * Drivers which use the mode setting helpers can use this function to
+ * force-restore the mode setting configuration e.g. on resume or when something
+ * else might have trampled over the hw state (like some overzealous old BIOSen
+ * tended to do).
+ *
+ * This helper doesn't provide an error return value, since restoring the old
+ * config should never fail due to resource allocation issues: the driver has
+ * already set the restored configuration successfully once. Hence this should
+ * boil down to the equivalent of a few dpms on calls, which also don't provide
+ * an error code.
+ *
+ * Drivers where simply restoring an old configuration again might fail (e.g.
+ * due to slight differences in allocating shared resources when the
+ * configuration is restored in a different order than when userspace set it up)
+ * need to use their own restore logic.
+ */
+void drm_helper_resume_force_mode(struct drm_device *dev)
{
struct drm_crtc *crtc;
- int ret;
+ struct drm_encoder *encoder;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ int encoder_dpms;
+ bool ret;
+ drm_modeset_lock_all(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->enabled)
continue;
ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
+ crtc->x, crtc->y, crtc->primary->fb);
+ /* Restoring the old config should never fail! */
if (ret == false)
DRM_ERROR("failed to set mode on crtc %p\n", crtc);
+
+ /* Turn off outputs that were already powered off */
+ if (drm_helper_choose_crtc_dpms(crtc)) {
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ encoder_dpms = drm_helper_choose_encoder_dpms(
+ encoder);
+
+ drm_helper_encoder_dpms(encoder, encoder_dpms);
+ }
+
+ crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
}
+
/* disable the unused connectors while restoring the modesetting */
- drm_helper_disable_unused_functions(dev);
- return 0;
+ __drm_helper_disable_unused_functions(dev);
+ drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
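A typical call site would be a driver's resume hook; a sketch with a hypothetical foo_hw_init():

static int foo_pm_resume(struct drm_device *dev)
{
        foo_hw_init(dev);       /* hypothetical: re-program base hw state */
        drm_helper_resume_force_mode(dev);
        return 0;
}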
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
new file mode 100644
index 00000000000..a2945ee6d67
--- /dev/null
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright © 2014 Intel Corporation
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This header file contains mode setting related functions and definitions
+ * which are only used within the drm module as internal implementation details
+ * and are not exported to drivers.
+ */
+
+int drm_mode_object_get(struct drm_device *dev,
+ struct drm_mode_object *obj, uint32_t obj_type);
+void drm_mode_object_put(struct drm_device *dev,
+ struct drm_mode_object *object);
+
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 9903f270e44..b4b51d46f33 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -32,7 +32,9 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include "drmP.h"
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <drm/drmP.h>
#if defined(CONFIG_DEBUG_FS)
@@ -40,14 +42,12 @@
* Initialization, etc.
**************************************************/
-static struct drm_info_list drm_debugfs_list[] = {
+static const struct drm_info_list drm_debugfs_list[] = {
{"name", drm_name_info, 0},
{"vm", drm_vm_info, 0},
{"clients", drm_clients_info, 0},
- {"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
- {"gem_objects", drm_gem_object_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info, 0},
#endif
@@ -84,13 +84,12 @@ static const struct file_operations drm_debugfs_fops = {
* Create a given set of debugfs files represented by an array of
* drm_debugfs_lists in the given root directory.
*/
-int drm_debugfs_create_files(struct drm_info_list *files, int count,
+int drm_debugfs_create_files(const struct drm_info_list *files, int count,
struct dentry *root, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct dentry *ent;
struct drm_info_node *tmp;
- char name[64];
int i, ret;
for (i = 0; i < count; i++) {
@@ -109,7 +108,7 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
root, tmp, &drm_debugfs_fops);
if (!ent) {
DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
- name, files[i].name);
+ root->d_name.name, files[i].name);
kfree(tmp);
ret = -1;
goto fail;
@@ -118,7 +117,10 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
tmp->minor = minor;
tmp->dent = ent;
tmp->info_ent = &files[i];
- list_add(&(tmp->list), &(minor->debugfs_nodes.list));
+
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&tmp->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
}
return 0;
@@ -146,7 +148,8 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
char name[64];
int ret;
- INIT_LIST_HEAD(&minor->debugfs_nodes.list);
+ INIT_LIST_HEAD(&minor->debugfs_list);
+ mutex_init(&minor->debugfs_lock);
sprintf(name, "%d", minor_id);
minor->debugfs_root = debugfs_create_dir(name, root);
if (!minor->debugfs_root) {
@@ -185,15 +188,16 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
*
* Remove all debugfs entries created by debugfs_init().
*/
-int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
struct drm_minor *minor)
{
struct list_head *pos, *q;
struct drm_info_node *tmp;
int i;
+ mutex_lock(&minor->debugfs_lock);
for (i = 0; i < count; i++) {
- list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
+ list_for_each_safe(pos, q, &minor->debugfs_list) {
tmp = list_entry(pos, struct drm_info_node, list);
if (tmp->info_ent == &files[i]) {
debugfs_remove(tmp->dent);
@@ -202,6 +206,7 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
}
}
}
+ mutex_unlock(&minor->debugfs_lock);
return 0;
}
EXPORT_SYMBOL(drm_debugfs_remove_files);
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 13f1537413f..8a140a95375 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -33,7 +33,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "drmP.h"
+#include <linux/export.h>
+#include <drm/drmP.h>
/**
* Initialize the DMA data.
@@ -43,16 +44,22 @@
*
* Allocate and initialize a drm_device_dma structure.
*/
-int drm_dma_setup(struct drm_device *dev)
+int drm_legacy_dma_setup(struct drm_device *dev)
{
int i;
- dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL);
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
+ drm_core_check_feature(dev, DRIVER_MODESET)) {
+ return 0;
+ }
+
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
+ dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
if (!dev->dma)
return -ENOMEM;
- memset(dev->dma, 0, sizeof(*dev->dma));
-
for (i = 0; i <= DRM_MAX_ORDER; i++)
memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
@@ -67,11 +74,16 @@ int drm_dma_setup(struct drm_device *dev)
* Free all pages associated with DMA buffers, the buffers and pages lists, and
* finally the drm_device::dma structure itself.
*/
-void drm_dma_takedown(struct drm_device *dev)
+void drm_legacy_dma_takedown(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
int i, j;
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
+ drm_core_check_feature(dev, DRIVER_MODESET)) {
+ return;
+ }
+
if (!dma)
return;
@@ -121,11 +133,6 @@ void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
buf->pending = 0;
buf->file_priv = NULL;
buf->used = 0;
-
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
- && waitqueue_active(&buf->dma_wait)) {
- wake_up_interruptible(&buf->dma_wait);
- }
}
/**
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
new file mode 100644
index 00000000000..08e33b8b13a
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -0,0 +1,770 @@
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/i2c.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drmP.h>
+
+/**
+ * DOC: dp helpers
+ *
+ * These functions contain some common logic and helpers at various abstraction
+ * levels to deal with Display Port sink devices and related things like DP aux
+ * channel transfers, EDID reading over DP aux channels, decoding certain DPCD
+ * blocks, ...
+ */
+
+/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
+static int
+i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ ret = (*algo_data->aux_ch)(adapter, mode,
+ write_byte, read_byte);
+ return ret;
+}
+
+/*
+ * I2C over AUX CH
+ */
+
+/*
+ * Send the address. If the I2C link is running, this 'restarts'
+ * the connection with the new address; this is used for doing
+ * a write followed by a read (as needed for DDC).
+ */
+static int
+i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_START;
+ int ret;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ algo_data->address = address;
+ algo_data->running = true;
+ ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ return ret;
+}
+
+/*
+ * Stop the I2C transaction. This closes out the link, sending
+ * a bare address packet with the MOT bit turned off
+ */
+static void
+i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_STOP;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ if (algo_data->running) {
+ (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ algo_data->running = false;
+ }
+}
+
+/*
+ * Write a single byte to the current I2C address. The
+ * I2C link must be running or this returns -EIO.
+ */
+static int
+i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+ return ret;
+}
+
+/*
+ * Read a single byte from the current I2C address. The
+ * I2C link must be running or this returns -EIO.
+ */
+static int
+i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+ return ret;
+}
+
+static int
+i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ int ret = 0;
+ bool reading = false;
+ int m;
+ int b;
+
+ for (m = 0; m < num; m++) {
+ u16 len = msgs[m].len;
+ u8 *buf = msgs[m].buf;
+ reading = (msgs[m].flags & I2C_M_RD) != 0;
+ ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
+ if (ret < 0)
+ break;
+ if (reading) {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+ if (ret < 0)
+ break;
+ }
+ }
+ if (ret < 0)
+ break;
+ }
+ if (ret >= 0)
+ ret = num;
+ i2c_algo_dp_aux_stop(adapter, reading);
+ DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
+ return ret;
+}
+
+static u32
+i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm i2c_dp_aux_algo = {
+ .master_xfer = i2c_algo_dp_aux_xfer,
+ .functionality = i2c_algo_dp_aux_functionality,
+};
+
+static void
+i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
+{
+ (void) i2c_algo_dp_aux_address(adapter, 0, false);
+ (void) i2c_algo_dp_aux_stop(adapter, false);
+}
+
+static int
+i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
+{
+ adapter->algo = &i2c_dp_aux_algo;
+ adapter->retries = 3;
+ i2c_dp_aux_reset_bus(adapter);
+ return 0;
+}
+
+/**
+ * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
+ * @adapter: i2c adapter to register
+ *
+ * This registers an i2c adapter that uses the dp aux channel as its underlying
+ * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
+ * and store it in the algo_data member of the @adapter argument. This will be
+ * used by the i2c over dp aux algorithm to drive the hardware.
+ *
+ * Returns:
+ * 0 on success, -ERRNO on failure.
+ *
+ * IMPORTANT:
+ * This interface is deprecated, please switch to the new dp aux helpers and
+ * drm_dp_aux_register().
+ */
+int
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
+{
+ int error;
+
+ error = i2c_dp_aux_prepare_bus(adapter);
+ if (error)
+ return error;
+ error = i2c_add_adapter(adapter);
+ return error;
+}
+EXPORT_SYMBOL(i2c_dp_aux_add_bus);
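A registration sketch for this deprecated interface; foo_aux_ch is a hypothetical hardware-specific transfer hook matching the aux_ch member's signature:

static struct i2c_algo_dp_aux_data foo_dp_aux_data = {
        .running = false,
        .address = 0,
        .aux_ch  = foo_aux_ch,          /* hypothetical hw hook */
};

static struct i2c_adapter foo_dp_adapter = {
        .owner     = THIS_MODULE,
        .algo_data = &foo_dp_aux_data,
};

/* during output probing: */
err = i2c_dp_aux_add_bus(&foo_dp_adapter);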
+
+/* Helpers for DP link training */
+static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ u8 l = dp_link_status(link_status, i);
+ return (l >> s) & 0xf;
+}
+
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ u8 lane_align;
+ u8 lane_status;
+ int lane;
+
+ lane_align = dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_channel_eq_ok);
+
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ int lane;
+ u8 lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
+
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
+
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
+
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ udelay(100);
+ else
+ mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
+
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ udelay(400);
+ else
+ mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate)
+{
+ switch (link_rate) {
+ case 162000:
+ default:
+ return DP_LINK_BW_1_62;
+ case 270000:
+ return DP_LINK_BW_2_7;
+ case 540000:
+ return DP_LINK_BW_5_4;
+ }
+}
+EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
+
+int drm_dp_bw_code_to_link_rate(u8 link_bw)
+{
+ switch (link_bw) {
+ case DP_LINK_BW_1_62:
+ default:
+ return 162000;
+ case DP_LINK_BW_2_7:
+ return 270000;
+ case DP_LINK_BW_5_4:
+ return 540000;
+ }
+}
+EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
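The two conversions are inverses for the defined bandwidth codes, for example:

u8 bw = drm_dp_link_rate_to_bw_code(270000);    /* -> DP_LINK_BW_2_7 */
int rate = drm_dp_bw_code_to_link_rate(bw);     /* -> 270000 again */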
+
+/**
+ * DOC: dp aux channel
+ *
+ * The DisplayPort AUX channel is an abstraction to allow generic, driver-
+ * independent access to AUX functionality. Drivers can take advantage of
+ * this by filling in the fields of the drm_dp_aux structure.
+ *
+ * Transactions are described using a hardware-independent drm_dp_aux_msg
+ * structure, which is passed into a driver's .transfer() implementation.
+ * Both native and I2C-over-AUX transactions are supported.
+ */
+
+static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
+ unsigned int offset, void *buffer, size_t size)
+{
+ struct drm_dp_aux_msg msg;
+ unsigned int retry;
+ int err;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.address = offset;
+ msg.request = request;
+ msg.buffer = buffer;
+ msg.size = size;
+
+ /*
+ * The specification doesn't give any recommendation on how often to
+ * retry native transactions, so retry 7 times like for I2C-over-AUX
+ * transactions.
+ */
+ for (retry = 0; retry < 7; retry++) {
+
+ mutex_lock(&aux->hw_mutex);
+ err = aux->transfer(aux, &msg);
+ mutex_unlock(&aux->hw_mutex);
+ if (err < 0) {
+ if (err == -EBUSY)
+ continue;
+
+ return err;
+ }
+
+ switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
+ case DP_AUX_NATIVE_REPLY_ACK:
+ if (err < size)
+ return -EPROTO;
+ return err;
+
+ case DP_AUX_NATIVE_REPLY_NACK:
+ return -EIO;
+
+ case DP_AUX_NATIVE_REPLY_DEFER:
+ usleep_range(400, 500);
+ break;
+ }
+ }
+
+ DRM_DEBUG_KMS("too many retries, giving up\n");
+ return -EIO;
+}
+
+/**
+ * drm_dp_dpcd_read() - read a series of bytes from the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the (first) register to read
+ * @buffer: buffer to store the register values
+ * @size: number of bytes in @buffer
+ *
+ * Returns the number of bytes transferred on success, or a negative error
+ * code on failure. -EIO is returned if the request was NAKed by the sink or
+ * if the retry count was exceeded. If not all bytes were transferred, this
+ * function returns -EPROTO. Errors from the underlying AUX channel transfer
+ * function, with the exception of -EBUSY (which causes the transaction to
+ * be retried), are propagated to the caller.
+ */
+ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
+ void *buffer, size_t size)
+{
+ return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
+ size);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_read);
+
+/**
+ * drm_dp_dpcd_write() - write a series of bytes to the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the (first) register to write
+ * @buffer: buffer containing the values to write
+ * @size: number of bytes in @buffer
+ *
+ * Returns the number of bytes transferred on success, or a negative error
+ * code on failure. -EIO is returned if the request was NAKed by the sink or
+ * if the retry count was exceeded. If not all bytes were transferred, this
+ * function returns -EPROTO. Errors from the underlying AUX channel transfer
+ * function, with the exception of -EBUSY (which causes the transaction to
+ * be retried), are propagated to the caller.
+ */
+ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
+ void *buffer, size_t size)
+{
+ return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
+ size);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_write);
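For example, a single-byte register write; a sketch assuming aux is an already-initialized struct drm_dp_aux, with an arbitrary register/value pair:

u8 value = DP_TRAINING_PATTERN_1;
ssize_t err;

err = drm_dp_dpcd_write(aux, DP_TRAINING_PATTERN_SET, &value, 1);
if (err < 0)
        return err;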
+
+/**
+ * drm_dp_dpcd_read_link_status() - read DPCD link status (bytes 0x202-0x207)
+ * @aux: DisplayPort AUX channel
+ * @status: buffer to store the link status in (must be at least 6 bytes)
+ *
+ * Returns the number of bytes transferred on success or a negative error
+ * code on failure.
+ */
+int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
+ u8 status[DP_LINK_STATUS_SIZE])
+{
+ return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status,
+ DP_LINK_STATUS_SIZE);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
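This pairs with the status-decoding helpers earlier in this file; a link-training polling sketch, where lane_count is assumed to come from the driver's link configuration:

u8 link_status[DP_LINK_STATUS_SIZE];
int err;

err = drm_dp_dpcd_read_link_status(aux, link_status);
if (err < 0)
        return err;

if (!drm_dp_clock_recovery_ok(link_status, lane_count)) {
        /* adjust voltage swing/pre-emphasis and retry pattern 1 */
} else if (!drm_dp_channel_eq_ok(link_status, lane_count)) {
        /* retrain channel equalization */
}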
+
+/**
+ * drm_dp_link_probe() - probe a DisplayPort link for capabilities
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to structure in which to return link capabilities
+ *
+ * The structure filled in by this function can usually be passed directly
+ * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
+ * configure the link based on the link's capabilities.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 values[3];
+ int err;
+
+ memset(link, 0, sizeof(*link));
+
+ err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ link->revision = values[0];
+ link->rate = drm_dp_bw_code_to_link_rate(values[1]);
+ link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK;
+
+ if (values[2] & DP_ENHANCED_FRAME_CAP)
+ link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_probe);
+
+/**
+ * drm_dp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must exit the
+ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+	 * Control Field" (register 0x600)).
+ */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_up);
+
+/**
+ * drm_dp_link_configure() - configure a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 values[2];
+ int err;
+
+ values[0] = drm_dp_link_rate_to_bw_code(link->rate);
+ values[1] = link->num_lanes;
+
+ if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_configure);
+
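The three link helpers are meant to compose in the order the drm_dp_link_probe() kernel-doc describes; a minimal bring-up sketch under that assumption (example_bring_up_link is an invented name):

static int example_bring_up_link(struct drm_dp_aux *aux)
{
	struct drm_dp_link link;
	int err;

	err = drm_dp_link_probe(aux, &link);	/* fills rate, lanes, caps */
	if (err < 0)
		return err;

	err = drm_dp_link_power_up(aux, &link);	/* no-op before DPCD 1.1 */
	if (err < 0)
		return err;

	return drm_dp_link_configure(aux, &link);
}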
+/*
+ * I2C-over-AUX implementation
+ */
+
+static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ I2C_FUNC_10BIT_ADDR;
+}
+
+/*
+ * Transfer a single I2C-over-AUX message and handle various error conditions,
+ * retrying the transaction as appropriate. It is assumed that the
+ * aux->transfer function does not modify anything in the msg other than the
+ * reply field.
+ */
+static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ unsigned int retry;
+ int err;
+
+ /*
+ * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device
+ * is required to retry at least seven times upon receiving AUX_DEFER
+ * before giving up the AUX transaction.
+ */
+ for (retry = 0; retry < 7; retry++) {
+ mutex_lock(&aux->hw_mutex);
+ err = aux->transfer(aux, msg);
+ mutex_unlock(&aux->hw_mutex);
+ if (err < 0) {
+ if (err == -EBUSY)
+ continue;
+
+ DRM_DEBUG_KMS("transaction failed: %d\n", err);
+ return err;
+ }
+
+ switch (msg->reply & DP_AUX_NATIVE_REPLY_MASK) {
+ case DP_AUX_NATIVE_REPLY_ACK:
+ /*
+ * For I2C-over-AUX transactions this isn't enough, we
+ * need to check for the I2C ACK reply.
+ */
+ break;
+
+ case DP_AUX_NATIVE_REPLY_NACK:
+ DRM_DEBUG_KMS("native nack\n");
+ return -EREMOTEIO;
+
+ case DP_AUX_NATIVE_REPLY_DEFER:
+			DRM_DEBUG_KMS("native defer\n");
+ /*
+ * We could check for I2C bit rate capabilities and if
+ * available adjust this interval. We could also be
+ * more careful with DP-to-legacy adapters where a
+ * long legacy cable may force very low I2C bit rates.
+ *
+ * For now just defer for long enough to hopefully be
+ * safe for all use-cases.
+ */
+ usleep_range(500, 600);
+ continue;
+
+ default:
+ DRM_ERROR("invalid native reply %#04x\n", msg->reply);
+ return -EREMOTEIO;
+ }
+
+ switch (msg->reply & DP_AUX_I2C_REPLY_MASK) {
+ case DP_AUX_I2C_REPLY_ACK:
+ /*
+ * Both native ACK and I2C ACK replies received. We
+ * can assume the transfer was successful.
+ */
+ if (err < msg->size)
+ return -EPROTO;
+ return 0;
+
+ case DP_AUX_I2C_REPLY_NACK:
+ DRM_DEBUG_KMS("I2C nack\n");
+ return -EREMOTEIO;
+
+ case DP_AUX_I2C_REPLY_DEFER:
+ DRM_DEBUG_KMS("I2C defer\n");
+ usleep_range(400, 500);
+ continue;
+
+ default:
+ DRM_ERROR("invalid I2C reply %#04x\n", msg->reply);
+ return -EREMOTEIO;
+ }
+ }
+
+ DRM_DEBUG_KMS("too many retries, giving up\n");
+ return -EREMOTEIO;
+}
+
+static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ int num)
+{
+ struct drm_dp_aux *aux = adapter->algo_data;
+ unsigned int i, j;
+ struct drm_dp_aux_msg msg;
+ int err = 0;
+
+ memset(&msg, 0, sizeof(msg));
+
+ for (i = 0; i < num; i++) {
+ msg.address = msgs[i].addr;
+ msg.request = (msgs[i].flags & I2C_M_RD) ?
+ DP_AUX_I2C_READ :
+ DP_AUX_I2C_WRITE;
+ msg.request |= DP_AUX_I2C_MOT;
+ /* Send a bare address packet to start the transaction.
+ * Zero sized messages specify an address only (bare
+ * address) transaction.
+ */
+ msg.buffer = NULL;
+ msg.size = 0;
+ err = drm_dp_i2c_do_msg(aux, &msg);
+ if (err < 0)
+ break;
+ /*
+ * Many hardware implementations support FIFOs larger than a
+ * single byte, but it has been empirically determined that
+ * transferring data in larger chunks can actually lead to
+ * decreased performance. Therefore each message is simply
+ * transferred byte-by-byte.
+ */
+ for (j = 0; j < msgs[i].len; j++) {
+ msg.buffer = msgs[i].buf + j;
+ msg.size = 1;
+
+ err = drm_dp_i2c_do_msg(aux, &msg);
+ if (err < 0)
+ break;
+ }
+ if (err < 0)
+ break;
+ }
+ if (err >= 0)
+ err = num;
+ /* Send a bare address packet to close out the transaction.
+ * Zero sized messages specify an address only (bare
+ * address) transaction.
+ */
+ msg.request &= ~DP_AUX_I2C_MOT;
+ msg.buffer = NULL;
+ msg.size = 0;
+ (void)drm_dp_i2c_do_msg(aux, &msg);
+
+ return err;
+}
+
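Since drm_dp_i2c_xfer() is reached through the standard i2c core, any i2c_transfer() against the adapter registered below goes over AUX. A hedged sketch of a one-byte DDC read follows; 0x50 is the conventional EDID EEPROM address and the function name is invented.

static int example_ddc_read_byte(struct drm_dp_aux *aux, u8 offset, u8 *val)
{
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret;

	/* Each message becomes a bare address packet plus byte-by-byte
	 * transfers in drm_dp_i2c_xfer() above. */
	ret = i2c_transfer(&aux->ddc, msgs, ARRAY_SIZE(msgs));
	if (ret < 0)
		return ret;

	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}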
+static const struct i2c_algorithm drm_dp_i2c_algo = {
+ .functionality = drm_dp_i2c_functionality,
+ .master_xfer = drm_dp_i2c_xfer,
+};
+
+/**
+ * drm_dp_aux_register() - initialise and register aux channel
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_aux_register(struct drm_dp_aux *aux)
+{
+ mutex_init(&aux->hw_mutex);
+
+ aux->ddc.algo = &drm_dp_i2c_algo;
+ aux->ddc.algo_data = aux;
+ aux->ddc.retries = 3;
+
+ aux->ddc.class = I2C_CLASS_DDC;
+ aux->ddc.owner = THIS_MODULE;
+ aux->ddc.dev.parent = aux->dev;
+ aux->ddc.dev.of_node = aux->dev->of_node;
+
+ strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
+ sizeof(aux->ddc.name));
+
+ return i2c_add_adapter(&aux->ddc);
+}
+EXPORT_SYMBOL(drm_dp_aux_register);
+
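A driver hooks into this by filling in the dev pointer and a transfer() callback before registering. A hypothetical sketch in which all "foo" names are invented:

struct foo_device {
	struct device *dev;
	struct drm_dp_aux aux;
};

/* Hardware backend (stubbed): must return the number of bytes
 * transferred or a negative errno, and, as drm_dp_i2c_do_msg() above
 * assumes, modify nothing in msg except the reply field. */
static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
				struct drm_dp_aux_msg *msg)
{
	/* ... program the AUX channel, wait for completion ... */
	return msg->size;
}

static int foo_aux_init(struct foo_device *foo)
{
	foo->aux.name = "foo-aux";
	foo->aux.dev = foo->dev;
	foo->aux.transfer = foo_aux_transfer;

	return drm_dp_aux_register(&foo->aux);
}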
+/**
+ * drm_dp_aux_unregister() - unregister an AUX adapter
+ * @aux: DisplayPort AUX channel
+ */
+void drm_dp_aux_unregister(struct drm_dp_aux *aux)
+{
+ i2c_del_adapter(&aux->ddc);
+}
+EXPORT_SYMBOL(drm_dp_aux_unregister);
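One payoff of registering a real i2c_adapter is that the AUX channel plugs straight into the existing EDID machinery. A sketch, assuming "connector" is the driver's DRM connector:

static struct edid *example_get_edid(struct drm_connector *connector,
				     struct drm_dp_aux *aux)
{
	/* drm_get_edid() issues DDC reads that land in drm_dp_i2c_xfer(). */
	return drm_get_edid(connector, &aux->ddc);
}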
diff --git a/drivers/gpu/drm/drm_dp_i2c_helper.c b/drivers/gpu/drm/drm_dp_i2c_helper.c
deleted file mode 100644
index 548887c8506..00000000000
--- a/drivers/gpu/drm/drm_dp_i2c_helper.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright © 2009 Keith Packard
- *
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/i2c.h>
-#include "drm_dp_helper.h"
-#include "drmP.h"
-
-/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
-static int
-i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
- uint8_t write_byte, uint8_t *read_byte)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int ret;
-
- ret = (*algo_data->aux_ch)(adapter, mode,
- write_byte, read_byte);
- return ret;
-}
-
-/*
- * I2C over AUX CH
- */
-
-/*
- * Send the address. If the I2C link is running, this 'restarts'
- * the connection with the new address, this is used for doing
- * a write followed by a read (as needed for DDC)
- */
-static int
-i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int mode = MODE_I2C_START;
- int ret;
-
- if (reading)
- mode |= MODE_I2C_READ;
- else
- mode |= MODE_I2C_WRITE;
- algo_data->address = address;
- algo_data->running = true;
- ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
- return ret;
-}
-
-/*
- * Stop the I2C transaction. This closes out the link, sending
- * a bare address packet with the MOT bit turned off
- */
-static void
-i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int mode = MODE_I2C_STOP;
-
- if (reading)
- mode |= MODE_I2C_READ;
- else
- mode |= MODE_I2C_WRITE;
- if (algo_data->running) {
- (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
- algo_data->running = false;
- }
-}
-
-/*
- * Write a single byte to the current I2C address, the
- * the I2C link must be running or this returns -EIO
- */
-static int
-i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int ret;
-
- if (!algo_data->running)
- return -EIO;
-
- ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
- return ret;
-}
-
-/*
- * Read a single byte from the current I2C address, the
- * I2C link must be running or this returns -EIO
- */
-static int
-i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int ret;
-
- if (!algo_data->running)
- return -EIO;
-
- ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
- return ret;
-}
-
-static int
-i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
- struct i2c_msg *msgs,
- int num)
-{
- int ret = 0;
- bool reading = false;
- int m;
- int b;
-
- for (m = 0; m < num; m++) {
- u16 len = msgs[m].len;
- u8 *buf = msgs[m].buf;
- reading = (msgs[m].flags & I2C_M_RD) != 0;
- ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
- if (ret < 0)
- break;
- if (reading) {
- for (b = 0; b < len; b++) {
- ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
- if (ret < 0)
- break;
- }
- } else {
- for (b = 0; b < len; b++) {
- ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
- if (ret < 0)
- break;
- }
- }
- if (ret < 0)
- break;
- }
- if (ret >= 0)
- ret = num;
- i2c_algo_dp_aux_stop(adapter, reading);
- DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
- return ret;
-}
-
-static u32
-i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
- I2C_FUNC_SMBUS_READ_BLOCK_DATA |
- I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
- I2C_FUNC_10BIT_ADDR;
-}
-
-static const struct i2c_algorithm i2c_dp_aux_algo = {
- .master_xfer = i2c_algo_dp_aux_xfer,
- .functionality = i2c_algo_dp_aux_functionality,
-};
-
-static void
-i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
-{
- (void) i2c_algo_dp_aux_address(adapter, 0, false);
- (void) i2c_algo_dp_aux_stop(adapter, false);
-
-}
-
-static int
-i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
-{
- adapter->algo = &i2c_dp_aux_algo;
- adapter->retries = 3;
- i2c_dp_aux_reset_bus(adapter);
- return 0;
-}
-
-int
-i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
-{
- int error;
-
- error = i2c_dp_aux_prepare_bus(adapter);
- if (error)
- return error;
- error = i2c_add_adapter(adapter);
- return error;
-}
-EXPORT_SYMBOL(i2c_dp_aux_add_bus);
diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c
deleted file mode 100644
index c53c9768cc1..00000000000
--- a/drivers/gpu/drm/drm_drawable.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/**
- * \file drm_drawable.c
- * IOCTLs for drawables
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- * \author Michel Dänzer <michel@tungstengraphics.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Allocate drawable ID and memory to store information about it.
- */
-int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- unsigned long irqflags;
- struct drm_draw *draw = data;
- int new_id = 0;
- int ret;
-
-again:
- if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
- ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
- if (ret == -EAGAIN) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- goto again;
- }
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
- draw->handle = new_id;
-
- DRM_DEBUG("%d\n", draw->handle);
-
- return 0;
-}
-
-/**
- * Free drawable ID and memory to store information about it.
- */
-int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_draw *draw = data;
- unsigned long irqflags;
- struct drm_drawable_info *info;
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- info = drm_get_drawable_info(dev, draw->handle);
- if (info == NULL) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- return -EINVAL;
- }
- kfree(info->rects);
- kfree(info);
-
- idr_remove(&dev->drw_idr, draw->handle);
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- DRM_DEBUG("%d\n", draw->handle);
- return 0;
-}
-
-int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_update_draw *update = data;
- unsigned long irqflags;
- struct drm_clip_rect *rects;
- struct drm_drawable_info *info;
- int err;
-
- info = idr_find(&dev->drw_idr, update->handle);
- if (!info) {
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
- if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
- DRM_ERROR("No such drawable %d\n", update->handle);
- kfree(info);
- return -EINVAL;
- }
- }
-
- switch (update->type) {
- case DRM_DRAWABLE_CLIPRECTS:
- if (update->num == 0)
- rects = NULL;
- else if (update->num != info->num_rects) {
- rects = kmalloc(update->num *
- sizeof(struct drm_clip_rect),
- GFP_KERNEL);
- } else
- rects = info->rects;
-
- if (update->num && !rects) {
- DRM_ERROR("Failed to allocate cliprect memory\n");
- err = -ENOMEM;
- goto error;
- }
-
- if (update->num && DRM_COPY_FROM_USER(rects,
- (struct drm_clip_rect __user *)
- (unsigned long)update->data,
- update->num *
- sizeof(*rects))) {
- DRM_ERROR("Failed to copy cliprects from userspace\n");
- err = -EFAULT;
- goto error;
- }
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- if (rects != info->rects) {
- kfree(info->rects);
- }
-
- info->rects = rects;
- info->num_rects = update->num;
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
- DRM_DEBUG("Updated %d cliprects for drawable %d\n",
- info->num_rects, update->handle);
- break;
- default:
- DRM_ERROR("Invalid update type %d\n", update->type);
- return -EINVAL;
- }
-
- return 0;
-
-error:
- if (rects != info->rects)
- kfree(rects);
-
- return err;
-}
-
-/**
- * Caller must hold the drawable spinlock!
- */
-struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
-{
- return idr_find(&dev->drw_idr, id);
-}
-EXPORT_SYMBOL(drm_get_drawable_info);
-
-static int drm_drawable_free(int idr, void *p, void *data)
-{
- struct drm_drawable_info *info = p;
-
- if (info) {
- kfree(info->rects);
- kfree(info);
- }
-
- return 0;
-}
-
-void drm_drawable_free_all(struct drm_device *dev)
-{
- idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
- idr_remove_all(&dev->drw_idr);
-}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 766c46875a2..8218078b613 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -47,22 +47,29 @@
*/
#include <linux/debugfs.h>
-#include "drmP.h"
-#include "drm_core.h"
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_core.h>
static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+
/** Ioctl table */
-static struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+static const struct drm_ioctl_desc drm_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -81,14 +88,14 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
@@ -100,8 +107,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
- /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
- DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -116,204 +122,68 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
- DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
+ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
-/**
- * Take down the DRM device.
- *
- * \param dev DRM device structure.
- *
- * Frees every resource in \p dev.
- *
- * \sa drm_device
- */
-int drm_lastclose(struct drm_device * dev)
-{
- struct drm_vma_entry *vma, *vma_temp;
- int i;
-
- DRM_DEBUG("\n");
-
- if (dev->driver->lastclose)
- dev->driver->lastclose(dev);
- DRM_DEBUG("driver lastclose completed\n");
-
- if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
- drm_irq_uninstall(dev);
-
- mutex_lock(&dev->struct_mutex);
-
- /* Free drawable information memory */
- drm_drawable_free_all(dev);
- del_timer(&dev->timer);
-
- /* Clear AGP information */
- if (drm_core_has_AGP(dev) && dev->agp &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct drm_agp_mem *entry, *tempe;
-
- /* Remove AGP resources, but leave dev->agp
- intact until drv_cleanup is called. */
- list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
- if (entry->bound)
- drm_unbind_agp(entry->memory);
- drm_free_agp(entry->memory, entry->pages);
- kfree(entry);
- }
- INIT_LIST_HEAD(&dev->agp->memory);
-
- if (dev->agp->acquired)
- drm_agp_release(dev);
-
- dev->agp->acquired = 0;
- dev->agp->enabled = 0;
- }
- if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- drm_sg_cleanup(dev->sg);
- dev->sg = NULL;
- }
-
- /* Clear vma list (only built for debugging) */
- list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
- list_del(&vma->head);
- kfree(vma);
- }
-
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
- for (i = 0; i < dev->queue_count; i++) {
- kfree(dev->queuelist[i]);
- dev->queuelist[i] = NULL;
- }
- kfree(dev->queuelist);
- dev->queuelist = NULL;
- }
- dev->queue_count = 0;
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !drm_core_check_feature(dev, DRIVER_MODESET))
- drm_dma_takedown(dev);
-
- dev->dev_mapping = NULL;
- mutex_unlock(&dev->struct_mutex);
-
- DRM_DEBUG("lastclose completed\n");
- return 0;
-}
-
-/**
- * Module initialization. Called via init_module at module load time, or via
- * linux/init/main.c (this is not currently supported).
- *
- * \return zero on success or a negative number on failure.
- *
- * Initializes an array of drm_device structures, and attempts to
- * initialize all available devices, using consecutive minors, registering the
- * stubs and initializing the AGP device.
- *
- * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
- * after the initialization for driver customization.
- */
-int drm_init(struct drm_driver *driver)
-{
- struct pci_dev *pdev = NULL;
- const struct pci_device_id *pid;
- int i;
-
- DRM_DEBUG("\n");
-
- INIT_LIST_HEAD(&driver->device_list);
-
- if (driver->driver_features & DRIVER_MODESET)
- return pci_register_driver(&driver->pci_driver);
-
- /* If not using KMS, fall back to stealth mode manual scanning. */
- for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
- pid = &driver->pci_driver.id_table[i];
-
- /* Loop around setting up a DRM device for each PCI device
- * matching our ID and device class. If we had the internal
- * function that pci_get_subsys and pci_get_class used, we'd
- * be able to just pass pid in instead of doing a two-stage
- * thing.
- */
- pdev = NULL;
- while ((pdev =
- pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
- pid->subdevice, pdev)) != NULL) {
- if ((pdev->class & pid->class_mask) != pid->class)
- continue;
-
- /* stealth mode requires a manual probe */
- pci_dev_get(pdev);
- drm_get_dev(pdev, pid, driver);
- }
- }
- return 0;
-}
-
-EXPORT_SYMBOL(drm_init);
-
-void drm_exit(struct drm_driver *driver)
-{
- struct drm_device *dev, *tmp;
- DRM_DEBUG("\n");
-
- if (driver->driver_features & DRIVER_MODESET) {
- pci_unregister_driver(&driver->pci_driver);
- } else {
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
- drm_put_dev(dev);
- }
-
- DRM_INFO("Module unloaded\n");
-}
-
-EXPORT_SYMBOL(drm_exit);
-
/** File operations structure */
static const struct file_operations drm_stub_fops = {
.owner = THIS_MODULE,
- .open = drm_stub_open
+ .open = drm_stub_open,
+ .llseek = noop_llseek,
};
static int __init drm_core_init(void)
{
int ret = -ENOMEM;
+ drm_global_init();
+ drm_connector_ida_init();
idr_init(&drm_minors_idr);
if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
@@ -326,13 +196,6 @@ static int __init drm_core_init(void)
goto err_p2;
}
- drm_proc_root = proc_mkdir("dri", NULL);
- if (!drm_proc_root) {
- DRM_ERROR("Cannot create /proc/dri\n");
- ret = -1;
- goto err_p3;
- }
-
drm_debugfs_root = debugfs_create_dir("dri", NULL);
if (!drm_debugfs_root) {
DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
@@ -355,12 +218,12 @@ err_p1:
static void __exit drm_core_exit(void)
{
- remove_proc_entry("dri", NULL);
debugfs_remove(drm_debugfs_root);
drm_sysfs_destroy();
unregister_chrdev(DRM_MAJOR, "drm");
+ drm_connector_ida_destroy();
idr_destroy(&drm_minors_idr);
}
@@ -423,6 +286,45 @@ static int drm_version(struct drm_device *dev, void *data,
}
/**
+ * drm_ioctl_permit - Check ioctl permissions against caller
+ *
+ * @flags: ioctl permission flags.
+ * @file_priv: Pointer to struct drm_file identifying the caller.
+ *
+ * Checks whether the caller is allowed to run an ioctl with the
+ * indicated permissions. If so, returns zero. Otherwise returns an
+ * error code suitable for ioctl return.
+ */
+static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+{
+ /* ROOT_ONLY is only for CAP_SYS_ADMIN */
+ if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
+ return -EACCES;
+
+ /* AUTH is only for authenticated or render client */
+ if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
+ !file_priv->authenticated))
+ return -EACCES;
+
+ /* MASTER is only for master or control clients */
+ if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
+ !drm_is_control_client(file_priv)))
+ return -EACCES;
+
+ /* Control clients must be explicitly allowed */
+ if (unlikely(!(flags & DRM_CONTROL_ALLOW) &&
+ drm_is_control_client(file_priv)))
+ return -EACCES;
+
+ /* Render clients must be explicitly allowed */
+ if (unlikely(!(flags & DRM_RENDER_ALLOW) &&
+ drm_is_render_client(file_priv)))
+ return -EACCES;
+
+ return 0;
+}
+
+/**
* Called whenever a process performs an ioctl on /dev/drm.
*
* \param inode device inode.
@@ -439,105 +341,131 @@ long drm_ioctl(struct file *filp,
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev;
- struct drm_ioctl_desc *ioctl;
+ const struct drm_ioctl_desc *ioctl = NULL;
drm_ioctl_t *func;
unsigned int nr = DRM_IOCTL_NR(cmd);
int retcode = -EINVAL;
char stack_kdata[128];
char *kdata = NULL;
+ unsigned int usize, asize;
dev = file_priv->minor->dev;
- atomic_inc(&dev->ioctl_count);
- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
- ++file_priv->ioctl_count;
- DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
- task_pid_nr(current), cmd, nr,
- (long)old_encode_dev(file_priv->minor->device),
- file_priv->authenticated);
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
if ((nr >= DRM_CORE_IOCTL_COUNT) &&
((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
goto err_i1;
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
- (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
+ (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+ u32 drv_size;
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ drv_size = _IOC_SIZE(ioctl->cmd_drv);
+ usize = asize = _IOC_SIZE(cmd);
+ if (drv_size > asize)
+ asize = drv_size;
+ cmd = ioctl->cmd_drv;
+ }
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+ u32 drv_size;
+
ioctl = &drm_ioctls[nr];
+
+ drv_size = _IOC_SIZE(ioctl->cmd);
+ usize = asize = _IOC_SIZE(cmd);
+ if (drv_size > asize)
+ asize = drv_size;
+
cmd = ioctl->cmd;
} else
goto err_i1;
+ DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated, ioctl->name);
+
/* Do not trust userspace, use our own definition */
func = ioctl->func;
- /* is there a local override? */
- if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
- func = dev->driver->dma_ioctl;
- if (!func) {
+ if (unlikely(!func)) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
- } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
- ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
- ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
- (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) {
- retcode = -EACCES;
- } else {
- if (cmd & (IOC_IN | IOC_OUT)) {
- if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) {
- kdata = stack_kdata;
- } else {
- kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
- if (!kdata) {
- retcode = -ENOMEM;
- goto err_i1;
- }
- }
- }
+ goto err_i1;
+ }
- if (cmd & IOC_IN) {
- if (copy_from_user(kdata, (void __user *)arg,
- _IOC_SIZE(cmd)) != 0) {
- retcode = -EFAULT;
+ retcode = drm_ioctl_permit(ioctl->flags, file_priv);
+ if (unlikely(retcode))
+ goto err_i1;
+
+ if (cmd & (IOC_IN | IOC_OUT)) {
+ if (asize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kmalloc(asize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
goto err_i1;
}
}
- if (ioctl->flags & DRM_UNLOCKED)
- retcode = func(dev, kdata, file_priv);
- else {
- lock_kernel();
- retcode = func(dev, kdata, file_priv);
- unlock_kernel();
- }
+ if (asize > usize)
+ memset(kdata + usize, 0, asize - usize);
+ }
- if (cmd & IOC_OUT) {
- if (copy_to_user((void __user *)arg, kdata,
- _IOC_SIZE(cmd)) != 0)
- retcode = -EFAULT;
+ if (cmd & IOC_IN) {
+ if (copy_from_user(kdata, (void __user *)arg,
+ usize) != 0) {
+ retcode = -EFAULT;
+ goto err_i1;
}
+ } else if (cmd & IOC_OUT) {
+ memset(kdata, 0, usize);
+ }
+
+ if (ioctl->flags & DRM_UNLOCKED)
+ retcode = func(dev, kdata, file_priv);
+ else {
+ mutex_lock(&drm_global_mutex);
+ retcode = func(dev, kdata, file_priv);
+ mutex_unlock(&drm_global_mutex);
+ }
+
+ if (cmd & IOC_OUT) {
+ if (copy_to_user((void __user *)arg, kdata,
+ usize) != 0)
+ retcode = -EFAULT;
}
err_i1:
+ if (!ioctl)
+ DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated, cmd, nr);
+
if (kdata != stack_kdata)
kfree(kdata);
- atomic_dec(&dev->ioctl_count);
if (retcode)
- DRM_DEBUG("ret = %x\n", retcode);
+ DRM_DEBUG("ret = %d\n", retcode);
return retcode;
}
-
EXPORT_SYMBOL(drm_ioctl);
-struct drm_local_map *drm_getsarea(struct drm_device *dev)
+/**
+ * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags
+ *
+ * @nr: Ioctl number.
+ * @flags: Where to return the ioctl permission flags
+ */
+bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
{
- struct drm_map_list *entry;
-
- list_for_each_entry(entry, &dev->maplist, head) {
- if (entry->map && entry->map->type == _DRM_SHM &&
- (entry->map->flags & _DRM_CONTAINS_LOCK)) {
- return entry->map;
- }
+ if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) ||
+ (nr < DRM_COMMAND_BASE)) {
+ *flags = drm_ioctls[nr].flags;
+ return true;
}
- return NULL;
+
+ return false;
}
-EXPORT_SYMBOL(drm_getsarea);
+EXPORT_SYMBOL(drm_ioctl_flags);
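drm_ioctl_flags() gives code outside the dispatcher, such as a compat layer, access to a core ioctl's permission bits. A sketch with an invented function name:

static bool example_render_allowed(unsigned int nr)
{
	unsigned int flags;

	if (!drm_ioctl_flags(nr, &flags))
		return false;	/* not a core ioctl */

	return (flags & DRM_RENDER_ALLOW) != 0;
}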
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index defcaf10846..dfa9769b26b 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2,6 +2,7 @@
* Copyright (c) 2006 Luc Verhaegen (quirks list)
* Copyright (c) 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
*
* DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
* FB layer.
@@ -27,15 +28,20 @@
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/hdmi.h>
#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include "drmP.h"
-#include "drm_edid.h"
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
-/*
- * TODO:
- * - support EDID 1.4 (incl. CE blocks)
- */
+#define version_greater(edid, maj, min) \
+ (((edid)->version > (maj)) || \
+ ((edid)->version == (maj) && (edid)->revision > (min)))
+
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
/*
* EDID blocks out in the wild have a variety of bugs, try to collect
@@ -60,15 +66,28 @@
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
-/* define the number of Extension EDID block */
-#define MAX_EDID_EXT_NUM 4
+/* Force reduced-blanking timings for detailed modes */
+#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
+/* Force 8bpc */
+#define EDID_QUIRK_FORCE_8BPC (1 << 8)
+/* Force 12bpc */
+#define EDID_QUIRK_FORCE_12BPC (1 << 9)
+
+struct detailed_mode_closure {
+ struct drm_connector *connector;
+ struct edid *edid;
+ bool preferred;
+ u32 quirks;
+ int modes;
+};
#define LEVEL_DMT 0
#define LEVEL_GTF 1
-#define LEVEL_CVT 2
+#define LEVEL_GTF2 2
+#define LEVEL_CVT 3
static struct edid_quirk {
- char *vendor;
+ char vendor[4];
int product_id;
u32 quirks;
} edid_quirk_list[] = {
@@ -85,6 +104,8 @@ static struct edid_quirk {
/* Envision Peripherals, Inc. EN-7100e */
{ "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+ /* Envision EN2028 */
+ { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
/* Funai Electronics PM36B */
{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
@@ -105,162 +126,25 @@ static struct edid_quirk {
/* Samsung SyncMaster 22[5-6]BW */
{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
-};
-
-
-/* Valid EDID header has these bytes */
-static const u8 edid_header[] = {
- 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
-};
-
-/**
- * edid_is_valid - sanity check EDID data
- * @edid: EDID data
- *
- * Sanity check the EDID block by looking at the header, the version number
- * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
- * valid.
- */
-static bool edid_is_valid(struct edid *edid)
-{
- int i, score = 0;
- u8 csum = 0;
- u8 *raw_edid = (u8 *)edid;
-
- for (i = 0; i < sizeof(edid_header); i++)
- if (raw_edid[i] == edid_header[i])
- score++;
-
- if (score == 8) ;
- else if (score >= 6) {
- DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
- memcpy(raw_edid, edid_header, sizeof(edid_header));
- } else
- goto bad;
-
- for (i = 0; i < EDID_LENGTH; i++)
- csum += raw_edid[i];
- if (csum) {
- DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
- goto bad;
- }
-
- if (edid->version != 1) {
- DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
- goto bad;
- }
-
- if (edid->revision > 4)
- DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
-
- return 1;
-
-bad:
- if (raw_edid) {
- DRM_ERROR("Raw EDID:\n");
- print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
- printk("\n");
- }
- return 0;
-}
-
-/**
- * edid_vendor - match a string against EDID's obfuscated vendor field
- * @edid: EDID to match
- * @vendor: vendor string
- *
- * Returns true if @vendor is in @edid, false otherwise
- */
-static bool edid_vendor(struct edid *edid, char *vendor)
-{
- char edid_vendor[3];
-
- edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
- edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
- ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
- edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
-
- return !strncmp(edid_vendor, vendor, 3);
-}
-
-/**
- * edid_get_quirks - return quirk flags for a given EDID
- * @edid: EDID to process
- *
- * This tells subsequent routines what fixes they need to apply.
- */
-static u32 edid_get_quirks(struct edid *edid)
-{
- struct edid_quirk *quirk;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
- quirk = &edid_quirk_list[i];
-
- if (edid_vendor(edid, quirk->vendor) &&
- (EDID_PRODUCT_ID(edid) == quirk->product_id))
- return quirk->quirks;
- }
-
- return 0;
-}
-
-#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
-#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh))
-
-/**
- * edid_fixup_preferred - set preferred modes based on quirk list
- * @connector: has mode list to fix up
- * @quirks: quirks list
- *
- * Walk the mode list for @connector, clearing the preferred status
- * on existing modes and setting it anew for the right mode ala @quirks.
- */
-static void edid_fixup_preferred(struct drm_connector *connector,
- u32 quirks)
-{
- struct drm_display_mode *t, *cur_mode, *preferred_mode;
- int target_refresh = 0;
-
- if (list_empty(&connector->probed_modes))
- return;
-
- if (quirks & EDID_QUIRK_PREFER_LARGE_60)
- target_refresh = 60;
- if (quirks & EDID_QUIRK_PREFER_LARGE_75)
- target_refresh = 75;
-
- preferred_mode = list_first_entry(&connector->probed_modes,
- struct drm_display_mode, head);
-
- list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
- cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+ /* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
+ { "SNY", 0x2541, EDID_QUIRK_FORCE_12BPC },
- if (cur_mode == preferred_mode)
- continue;
-
- /* Largest mode is preferred */
- if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
- preferred_mode = cur_mode;
+ /* ViewSonic VA2026w */
+ { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
- /* At a given size, try to get closest to target refresh */
- if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
- MODE_REFRESH_DIFF(cur_mode, target_refresh) <
- MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
- preferred_mode = cur_mode;
- }
- }
+ /* Medion MD 30217 PG */
+ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
- preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
-}
+ /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+ { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
+};
/*
- * Add the Autogenerated from the DMT spec.
+ * Autogenerated from the DMT spec.
* This table is copied from xfree86/modes/xf86EdidModes.c.
- * But the mode with Reduced blank feature is deleted.
*/
-static struct drm_display_mode drm_dmt_modes[] = {
+static const struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 350, 382, 385, 445, 0,
@@ -309,12 +193,16 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
896, 1048, 0, 600, 601, 604, 631, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@120Hz RB */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
+ 880, 960, 0, 600, 603, 607, 636, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 848x480@60Hz */
{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
976, 1088, 0, 480, 486, 494, 517, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@43Hz, interlace */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
1208, 1264, 0, 768, 768, 772, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
@@ -332,12 +220,20 @@ static struct drm_display_mode drm_dmt_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@85Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
- 1072, 1376, 0, 768, 769, 772, 808, 0,
+ 1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@120Hz RB */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
+ 1104, 1184, 0, 768, 771, 775, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 790, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x768@60Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1472, 1664, 0, 768, 771, 778, 798, 0,
@@ -350,6 +246,14 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
1496, 1712, 0, 768, 771, 778, 809, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@120Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@60Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 823, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@60Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1480, 1680, 0, 800, 803, 809, 831, 0,
@@ -362,6 +266,10 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
1496, 1712, 0, 800, 803, 809, 843, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@120Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 847, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x960@60Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1488, 1800, 0, 960, 961, 964, 1000, 0,
@@ -370,6 +278,10 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
1504, 1728, 0, 960, 961, 964, 1011, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@120Hz RB */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
+ 1360, 1440, 0, 960, 963, 967, 1017, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x1024@60Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
@@ -382,22 +294,42 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@120Hz RB */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
+ 1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1360x768@60Hz */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1536, 1792, 0, 768, 771, 777, 795, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@60Hz */
+ /* 1360x768@120Hz RB */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
+ 1440, 1520, 0, 768, 771, 776, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@75Hz */
+ /* 1400x1050@75Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@85Hz */
+ /* 1400x1050@85Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@120Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x900@60Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 926, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1440x900@60Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1672, 1904, 0, 900, 903, 909, 934, 0,
@@ -410,6 +342,10 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
1696, 1952, 0, 900, 903, 909, 948, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@120Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 953, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1600x1200@60Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
@@ -423,13 +359,21 @@ static struct drm_display_mode drm_dmt_modes[] = {
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@75Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664,
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@85Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@120Hz RB */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
+ 1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1680x1050@60Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1680x1050@60Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
@@ -442,15 +386,23 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@120Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1792x1344@60Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1729x1344@75Hz */
+ /* 1792x1344@75Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1853x1392@60Hz */
+ /* 1792x1344@120Hz RB */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
+ 1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1856x1392@60Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
@@ -458,6 +410,14 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@120Hz RB */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
+ 1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1200@60Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1200@60Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
@@ -470,6 +430,10 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@120Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1440@60Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
@@ -478,6 +442,14 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@120Hz RB */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
+ 2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2560x1600@60Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
@@ -490,28 +462,1135 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@120Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
};
-static const int drm_num_dmt_modes =
- sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
- int hsize, int vsize, int fresh)
+/*
+ * These more or less come from the DMT spec. The 720x400 modes are
+ * inferred from historical 80x25 practice. The 640x480@67 and 832x624@75
+ * modes are old-school Mac modes. The EDID spec says the 1152x864@75 mode
+ * should be 1152x870, again for the Mac, but instead we use the x864 DMT
+ * mode.
+ *
+ * The DMT modes have been fact-checked; the rest are mild guesses.
+ */
+static const struct drm_display_mode edid_est_modes[] = {
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
+ 768, 864, 0, 480, 483, 486, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
+ 846, 900, 0, 400, 421, 423, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
+ 846, 900, 0, 400, 412, 414, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
+	{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 776, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
+ 928, 1152, 0, 624, 625, 628, 667, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
+};
+
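+/*
+ * w, h: resolution; r: vertical refresh in Hz;
+ * rb: whether the reduced-blanking variant is meant.
+ */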
+struct minimode {
+ short w;
+ short h;
+ short r;
+ short rb;
+};
+
+static const struct minimode est3_modes[] = {
+ /* byte 6 */
+ { 640, 350, 85, 0 },
+ { 640, 400, 85, 0 },
+ { 720, 400, 85, 0 },
+ { 640, 480, 85, 0 },
+ { 848, 480, 60, 0 },
+ { 800, 600, 85, 0 },
+ { 1024, 768, 85, 0 },
+ { 1152, 864, 75, 0 },
+ /* byte 7 */
+ { 1280, 768, 60, 1 },
+ { 1280, 768, 60, 0 },
+ { 1280, 768, 75, 0 },
+ { 1280, 768, 85, 0 },
+ { 1280, 960, 60, 0 },
+ { 1280, 960, 85, 0 },
+ { 1280, 1024, 60, 0 },
+ { 1280, 1024, 85, 0 },
+ /* byte 8 */
+ { 1360, 768, 60, 0 },
+ { 1440, 900, 60, 1 },
+ { 1440, 900, 60, 0 },
+ { 1440, 900, 75, 0 },
+ { 1440, 900, 85, 0 },
+ { 1400, 1050, 60, 1 },
+ { 1400, 1050, 60, 0 },
+ { 1400, 1050, 75, 0 },
+ /* byte 9 */
+ { 1400, 1050, 85, 0 },
+ { 1680, 1050, 60, 1 },
+ { 1680, 1050, 60, 0 },
+ { 1680, 1050, 75, 0 },
+ { 1680, 1050, 85, 0 },
+ { 1600, 1200, 60, 0 },
+ { 1600, 1200, 65, 0 },
+ { 1600, 1200, 70, 0 },
+ /* byte 10 */
+ { 1600, 1200, 75, 0 },
+ { 1600, 1200, 85, 0 },
+ { 1792, 1344, 60, 0 },
+ { 1792, 1344, 75, 0 },
+ { 1856, 1392, 60, 0 },
+ { 1856, 1392, 75, 0 },
+ { 1920, 1200, 60, 1 },
+ { 1920, 1200, 60, 0 },
+ /* byte 11 */
+ { 1920, 1200, 75, 0 },
+ { 1920, 1200, 85, 0 },
+ { 1920, 1440, 60, 0 },
+ { 1920, 1440, 75, 0 },
+};
+
+static const struct minimode extra_modes[] = {
+ { 1024, 576, 60, 0 },
+ { 1366, 768, 60, 0 },
+ { 1600, 900, 60, 0 },
+ { 1680, 945, 60, 0 },
+ { 1920, 1080, 60, 0 },
+ { 2048, 1152, 60, 0 },
+ { 2048, 1536, 60, 0 },
+};
+
+/*
+ * Probably taken from the CEA-861 spec.
+ * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode edid_cea_modes[] = {
+ /* 1 - 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 2 - 720x480@60Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 3 - 720x480@60Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 4 - 1280x720@60Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 5 - 1920x1080i@60Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 6 - 1440x480i@60Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 7 - 1440x480i@60Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 8 - 1440x240@60Hz */
+ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 9 - 1440x240@60Hz */
+ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 10 - 2880x480i@60Hz */
+ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 11 - 2880x480i@60Hz */
+ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 12 - 2880x240@60Hz */
+ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 13 - 2880x240@60Hz */
+ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 14 - 1440x480@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 15 - 1440x480@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 16 - 1920x1080@60Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 17 - 720x576@50Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 18 - 720x576@50Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 19 - 1280x720@50Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 20 - 1920x1080i@50Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 21 - 1440x576i@50Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 22 - 1440x576i@50Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 23 - 1440x288@50Hz */
+ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 24 - 1440x288@50Hz */
+ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 25 - 2880x576i@50Hz */
+ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 26 - 2880x576i@50Hz */
+ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 27 - 2880x288@50Hz */
+ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 28 - 2880x288@50Hz */
+ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 29 - 1440x576@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 30 - 1440x576@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 31 - 1920x1080@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 32 - 1920x1080@24Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 33 - 1920x1080@25Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 34 - 1920x1080@30Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 35 - 2880x480@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 36 - 2880x480@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 37 - 2880x576@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 38 - 2880x576@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 39 - 1920x1080i@50Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+ 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 40 - 1920x1080i@100Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 41 - 1280x720@100Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 42 - 720x576@100Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 43 - 720x576@100Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 44 - 1440x576i@100Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 45 - 1440x576i@100Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 46 - 1920x1080i@120Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 47 - 1280x720@120Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 48 - 720x480@120Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 49 - 720x480@120Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 50 - 1440x480i@120Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 51 - 1440x480i@120Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 52 - 720x576@200Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 53 - 720x576@200Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 54 - 1440x576i@200Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 55 - 1440x576i@200Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 56 - 720x480@240Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ /* 57 - 720x480@240Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 58 - 1440x480i@240Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+	/* 59 - 1440x480i@240Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 60 - 1280x720@24Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 61 - 1280x720@25Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+ 3740, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 62 - 1280x720@30Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 63 - 1920x1080@120Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 64 - 1920x1080@100Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+};
+
+/*
+ * HDMI 1.4 4k modes.
+ */
+static const struct drm_display_mode edid_4k_modes[] = {
+ /* 1 - 3840x2160@30Hz */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
+ 3840, 4016, 4104, 4400, 0,
+ 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, },
+ /* 2 - 3840x2160@25Hz */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
+ 3840, 4896, 4984, 5280, 0,
+ 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, },
+ /* 3 - 3840x2160@24Hz */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
+ 3840, 5116, 5204, 5500, 0,
+ 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, },
+ /* 4 - 4096x2160@24Hz (SMPTE) */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000,
+ 4096, 5116, 5204, 5500, 0,
+ 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, },
+};
+
+/*** DDC fetch and block validation ***/
+
+static const u8 edid_header[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+};
+
+/**
+ * drm_edid_header_is_valid - sanity check the header of the base EDID block
+ * @raw_edid: pointer to raw base EDID block
+ *
+ * Sanity check the header of the base EDID block.
+ *
+ * Return: 8 if the header is perfect, down to 0 if it's totally wrong.
+ */
+int drm_edid_header_is_valid(const u8 *raw_edid)
+{
+ int i, score = 0;
+
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] == edid_header[i])
+ score++;
+
+ return score;
+}
+EXPORT_SYMBOL(drm_edid_header_is_valid);
+
+static int edid_fixup __read_mostly = 6;
+module_param_named(edid_fixup, edid_fixup, int, 0400);
+MODULE_PARM_DESC(edid_fixup,
+ "Minimum number of valid EDID header bytes (0-8, default 6)");
+
+/**
+ * drm_edid_block_valid - Sanity check the EDID block (base or extension)
+ * @raw_edid: pointer to raw EDID block
+ * @block: type of block to validate (0 for base, extension otherwise)
+ * @print_bad_edid: if true, dump bad EDID blocks to the console
+ *
+ * Validate a base or extension EDID block and optionally dump bad blocks to
+ * the console.
+ *
+ * Return: True if the block is valid, false otherwise.
+ */
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
{
int i;
- struct drm_display_mode *ptr, *mode;
-
- mode = NULL;
- for (i = 0; i < drm_num_dmt_modes; i++) {
- ptr = &drm_dmt_modes[i];
- if (hsize == ptr->hdisplay &&
- vsize == ptr->vdisplay &&
- fresh == drm_mode_vrefresh(ptr)) {
- /* get the expected default mode */
- mode = drm_mode_duplicate(dev, ptr);
+ u8 csum = 0;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (WARN_ON(!raw_edid))
+ return false;
+
+ if (edid_fixup > 8 || edid_fixup < 0)
+ edid_fixup = 6;
+
+ if (block == 0) {
+		int score = drm_edid_header_is_valid(raw_edid);
+
+		if (score == 8) {
+			/* perfect */
+		} else if (score >= edid_fixup) {
+ DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+ memcpy(raw_edid, edid_header, sizeof(edid_header));
+ } else {
+ goto bad;
+ }
+ }
+
+ for (i = 0; i < EDID_LENGTH; i++)
+ csum += raw_edid[i];
+ if (csum) {
+ if (print_bad_edid) {
+ DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+ }
+
+ /* allow CEA to slide through, switches mangle this */
+ if (raw_edid[0] != 0x02)
+ goto bad;
+ }
+
+ /* per-block-type checks */
+ switch (raw_edid[0]) {
+ case 0: /* base */
+ if (edid->version != 1) {
+ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ goto bad;
+ }
+
+ if (edid->revision > 4)
+ DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+
+bad:
+ if (print_bad_edid) {
+ printk(KERN_ERR "Raw EDID:\n");
+ print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
+ raw_edid, EDID_LENGTH, false);
+ }
+ return false;
+}
+EXPORT_SYMBOL(drm_edid_block_valid);
+
+/**
+ * drm_edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity-check an entire EDID record (including extensions)
+ *
+ * Return: True if the EDID data is valid, false otherwise.
+ */
+bool drm_edid_is_valid(struct edid *edid)
+{
+ int i;
+ u8 *raw = (u8 *)edid;
+
+ if (!edid)
+ return false;
+
+ for (i = 0; i <= edid->extensions; i++)
+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(drm_edid_is_valid);
+
+#define DDC_SEGMENT_ADDR 0x30
+/**
+ * drm_do_probe_ddc_edid() - get EDID information via I2C
+ * @adapter: I2C device adaptor
+ * @buf: EDID data buffer to be filled
+ * @block: 128 byte EDID block to start fetching from
+ * @len: EDID data buffer length to fetch
+ *
+ * Try to fetch EDID information by calling I2C driver functions.
+ *
+ * Return: 0 on success or -1 on failure.
+ */
+static int
+drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+ int block, int len)
+{
+ unsigned char start = block * EDID_LENGTH;
+ unsigned char segment = block >> 1;
+ unsigned char xfers = segment ? 3 : 2;
+ int ret, retries = 5;
+
+ /*
+ * The core I2C driver will automatically retry the transfer if the
+ * adapter reports EAGAIN. However, we find that bit-banging transfers
+	 * are susceptible to errors on a heavily loaded machine and
+ * generate spurious NAKs and timeouts. Retrying the transfer
+ * of the individual block a few times seems to overcome this.
+ */
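+	/*
+	 * E-DDC layout: blocks 0 and 1 live in segment 0 and need only the
+	 * offset write plus the read (two transfers); blocks 2 and up also
+	 * need a leading segment-pointer write (three transfers). Since
+	 * "start" is an unsigned char, block * EDID_LENGTH wraps to the
+	 * offset within the 256-byte segment, e.g. block 2 -> segment 1,
+	 * offset 0.
+	 */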
+ do {
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DDC_SEGMENT_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &segment,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &start,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf,
+ }
+ };
+
+		/*
+		 * Avoid sending the segment address when it isn't needed
+		 * (xfers == 2 starts at msgs[1]), so as not to upset
+		 * non-compliant DDC monitors.
+		 */
+ ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
+
+ if (ret == -ENXIO) {
+ DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
+ adapter->name);
+ break;
+ }
+ } while (ret != xfers && --retries);
+
+ return ret == xfers ? 0 : -1;
+}
+
+static bool drm_edid_is_zero(u8 *in_edid, int length)
+{
+ if (memchr_inv(in_edid, 0, length))
+ return false;
+
+ return true;
+}
+
+static u8 *
+drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+ int i, j = 0, valid_extensions = 0;
+ u8 *block, *new;
+ bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
+
+ if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+ return NULL;
+
+ /* base block fetch */
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block, 0, print_bad_edid))
break;
+ if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
+ connector->null_edid_counter++;
+ goto carp;
}
}
- return mode;
+ if (i == 4)
+ goto carp;
+
+ /* if there's no extensions, we're done */
+ if (block[0x7e] == 0)
+ return block;
+
+ new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto out;
+ block = new;
+
+ for (j = 1; j <= block[0x7e]; j++) {
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter,
+ block + (valid_extensions + 1) * EDID_LENGTH,
+ j, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
+ valid_extensions++;
+ break;
+ }
+ }
+
+ if (i == 4 && print_bad_edid) {
+ dev_warn(connector->dev->dev,
+ "%s: Ignoring invalid EDID block %d.\n",
+ connector->name, j);
+
+ connector->bad_edid_counter++;
+ }
+ }
+
+ if (valid_extensions != block[0x7e]) {
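+		/*
+		 * Some extension blocks were bogus: bump the base-block
+		 * checksum to compensate for the smaller count stored in
+		 * byte 0x7e, then shrink the allocation.
+		 */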
+ block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+ block[0x7e] = valid_extensions;
+ new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto out;
+ block = new;
+ }
+
+ return block;
+
+carp:
+ if (print_bad_edid) {
+ dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
+ connector->name, j);
+ }
+ connector->bad_edid_counter++;
+
+out:
+ kfree(block);
+ return NULL;
+}
+
+/**
+ * drm_probe_ddc() - probe DDC presence
+ * @adapter: I2C adapter to probe
+ *
+ * Return: True on success, false on failure.
+ */
+bool
+drm_probe_ddc(struct i2c_adapter *adapter)
+{
+ unsigned char out;
+
+ return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
+}
+EXPORT_SYMBOL(drm_probe_ddc);
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: I2C adapter to use for DDC
+ *
+ * Poke the given I2C channel to grab EDID data if possible. If found,
+ * attach it to the connector.
+ *
+ * Return: Pointer to valid EDID or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid = NULL;
+
+ if (drm_probe_ddc(adapter))
+ edid = (struct edid *)drm_do_get_edid(connector, adapter);
+
+ return edid;
+}
+EXPORT_SYMBOL(drm_get_edid);
+
+/**
+ * drm_edid_duplicate - duplicate an EDID and the extensions
+ * @edid: EDID to duplicate
+ *
+ * Return: Pointer to duplicated EDID or NULL on allocation failure.
+ */
+struct edid *drm_edid_duplicate(const struct edid *edid)
+{
+ return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_edid_duplicate);
+
+/*** EDID parsing ***/
+
+/**
+ * edid_vendor - match a string against EDID's obfuscated vendor field
+ * @edid: EDID to match
+ * @vendor: vendor string
+ *
+ * Returns true if @vendor is in @edid, false otherwise
+ */
+static bool edid_vendor(struct edid *edid, char *vendor)
+{
+ char edid_vendor[3];
+
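+	/*
+	 * The mfg_id field packs three letters as 5-bit values (1 = 'A')
+	 * into a big-endian 16-bit word; adding '@' (0x40) maps each value
+	 * back to its ASCII uppercase letter.
+	 */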
+ edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
+ edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
+ ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
+ edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
+
+ return !strncmp(edid_vendor, vendor, 3);
+}
+
+/**
+ * edid_get_quirks - return quirk flags for a given EDID
+ * @edid: EDID to process
+ *
+ * This tells subsequent routines what fixes they need to apply.
+ */
+static u32 edid_get_quirks(struct edid *edid)
+{
+ struct edid_quirk *quirk;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+ quirk = &edid_quirk_list[i];
+
+ if (edid_vendor(edid, quirk->vendor) &&
+ (EDID_PRODUCT_ID(edid) == quirk->product_id))
+ return quirk->quirks;
+ }
+
+ return 0;
+}
+
+#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
+#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
+
+/**
+ * edid_fixup_preferred - set preferred modes based on quirk list
+ * @connector: has mode list to fix up
+ * @quirks: quirks list
+ *
+ * Walk the mode list for @connector, clearing the preferred status
+ * on existing modes and setting it anew for the right mode ala @quirks.
+ */
+static void edid_fixup_preferred(struct drm_connector *connector,
+ u32 quirks)
+{
+ struct drm_display_mode *t, *cur_mode, *preferred_mode;
+ int target_refresh = 0;
+ int cur_vrefresh, preferred_vrefresh;
+
+ if (list_empty(&connector->probed_modes))
+ return;
+
+ if (quirks & EDID_QUIRK_PREFER_LARGE_60)
+ target_refresh = 60;
+ if (quirks & EDID_QUIRK_PREFER_LARGE_75)
+ target_refresh = 75;
+
+ preferred_mode = list_first_entry(&connector->probed_modes,
+ struct drm_display_mode, head);
+
+ list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
+ cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+
+ if (cur_mode == preferred_mode)
+ continue;
+
+ /* Largest mode is preferred */
+ if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
+ preferred_mode = cur_mode;
+
+ cur_vrefresh = cur_mode->vrefresh ?
+ cur_mode->vrefresh : drm_mode_vrefresh(cur_mode);
+ preferred_vrefresh = preferred_mode->vrefresh ?
+ preferred_mode->vrefresh : drm_mode_vrefresh(preferred_mode);
+ /* At a given size, try to get closest to target refresh */
+ if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
+ MODE_REFRESH_DIFF(cur_vrefresh, target_refresh) <
+ MODE_REFRESH_DIFF(preferred_vrefresh, target_refresh)) {
+ preferred_mode = cur_mode;
+ }
+ }
+
+ preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
+}
+
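+/*
+ * CVT reduced-blanking modes have a fixed signature: 160 pixels of total
+ * horizontal blanking, hsync ending 80 pixels past active (48 front porch
+ * plus 32 sync), and a 3-line vertical front porch.
+ */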
+static bool
+mode_is_rb(const struct drm_display_mode *mode)
+{
+ return (mode->htotal - mode->hdisplay == 160) &&
+ (mode->hsync_end - mode->hdisplay == 80) &&
+ (mode->hsync_end - mode->hsync_start == 32) &&
+ (mode->vsync_start - mode->vdisplay == 3);
+}
+
+/**
+ * drm_mode_find_dmt - Create a copy of a mode if present in DMT
+ * @dev: Device to duplicate against
+ * @hsize: Mode width
+ * @vsize: Mode height
+ * @fresh: Mode refresh rate
+ * @rb: Mode reduced-blanking-ness
+ *
+ * Walk the DMT mode list looking for a match for the given parameters.
+ *
+ * Return: A newly allocated copy of the mode, or NULL if not found.
+ */
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh,
+ bool rb)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
+ if (hsize != ptr->hdisplay)
+ continue;
+ if (vsize != ptr->vdisplay)
+ continue;
+ if (fresh != drm_mode_vrefresh(ptr))
+ continue;
+ if (rb != mode_is_rb(ptr))
+ continue;
+
+ return drm_mode_duplicate(dev, ptr);
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_mode_find_dmt);
+
+typedef void detailed_cb(struct detailed_timing *timing, void *closure);
+
+static void
+cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+ int i, n = 0;
+ u8 d = ext[0x02];
+ u8 *det_base = ext + d;
+
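+	/* ext[2] is the offset of the first 18-byte DTD in the CEA block;
+	 * DTDs run from there up to the checksum byte at offset 127 */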
+ n = (127 - d) / 18;
+ for (i = 0; i < n; i++)
+ cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+static void
+vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+ unsigned int i, n = min((int)ext[0x02], 6);
+ u8 *det_base = ext + 5;
+
+ if (ext[0x01] != 1)
+ return; /* unknown version */
+
+ for (i = 0; i < n; i++)
+ cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+static void
+drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
+{
+ int i;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (edid == NULL)
+ return;
+
+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
+ cb(&(edid->detailed_timings[i]), closure);
+
+ for (i = 1; i <= raw_edid[0x7e]; i++) {
+ u8 *ext = raw_edid + (i * EDID_LENGTH);
+ switch (*ext) {
+ case CEA_EXT:
+ cea_for_each_detailed_block(ext, cb, closure);
+ break;
+ case VTB_EXT:
+ vtb_for_each_detailed_block(ext, cb, closure);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void
+is_rb(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE)
+ if (r[15] & 0x10)
+ *(bool *)data = true;
+}
+
+/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
+static bool
+drm_monitor_supports_rb(struct edid *edid)
+{
+ if (edid->revision >= 4) {
+ bool ret = false;
+ drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+ return ret;
+ }
+
+ return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
+}
+
+static void
+find_gtf2(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+ *(u8 **)data = r;
+}
+
+/*
+ * The secondary GTF curve kicks in above a break frequency, which the
+ * range descriptor stores in 2 kHz units (hence the "* 2" to get kHz).
+ */
+static int
+drm_gtf2_hbreak(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[12] * 2) : 0;
+}
+
+static int
+drm_gtf2_2c(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[13] : 0;
+}
+
+static int
+drm_gtf2_m(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[15] << 8) + r[14] : 0;
+}
+
+static int
+drm_gtf2_k(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[16] : 0;
+}
+
+static int
+drm_gtf2_2j(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[17] : 0;
+}
+
+/**
+ * standard_timing_level - get std. timing level (CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+ if (edid->revision >= 2) {
+ if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+ return LEVEL_CVT;
+ if (drm_gtf2_hbreak(edid))
+ return LEVEL_GTF2;
+ return LEVEL_GTF;
+ }
+ return LEVEL_DMT;
}
/*
@@ -528,27 +1607,26 @@ bad_std_timing(u8 a, u8 b)
/**
* drm_mode_std - convert standard mode info (width, height, refresh) into mode
+ * @connector: connector for the EDID block
+ * @edid: EDID block to scan
* @t: standard timing params
- * @timing_level: standard timing level
*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT/GTF/DMT.
- *
- * Punts for now, but should eventually use the FB layer's CVT based mode
- * generation code.
*/
-struct drm_display_mode *drm_mode_std(struct drm_device *dev,
- struct std_timing *t,
- int revision,
- int timing_level)
+static struct drm_display_mode *
+drm_mode_std(struct drm_connector *connector, struct edid *edid,
+ struct std_timing *t)
{
- struct drm_display_mode *mode;
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *m, *mode = NULL;
int hsize, vsize;
int vrefresh_rate;
unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
>> EDID_TIMING_ASPECT_SHIFT;
unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
>> EDID_TIMING_VFREQ_SHIFT;
+ int timing_level = standard_timing_level(edid);
if (bad_std_timing(t->hsize, t->vfreq_aspect))
return NULL;
@@ -559,7 +1637,7 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
vrefresh_rate = vfreq + 60;
/* the vdisplay is calculated based on the aspect ratio */
if (aspect_ratio == 0) {
- if (revision < 3)
+ if (edid->revision < 3)
vsize = hsize;
else
vsize = (hsize * 10) / 16;
@@ -569,27 +1647,73 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
vsize = (hsize * 4) / 5;
else
vsize = (hsize * 9) / 16;
- /* HDTV hack */
- if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
- mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+
+ /* HDTV hack, part 1 */
+ if (vrefresh_rate == 60 &&
+ ((hsize == 1360 && vsize == 765) ||
+ (hsize == 1368 && vsize == 769))) {
+ hsize = 1366;
+ vsize = 768;
+ }
+
+ /*
+ * If this connector already has a mode for this size and refresh
+ * rate (because it came from detailed or CVT info), use that
+ * instead. This way we don't have to guess at interlace or
+ * reduced blanking.
+ */
+ list_for_each_entry(m, &connector->probed_modes, head)
+ if (m->hdisplay == hsize && m->vdisplay == vsize &&
+ drm_mode_vrefresh(m) == vrefresh_rate)
+ return NULL;
+
+ /* HDTV hack, part 2 */
+ if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
+ mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
false);
mode->hdisplay = 1366;
- mode->vsync_start = mode->vsync_start - 1;
- mode->vsync_end = mode->vsync_end - 1;
+ mode->hsync_start = mode->hsync_start - 1;
+ mode->hsync_end = mode->hsync_end - 1;
return mode;
}
- mode = NULL;
+
/* check whether it can be found in default mode table */
- mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate);
+ if (drm_monitor_supports_rb(edid)) {
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate,
+ true);
+ if (mode)
+ return mode;
+ }
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false);
if (mode)
return mode;
+ /* okay, generate it */
switch (timing_level) {
case LEVEL_DMT:
break;
case LEVEL_GTF:
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
break;
+ case LEVEL_GTF2:
+ /*
+ * This is potentially wrong if there's ever a monitor with
+	 * more than one range section, each claiming a different
+ * secondary GTF curve. Please don't do that.
+ */
+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ if (!mode)
+ return NULL;
+ if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+ drm_mode_destroy(dev, mode);
+ mode = drm_gtf_mode_complex(dev, hsize, vsize,
+ vrefresh_rate, 0, 0,
+ drm_gtf2_m(edid),
+ drm_gtf2_2c(edid),
+ drm_gtf2_k(edid),
+ drm_gtf2_2j(edid));
+ }
+ break;
case LEVEL_CVT:
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
false);
@@ -598,6 +1722,48 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
return mode;
}
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded. Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size. Technically we
+ * should be checking refresh rate too. Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+ struct detailed_pixel_timing *pt)
+{
+ int i;
+ static const struct {
+ int w, h;
+ } cea_interlaced[] = {
+ { 1920, 1080 },
+ { 720, 480 },
+ { 1440, 480 },
+ { 2880, 480 },
+ { 720, 576 },
+ { 1440, 576 },
+ { 2880, 576 },
+ };
+
+ if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
+ if ((mode->hdisplay == cea_interlaced[i].w) &&
+ (mode->vdisplay == cea_interlaced[i].h / 2)) {
+ mode->vdisplay *= 2;
+ mode->vsync_start *= 2;
+ mode->vsync_end *= 2;
+ mode->vtotal *= 2;
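+			/* an interlaced frame has an odd total line count */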
+ mode->vtotal |= 1;
+ }
+ }
+
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
/**
* drm_mode_detailed - create a new mode from an EDID detailed timing section
* @dev: DRM device (needed to create new mode)
@@ -621,7 +1787,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
- unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+ unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
/* ignore tiny modes */
@@ -629,12 +1795,11 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
return NULL;
if (pt->misc & DRM_EDID_PT_STEREO) {
- printk(KERN_WARNING "stereo mode not supported\n");
+ DRM_DEBUG_KMS("stereo mode not supported\n");
return NULL;
}
if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
- printk(KERN_WARNING "integrated sync not supported\n");
- return NULL;
+ DRM_DEBUG_KMS("composite sync not supported\n");
}
/* it is incorrect if hsync/vsync width is zero */
@@ -643,12 +1808,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
"Wrong Hsync/Vsync pulse width\n");
return NULL;
}
+
+ if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+ mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
+ if (!mode)
+ return NULL;
+
+ goto set_size;
+ }
+
mode = drm_mode_create(dev);
if (!mode)
return NULL;
- mode->type = DRM_MODE_TYPE_DRIVER;
-
if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
timing->pixel_clock = cpu_to_le16(1088);
@@ -664,25 +1836,13 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->vsync_end = mode->vsync_start + vsync_pulse_width;
mode->vtotal = mode->vdisplay + vblank;
- /* perform the basic check for the detailed timing */
- if (mode->hsync_end > mode->htotal ||
- mode->vsync_end > mode->vtotal) {
- drm_mode_destroy(dev, mode);
- DRM_DEBUG_KMS("Incorrect detailed timing. "
- "Sync is beyond the blank.\n");
- return NULL;
- }
-
/* Some EDIDs have bogus h/vtotal values */
if (mode->hsync_end > mode->htotal)
mode->htotal = mode->hsync_end + 1;
if (mode->vsync_end > mode->vtotal)
mode->vtotal = mode->vsync_end + 1;
- drm_mode_set_name(mode);
-
- if (pt->misc & DRM_EDID_PT_INTERLACED)
- mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ drm_mode_do_interlace_quirk(mode, pt);
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
@@ -693,6 +1853,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+set_size:
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
@@ -706,193 +1867,315 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->height_mm = edid->height_cm * 10;
}
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+ drm_mode_set_name(mode);
+
return mode;
}
-/*
- * Detailed mode info for the EDID "established modes" data to use.
- */
-static struct drm_display_mode edid_est_modes[] = {
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
- 896, 1024, 0, 600, 601, 603, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
- 720, 840, 0, 480, 481, 484, 500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 491, 520, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
- 768, 864, 0, 480, 483, 486, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
- 752, 800, 0, 480, 490, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
- 846, 900, 0, 400, 421, 423, 449, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
- 846, 900, 0, 400, 412, 414, 449, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
- 1136, 1312, 0, 768, 769, 772, 800, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
- 1184, 1328, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 776, 817, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
- { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
- 928, 1152, 0, 624, 625, 628, 667, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
- 896, 1056, 0, 600, 601, 604, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
- 976, 1040, 0, 600, 637, 643, 666, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
-};
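+/*
+ * Range descriptors store 8-bit min/max rates; EDID 1.4 adds flag bits
+ * in byte 4 that offset each limit by 255, handled by the revision
+ * checks below.
+ */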
+static bool
+mode_in_hsync_range(const struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
+{
+ int hsync, hmin, hmax;
+
+ hmin = t[7];
+ if (edid->revision >= 4)
+ hmin += ((t[4] & 0x04) ? 255 : 0);
+ hmax = t[8];
+ if (edid->revision >= 4)
+ hmax += ((t[4] & 0x08) ? 255 : 0);
+ hsync = drm_mode_hsync(mode);
-#define EDID_EST_TIMINGS 16
-#define EDID_STD_TIMINGS 8
-#define EDID_DETAILED_TIMINGS 4
+ return (hsync <= hmax && hsync >= hmin);
+}
-/**
- * add_established_modes - get est. modes from EDID and add them
- * @edid: EDID block to scan
- *
- * Each EDID block contains a bitmap of the supported "established modes" list
- * (defined above). Tease them out and add them to the global modes list.
- */
-static int add_established_modes(struct drm_connector *connector, struct edid *edid)
+static bool
+mode_in_vsync_range(const struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
+{
+ int vsync, vmin, vmax;
+
+ vmin = t[5];
+ if (edid->revision >= 4)
+ vmin += ((t[4] & 0x01) ? 255 : 0);
+ vmax = t[6];
+ if (edid->revision >= 4)
+ vmax += ((t[4] & 0x02) ? 255 : 0);
+ vsync = drm_mode_vrefresh(mode);
+
+ return (vsync <= vmax && vsync >= vmin);
+}
+
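+/*
+ * Byte 9 of a range descriptor is the max pixel clock in 10 MHz units;
+ * EDID 1.4 CVT descriptors refine it downward in 0.25 MHz steps via the
+ * top six bits of byte 12. Values returned here are in kHz, matching
+ * mode->clock.
+ */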
+static u32
+range_pixel_clock(struct edid *edid, u8 *t)
+{
+ /* unspecified */
+ if (t[9] == 0 || t[9] == 255)
+ return 0;
+
+ /* 1.4 with CVT support gives us real precision, yay */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ return (t[9] * 10000) - ((t[12] >> 2) * 250);
+
+ /* 1.3 is pathetic, so fuzz up a bit */
+ return t[9] * 10000 + 5001;
+}
+
+static bool
+mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ u32 max_clock;
+ u8 *t = (u8 *)timing;
+
+ if (!mode_in_hsync_range(mode, edid, t))
+ return false;
+
+ if (!mode_in_vsync_range(mode, edid, t))
+ return false;
+
+ if ((max_clock = range_pixel_clock(edid, t)))
+ if (mode->clock > max_clock)
+ return false;
+
+	/* 1.4 max horizontal check: max active pixels per line, in units of
+	 * 8, split across byte 13 (low) and bits 1:0 of byte 12 (high) */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
+ return false;
+
+ if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+ return false;
+
+ return true;
+}
+
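+/*
+ * Only infer a mode if it's no larger than some mode we already probed,
+ * and never duplicate an existing size/refresh combination.
+ */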
+static bool valid_inferred_mode(const struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ struct drm_display_mode *m;
+ bool ok = false;
+
+ list_for_each_entry(m, &connector->probed_modes, head) {
+ if (mode->hdisplay == m->hdisplay &&
+ mode->vdisplay == m->vdisplay &&
+ drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+ return false; /* duplicated */
+ if (mode->hdisplay <= m->hdisplay &&
+ mode->vdisplay <= m->vdisplay)
+ ok = true;
+ }
+ return ok;
+}
+
+static int
+drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
{
- struct drm_device *dev = connector->dev;
- unsigned long est_bits = edid->established_timings.t1 |
- (edid->established_timings.t2 << 8) |
- ((edid->established_timings.mfg_rsvd & 0x80) << 9);
int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
- for (i = 0; i <= EDID_EST_TIMINGS; i++)
- if (est_bits & (1<<i)) {
- struct drm_display_mode *newmode;
- newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
+ for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
+ if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+ valid_inferred_mode(connector, drm_dmt_modes + i)) {
+ newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
+ }
return modes;
}
-/**
- * stanard_timing_level - get std. timing level(CVT/GTF/DMT)
- * @edid: EDID block to scan
+
+/* fix up 1366x768 mode from 1368x768;
+ * GTF/CVT can't express a 1366 width, which isn't divisible by 8
*/
-static int standard_timing_level(struct edid *edid)
+static void fixup_mode_1366x768(struct drm_display_mode *mode)
{
- if (edid->revision >= 2) {
- if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
- return LEVEL_CVT;
- return LEVEL_GTF;
+ if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
+ mode->hdisplay = 1366;
+ mode->hsync_start--;
+ mode->hsync_end--;
+ drm_mode_set_name(mode);
}
- return LEVEL_DMT;
}
-/**
- * add_standard_modes - get std. modes from EDID and add them
- * @edid: EDID block to scan
- *
- * Standard modes can be calculated using the CVT standard. Grab them from
- * @edid, calculate them, and add them to the list.
- */
-static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
{
- struct drm_device *dev = connector->dev;
int i, modes = 0;
- int timing_level;
-
- timing_level = standard_timing_level(edid);
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
- for (i = 0; i < EDID_STD_TIMINGS; i++) {
- struct std_timing *t = &edid->standard_timings[i];
- struct drm_display_mode *newmode;
+ for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
+ const struct minimode *m = &extra_modes[i];
+ newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
+ if (!newmode)
+ return modes;
- /* If std timings bytes are 1, 1 it's empty */
- if (t->hsize == 1 && t->vfreq_aspect == 1)
+ fixup_mode_1366x768(newmode);
+ if (!mode_in_range(newmode, edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
+ drm_mode_destroy(dev, newmode);
continue;
+ }
- newmode = drm_mode_std(dev, &edid->standard_timings[i],
- edid->revision, timing_level);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+ return modes;
+}
+
+static int
+drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+ bool rb = drm_monitor_supports_rb(edid);
+
+ for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
+ const struct minimode *m = &extra_modes[i];
+ newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
+ if (!newmode)
+ return modes;
+
+ fixup_mode_1366x768(newmode);
+ if (!mode_in_range(newmode, edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
+ drm_mode_destroy(dev, newmode);
+ continue;
}
+
+ drm_mode_probed_add(connector, newmode);
+ modes++;
}
return modes;
}
-/*
- * XXX fix this for:
- * - GTF secondary curve formula
- * - EDID 1.4 range offsets
- * - CVT extended bits
- */
-static bool
-mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+static void
+do_inferred_modes(struct detailed_timing *timing, void *c)
{
- struct detailed_data_monitor_range *range;
- int hsync, vrefresh;
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ struct detailed_data_monitor_range *range = &data->data.range;
- range = &timing->data.other_data.data.range;
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ return;
- hsync = drm_mode_hsync(mode);
- vrefresh = drm_mode_vrefresh(mode);
+ closure->modes += drm_dmt_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+
+ if (!version_greater(closure->edid, 1, 1))
+ return; /* GTF not defined yet */
+
+ switch (range->flags) {
+ case 0x02: /* secondary gtf, XXX could do more */
+ case 0x00: /* default gtf */
+ closure->modes += drm_gtf_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+ break;
+ case 0x04: /* cvt, only in 1.4+ */
+ if (!version_greater(closure->edid, 1, 3))
+ break;
- if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
- return false;
+ closure->modes += drm_cvt_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+ break;
+ case 0x01: /* just the ranges, no formula */
+ default:
+ break;
+ }
+}
- if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
- return false;
+static int
+add_inferred_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
- if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
- /* be forgiving since it's in units of 10MHz */
- int max_clock = range->pixel_clock_mhz * 10 + 9;
- max_clock *= 1000;
- if (mode->clock > max_clock)
- return false;
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid, do_inferred_modes,
+ &closure);
+
+ return closure.modes;
+}
+
+static int
+drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
+{
+ int i, j, m, modes = 0;
+ struct drm_display_mode *mode;
+ u8 *est = ((u8 *)timing) + 5;
+
+ for (i = 0; i < 6; i++) {
+ for (j = 7; j >= 0; j--) {
+ m = (i * 8) + (7 - j);
+ if (m >= ARRAY_SIZE(est3_modes))
+ break;
+ if (est[i] & (1 << j)) {
+ mode = drm_mode_find_dmt(connector->dev,
+ est3_modes[m].w,
+ est3_modes[m].h,
+ est3_modes[m].r,
+ est3_modes[m].rb);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ modes++;
+ }
+ }
+ }
}
- return true;
+ return modes;
}
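To make the bitmap indexing above concrete, here is a hedged worked example (the byte value is invented for illustration):

/* Suppose est[0] == 0x88: bits 7 and 3 are set, giving
 * m = 0*8 + (7-7) = 0 and m = 0*8 + (7-3) = 4, so the DMT lookup is
 * attempted for est3_modes[0] and est3_modes[4].
 */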
-/*
- * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
- * need to account for them.
+static void
+do_established_modes(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+
+ if (data->type == EDID_DETAIL_EST_TIMINGS)
+ closure->modes += drm_est3_modes(closure->connector, timing);
+}
+
+/**
+ * add_established_modes - get est. modes from EDID and add them
+ * @connector: connector to add mode(s) to
+ * @edid: EDID block to scan
+ *
+ * Each EDID block contains a bitmap of the supported "established modes" list
+ * (defined above). Tease them out and add them to the global modes list.
*/
-static int drm_gtf_modes_for_range(struct drm_connector *connector,
- struct detailed_timing *timing)
+static int
+add_established_modes(struct drm_connector *connector, struct edid *edid)
{
- int i, modes = 0;
- struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
+ unsigned long est_bits = edid->established_timings.t1 |
+ (edid->established_timings.t2 << 8) |
+ ((edid->established_timings.mfg_rsvd & 0x80) << 9);
+ int i, modes = 0;
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
- for (i = 0; i < drm_num_dmt_modes; i++) {
- if (mode_in_range(drm_dmt_modes + i, timing)) {
- newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+ for (i = 0; i <= EDID_EST_TIMINGS; i++) {
+ if (est_bits & (1<<i)) {
+ struct drm_display_mode *newmode;
+ newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
@@ -900,7 +2183,71 @@ static int drm_gtf_modes_for_range(struct drm_connector *connector,
}
}
- return modes;
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid,
+ do_established_modes, &closure);
+
+ return modes + closure.modes;
+}
+
+static void
+do_standard_modes(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ struct drm_connector *connector = closure->connector;
+ struct edid *edid = closure->edid;
+
+ if (data->type == EDID_DETAIL_STD_MODES) {
+ int i;
+ for (i = 0; i < 6; i++) {
+ struct std_timing *std;
+ struct drm_display_mode *newmode;
+
+ std = &data->data.timings[i];
+ newmode = drm_mode_std(connector, edid, std);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ closure->modes++;
+ }
+ }
+ }
+}
+
+/**
+ * add_standard_modes - get std. modes from EDID and add them
+ * @connector: connector to add mode(s) to
+ * @edid: EDID block to scan
+ *
+ * Standard modes can be calculated using the appropriate standard (DMT,
+ * GTF or CVT). Grab them from @edid and add them to the list.
+ */
+static int
+add_standard_modes(struct drm_connector *connector, struct edid *edid)
+{
+ int i, modes = 0;
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ for (i = 0; i < EDID_STD_TIMINGS; i++) {
+ struct drm_display_mode *newmode;
+
+ newmode = drm_mode_std(connector, edid,
+ &edid->standard_timings[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid, do_standard_modes,
+ &closure);
+
+ /* XXX should also look for standard codes in VTB blocks */
+
+ return modes + closure.modes;
}
static int drm_cvt_modes(struct drm_connector *connector,
@@ -952,349 +2299,1329 @@ static int drm_cvt_modes(struct drm_connector *connector,
return modes;
}
-static int add_detailed_modes(struct drm_connector *connector,
- struct detailed_timing *timing,
- struct edid *edid, u32 quirks, int preferred)
+static void
+do_cvt_mode(struct detailed_timing *timing, void *c)
{
- int i, modes = 0;
+ struct detailed_mode_closure *closure = c;
struct detailed_non_pixel *data = &timing->data.other_data;
- int timing_level = standard_timing_level(edid);
- int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+
+ if (data->type == EDID_DETAIL_CVT_3BYTE)
+ closure->modes += drm_cvt_modes(closure->connector, timing);
+}
+
+static int
+add_cvt_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ if (version_greater(edid, 1, 2))
+ drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure);
+
+ /* XXX should also look for CVT codes in VTB blocks */
+
+ return closure.modes;
+}
+
+static void
+do_detailed_mode(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
struct drm_display_mode *newmode;
- struct drm_device *dev = connector->dev;
if (timing->pixel_clock) {
- newmode = drm_mode_detailed(dev, edid, timing, quirks);
+ newmode = drm_mode_detailed(closure->connector->dev,
+ closure->edid, timing,
+ closure->quirks);
if (!newmode)
- return 0;
+ return;
- if (preferred)
+ if (closure->preferred)
newmode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, newmode);
- return 1;
+ drm_mode_probed_add(closure->connector, newmode);
+ closure->modes++;
+ closure->preferred = 0;
}
+}
- /* other timing types */
- switch (data->type) {
- case EDID_DETAIL_MONITOR_RANGE:
- if (gtf)
- modes += drm_gtf_modes_for_range(connector, timing);
- break;
- case EDID_DETAIL_STD_MODES:
- /* Six modes per detailed section */
- for (i = 0; i < 6; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
+/*
+ * add_detailed_modes - Add modes from detailed timings
+ * @connector: attached connector
+ * @edid: EDID block to scan
+ * @quirks: quirks to apply
+ */
+static int
+add_detailed_modes(struct drm_connector *connector, struct edid *edid,
+ u32 quirks)
+{
+ struct detailed_mode_closure closure = {
+ connector,
+ edid,
+ 1,
+ quirks,
+ 0
+ };
- std = &data->data.timings[i];
- newmode = drm_mode_std(dev, std, edid->revision,
- timing_level);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- break;
- case EDID_DETAIL_CVT_3BYTE:
- modes += drm_cvt_modes(connector, timing);
- break;
- default:
- break;
+ if (closure.preferred && !version_greater(edid, 1, 3))
+ closure.preferred =
+ (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+
+ drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &closure);
+
+ return closure.modes;
+}
+
+#define AUDIO_BLOCK 0x01
+#define VIDEO_BLOCK 0x02
+#define VENDOR_BLOCK 0x03
+#define SPEAKER_BLOCK 0x04
+#define VIDEO_CAPABILITY_BLOCK 0x07
+#define EDID_BASIC_AUDIO (1 << 6)
+#define EDID_CEA_YCRCB444 (1 << 5)
+#define EDID_CEA_YCRCB422 (1 << 4)
+#define EDID_CEA_VCDB_QS (1 << 6)
+
+/*
+ * Search EDID for CEA extension block.
+ */
+static u8 *drm_find_cea_extension(struct edid *edid)
+{
+ u8 *edid_ext = NULL;
+ int i;
+
+ /* No EDID or EDID extensions */
+ if (edid == NULL || edid->extensions == 0)
+ return NULL;
+
+ /* Find CEA extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+ if (edid_ext[0] == CEA_EXT)
+ break;
}
- return modes;
+ if (i == edid->extensions)
+ return NULL;
+
+ return edid_ext;
+}
+
+/*
+ * Calculate the alternate clock for the CEA mode
+ * (60Hz vs. 59.94Hz etc.)
+ */
+static unsigned int
+cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
+{
+ unsigned int clock = cea_mode->clock;
+
+ if (cea_mode->vrefresh % 6 != 0)
+ return clock;
+
+ /*
+ * edid_cea_modes contains the 59.94Hz
+ * variant for 240 and 480 line modes,
+ * and the 60Hz variant otherwise.
+ */
+ if (cea_mode->vdisplay == 240 || cea_mode->vdisplay == 480)
+ clock = clock * 1001 / 1000;
+ else
+ clock = DIV_ROUND_UP(clock * 1000, 1001);
+
+ return clock;
}
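As a worked example of the arithmetic above (clock values in kHz, as found in the CEA-861 mode table):

/* 480p is stored as the 59.94 Hz variant at 27000, so its alternate
 * 60 Hz clock is 27000 * 1001 / 1000 = 27027.
 * 1080p is stored as the 60 Hz variant at 148500, so its alternate
 * 59.94 Hz clock is DIV_ROUND_UP(148500 * 1000, 1001) = 148352.
 */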
/**
- * add_detailed_info - get detailed mode info from EDID data
- * @connector: attached connector
- * @edid: EDID block to scan
- * @quirks: quirks to apply
+ * drm_match_cea_mode - look for a CEA mode matching given mode
+ * @to_match: display mode
*
- * Some of the detailed timing sections may contain mode information. Grab
- * it and add it to the list.
+ * Return: The CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
+ * mode.
*/
-static int add_detailed_info(struct drm_connector *connector,
- struct edid *edid, u32 quirks)
+u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
{
- int i, modes = 0;
+ u8 mode;
- for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
- struct detailed_timing *timing = &edid->detailed_timings[i];
- int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+ if (!to_match->clock)
+ return 0;
- /* In 1.0, only timings are allowed */
- if (!timing->pixel_clock && edid->version == 1 &&
- edid->revision == 0)
- continue;
+ for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
+ const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
+ unsigned int clock1, clock2;
- modes += add_detailed_modes(connector, timing, edid, quirks,
- preferred);
- }
+ /* Check both 60Hz and 59.94Hz */
+ clock1 = cea_mode->clock;
+ clock2 = cea_mode_alternate_clock(cea_mode);
- return modes;
+ if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
+ KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
+ drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
+ return mode + 1;
+ }
+ return 0;
}
+EXPORT_SYMBOL(drm_match_cea_mode);
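A minimal usage sketch, assuming mode points at a probed drm_display_mode; the printed value follows the standard CEA-861 numbering, where e.g. 1920x1080@60Hz is VIC 16:

	u8 vic = drm_match_cea_mode(mode);

	if (vic)
		DRM_DEBUG_KMS("CEA VIC %u\n", vic);	/* e.g. 16 for 1080p60 */
	else
		DRM_DEBUG_KMS("not a CEA-861 mode\n");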
/**
- * add_detailed_mode_eedid - get detailed mode info from addtional timing
- * EDID block
- * @connector: attached connector
- * @edid: EDID block to scan(It is only to get addtional timing EDID block)
- * @quirks: quirks to apply
+ * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to
+ * the input VIC from the CEA mode list
+ * @video_code: ID given to each of the CEA modes
*
- * Some of the detailed timing sections may contain mode information. Grab
- * it and add it to the list.
+ * Returns picture aspect ratio
*/
-static int add_detailed_info_eedid(struct drm_connector *connector,
- struct edid *edid, u32 quirks)
+enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
{
- int i, modes = 0;
- char *edid_ext = NULL;
- struct detailed_timing *timing;
- int edid_ext_num;
- int start_offset, end_offset;
- int timing_level;
+	/* video_code is a 1-based CEA VIC, so subtract 1 to index the
+	 * mode array
+	 */
+ return edid_cea_modes[video_code-1].picture_aspect_ratio;
+}
+EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
- if (edid->version == 1 && edid->revision < 3) {
- /* If the EDID version is less than 1.3, there is no
- * extension EDID.
- */
+/*
+ * Calculate the alternate clock for HDMI modes (those from the HDMI vendor
+ * specific block).
+ *
+ * It's almost like cea_mode_alternate_clock(); we just need to add an
+ * exception for the VIC 4 mode (4096x2160@24Hz): no alternate clock for this
+ * one.
+ */
+static unsigned int
+hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
+{
+	if (hdmi_mode->hdisplay == 4096 && hdmi_mode->vdisplay == 2160)
+ return hdmi_mode->clock;
+
+ return cea_mode_alternate_clock(hdmi_mode);
+}
+
+/*
+ * drm_match_hdmi_mode - look for a HDMI mode matching given mode
+ * @to_match: display mode
+ *
+ * An HDMI mode is one defined in the HDMI vendor specific block.
+ *
+ * Returns the HDMI Video ID (VIC) of the mode or 0 if it isn't one.
+ */
+static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
+{
+ u8 mode;
+
+ if (!to_match->clock)
return 0;
+
+ for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) {
+ const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode];
+ unsigned int clock1, clock2;
+
+ /* Make sure to also match alternate clocks */
+ clock1 = hdmi_mode->clock;
+ clock2 = hdmi_mode_alternate_clock(hdmi_mode);
+
+ if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
+ KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
+ drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
+ return mode + 1;
}
- if (!edid->extensions) {
- /* if there is no extension EDID, it is unnecessary to
- * parse the E-EDID to get detailed info
- */
+ return 0;
+}
+
+static int
+add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *tmp;
+ LIST_HEAD(list);
+ int modes = 0;
+
+ /* Don't add CEA modes if the CEA extension block is missing */
+ if (!drm_find_cea_extension(edid))
return 0;
+
+ /*
+ * Go through all probed modes and create a new mode
+ * with the alternate clock for certain CEA modes.
+ */
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ const struct drm_display_mode *cea_mode = NULL;
+ struct drm_display_mode *newmode;
+ u8 mode_idx = drm_match_cea_mode(mode) - 1;
+ unsigned int clock1, clock2;
+
+ if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
+ cea_mode = &edid_cea_modes[mode_idx];
+ clock2 = cea_mode_alternate_clock(cea_mode);
+ } else {
+ mode_idx = drm_match_hdmi_mode(mode) - 1;
+ if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
+ cea_mode = &edid_4k_modes[mode_idx];
+ clock2 = hdmi_mode_alternate_clock(cea_mode);
+ }
+ }
+
+ if (!cea_mode)
+ continue;
+
+ clock1 = cea_mode->clock;
+
+ if (clock1 == clock2)
+ continue;
+
+ if (mode->clock != clock1 && mode->clock != clock2)
+ continue;
+
+ newmode = drm_mode_duplicate(dev, cea_mode);
+ if (!newmode)
+ continue;
+
+ /* Carry over the stereo flags */
+ newmode->flags |= mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+ /*
+ * The current mode could be either variant. Make
+ * sure to pick the "other" clock for the new mode.
+ */
+ if (mode->clock != clock1)
+ newmode->clock = clock1;
+ else
+ newmode->clock = clock2;
+
+ list_add_tail(&newmode->head, &list);
}
- /* Chose real EDID extension number */
- edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
- MAX_EDID_EXT_NUM : edid->extensions;
+ list_for_each_entry_safe(mode, tmp, &list, head) {
+ list_del(&mode->head);
+ drm_mode_probed_add(connector, mode);
+ modes++;
+ }
- /* Find CEA extension */
- for (i = 0; i < edid_ext_num; i++) {
- edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
- /* This block is CEA extension */
- if (edid_ext[0] == 0x02)
- break;
+ return modes;
+}
+
+static struct drm_display_mode *
+drm_display_mode_from_vic_index(struct drm_connector *connector,
+ const u8 *video_db, u8 video_len,
+ u8 video_index)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *newmode;
+ u8 cea_mode;
+
+ if (video_db == NULL || video_index >= video_len)
+ return NULL;
+
+ /* CEA modes are numbered 1..127 */
+ cea_mode = (video_db[video_index] & 127) - 1;
+ if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
+ return NULL;
+
+ newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+ if (!newmode)
+ return NULL;
+
+ newmode->vrefresh = 0;
+
+ return newmode;
+}
+
+static int
+do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
+{
+ int i, modes = 0;
+
+ for (i = 0; i < len; i++) {
+ struct drm_display_mode *mode;
+ mode = drm_display_mode_from_vic_index(connector, db, len, i);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ modes++;
+ }
}
- if (i == edid_ext_num) {
- /* if there is no additional timing EDID block, return */
- return 0;
+ return modes;
+}
+
+struct stereo_mandatory_mode {
+ int width, height, vrefresh;
+ unsigned int flags;
+};
+
+static const struct stereo_mandatory_mode stereo_mandatory_modes[] = {
+ { 1920, 1080, 24, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+ { 1920, 1080, 24, DRM_MODE_FLAG_3D_FRAME_PACKING },
+ { 1920, 1080, 50,
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+ { 1920, 1080, 60,
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+ { 1280, 720, 50, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+ { 1280, 720, 50, DRM_MODE_FLAG_3D_FRAME_PACKING },
+ { 1280, 720, 60, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+ { 1280, 720, 60, DRM_MODE_FLAG_3D_FRAME_PACKING }
+};
+
+static bool
+stereo_match_mandatory(const struct drm_display_mode *mode,
+ const struct stereo_mandatory_mode *stereo_mode)
+{
+ unsigned int interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+ return mode->hdisplay == stereo_mode->width &&
+ mode->vdisplay == stereo_mode->height &&
+ interlaced == (stereo_mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+ drm_mode_vrefresh(mode) == stereo_mode->vrefresh;
+}
+
+static int add_hdmi_mandatory_stereo_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ const struct drm_display_mode *mode;
+ struct list_head stereo_modes;
+ int modes = 0, i;
+
+ INIT_LIST_HEAD(&stereo_modes);
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ for (i = 0; i < ARRAY_SIZE(stereo_mandatory_modes); i++) {
+ const struct stereo_mandatory_mode *mandatory;
+ struct drm_display_mode *new_mode;
+
+ if (!stereo_match_mandatory(mode,
+ &stereo_mandatory_modes[i]))
+ continue;
+
+ mandatory = &stereo_mandatory_modes[i];
+ new_mode = drm_mode_duplicate(dev, mode);
+ if (!new_mode)
+ continue;
+
+ new_mode->flags |= mandatory->flags;
+ list_add_tail(&new_mode->head, &stereo_modes);
+ modes++;
+ }
}
- /* Get the start offset of detailed timing block */
- start_offset = edid_ext[2];
- if (start_offset == 0) {
- /* If the start_offset is zero, it means that neither detailed
- * info nor data block exist. In such case it is also
- * unnecessary to parse the detailed timing info.
- */
+ list_splice_tail(&stereo_modes, &connector->probed_modes);
+
+ return modes;
+}
+
+static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *newmode;
+
+ vic--; /* VICs start at 1 */
+ if (vic >= ARRAY_SIZE(edid_4k_modes)) {
+ DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
return 0;
}
- timing_level = standard_timing_level(edid);
- end_offset = EDID_LENGTH;
- end_offset -= sizeof(struct detailed_timing);
- for (i = start_offset; i < end_offset;
- i += sizeof(struct detailed_timing)) {
- timing = (struct detailed_timing *)(edid_ext + i);
- modes += add_detailed_modes(connector, timing, edid, quirks, 0);
+ newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
+ if (!newmode)
+ return 0;
+
+ drm_mode_probed_add(connector, newmode);
+
+ return 1;
+}
+
+static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
+ const u8 *video_db, u8 video_len, u8 video_index)
+{
+ struct drm_display_mode *newmode;
+ int modes = 0;
+
+ if (structure & (1 << 0)) {
+ newmode = drm_display_mode_from_vic_index(connector, video_db,
+ video_len,
+ video_index);
+ if (newmode) {
+ newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ if (structure & (1 << 6)) {
+ newmode = drm_display_mode_from_vic_index(connector, video_db,
+ video_len,
+ video_index);
+ if (newmode) {
+ newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ if (structure & (1 << 8)) {
+ newmode = drm_display_mode_from_vic_index(connector, video_db,
+ video_len,
+ video_index);
+ if (newmode) {
+ newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
}
return modes;
}
-#define DDC_ADDR 0x50
-/**
- * Get EDID information via I2C.
- *
- * \param adapter : i2c device adaptor
- * \param buf : EDID data buffer to be filled
- * \param len : EDID data buffer length
- * \return 0 on success or -1 on failure.
+/*
+ * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the CEA vendor specific block
+ * @len: length of the CEA block payload, i.e. one can access up to db[len]
+ * @video_db: CEA video data block from the same extension, used to resolve
+ * the 2D_VIC_order_X references
+ * @video_len: payload length of @video_db
*
- * Try to fetch EDID information by calling i2c driver function.
+ * Parses the HDMI VSDB looking for modes to add to @connector. This function
+ * also adds the stereo 3d modes when applicable.
*/
-int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
- unsigned char *buf, int len)
-{
- unsigned char start = 0x0;
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = &start,
- }, {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
+static int
+do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
+ const u8 *video_db, u8 video_len)
+{
+ int modes = 0, offset = 0, i, multi_present = 0, multi_len;
+ u8 vic_len, hdmi_3d_len = 0;
+ u16 mask;
+ u16 structure_all;
+
+ if (len < 8)
+ goto out;
+
+ /* no HDMI_Video_Present */
+ if (!(db[8] & (1 << 5)))
+ goto out;
+
+ /* Latency_Fields_Present */
+ if (db[8] & (1 << 7))
+ offset += 2;
+
+ /* I_Latency_Fields_Present */
+ if (db[8] & (1 << 6))
+ offset += 2;
+
+	/* the declared length is not long enough for the first 2 bytes
+	 * of the additional video format capabilities */
+ if (len < (8 + offset + 2))
+ goto out;
+
+ /* 3D_Present */
+ offset++;
+ if (db[8 + offset] & (1 << 7)) {
+ modes += add_hdmi_mandatory_stereo_modes(connector);
+
+ /* 3D_Multi_present */
+ multi_present = (db[8 + offset] & 0x60) >> 5;
+ }
+
+ offset++;
+ vic_len = db[8 + offset] >> 5;
+ hdmi_3d_len = db[8 + offset] & 0x1f;
+
+ for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
+ u8 vic;
+
+ vic = db[9 + offset + i];
+ modes += add_hdmi_mode(connector, vic);
+ }
+ offset += 1 + vic_len;
+
+ if (multi_present == 1)
+ multi_len = 2;
+ else if (multi_present == 2)
+ multi_len = 4;
+ else
+ multi_len = 0;
+
+ if (len < (8 + offset + hdmi_3d_len - 1))
+ goto out;
+
+ if (hdmi_3d_len < multi_len)
+ goto out;
+
+ if (multi_present == 1 || multi_present == 2) {
+ /* 3D_Structure_ALL */
+ structure_all = (db[8 + offset] << 8) | db[9 + offset];
+
+ /* check if 3D_MASK is present */
+ if (multi_present == 2)
+ mask = (db[10 + offset] << 8) | db[11 + offset];
+ else
+ mask = 0xffff;
+
+ for (i = 0; i < 16; i++) {
+ if (mask & (1 << i))
+ modes += add_3d_struct_modes(connector,
+ structure_all,
+ video_db,
+ video_len, i);
}
- };
+ }
- if (i2c_transfer(adapter, msgs, 2) == 2)
- return 0;
+ offset += multi_len;
+
+ for (i = 0; i < (hdmi_3d_len - multi_len); i++) {
+ int vic_index;
+ struct drm_display_mode *newmode = NULL;
+ unsigned int newflag = 0;
+ bool detail_present;
+
+ detail_present = ((db[8 + offset + i] & 0x0f) > 7);
+
+ if (detail_present && (i + 1 == hdmi_3d_len - multi_len))
+ break;
+
+ /* 2D_VIC_order_X */
+ vic_index = db[8 + offset + i] >> 4;
- return -1;
+ /* 3D_Structure_X */
+ switch (db[8 + offset + i] & 0x0f) {
+ case 0:
+ newflag = DRM_MODE_FLAG_3D_FRAME_PACKING;
+ break;
+ case 6:
+ newflag = DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
+ break;
+ case 8:
+ /* 3D_Detail_X */
+ if ((db[9 + offset + i] >> 4) == 1)
+ newflag = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+ break;
+ }
+
+ if (newflag != 0) {
+ newmode = drm_display_mode_from_vic_index(connector,
+ video_db,
+ video_len,
+ vic_index);
+
+ if (newmode) {
+ newmode->flags |= newflag;
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+
+ if (detail_present)
+ i++;
+ }
+
+out:
+ return modes;
}
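The offset bookkeeping above is easiest to follow with a hedged worked example (the flag values are invented for illustration):

/* Suppose db[8] has both Latency_Fields_Present and
 * I_Latency_Fields_Present set: offset becomes 4, so after the
 * 3D_Present offset++ the 3D flags are read from db[13], the
 * HDMI_VIC_LEN/HDMI_3D_LEN byte is db[14], and the HDMI VIC list
 * starts at db[15].
 */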
-EXPORT_SYMBOL(drm_do_probe_ddc_edid);
-static int drm_ddc_read_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter,
- char *buf, int len)
+static int
+cea_db_payload_len(const u8 *db)
{
- int i;
+ return db[0] & 0x1f;
+}
- for (i = 0; i < 4; i++) {
- if (drm_do_probe_ddc_edid(adapter, buf, len))
- return -1;
- if (edid_is_valid((struct edid *)buf))
+static int
+cea_db_tag(const u8 *db)
+{
+ return db[0] >> 5;
+}
+
+static int
+cea_revision(const u8 *cea)
+{
+ return cea[1];
+}
+
+static int
+cea_db_offsets(const u8 *cea, int *start, int *end)
+{
+ /* Data block offset in CEA extension block */
+ *start = 4;
+ *end = cea[2];
+ if (*end == 0)
+ *end = 127;
+ if (*end < 4 || *end > 127)
+ return -ERANGE;
+ return 0;
+}
+
+static bool cea_db_is_hdmi_vsdb(const u8 *db)
+{
+ int hdmi_id;
+
+ if (cea_db_tag(db) != VENDOR_BLOCK)
+ return false;
+
+ if (cea_db_payload_len(db) < 5)
+ return false;
+
+ hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+ return hdmi_id == HDMI_IEEE_OUI;
+}
+
+#define for_each_cea_db(cea, i, start, end) \
+ for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
+
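The helpers above decode the CEA data block header byte, which packs the tag into bits 7:5 and the payload length into bits 4:0; a hedged worked example (byte value invented for illustration):

/* db[0] == 0x67: cea_db_tag() = 0x67 >> 5 = 3 (VENDOR_BLOCK) and
 * cea_db_payload_len() = 0x67 & 0x1f = 7, so the next data block
 * starts 1 + 7 = 8 bytes further on -- exactly the stride used by
 * for_each_cea_db().
 */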
+static int
+add_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+ const u8 *cea = drm_find_cea_extension(edid);
+ const u8 *db, *hdmi = NULL, *video = NULL;
+ u8 dbl, hdmi_len, video_len = 0;
+ int modes = 0;
+
+ if (cea && cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (cea_db_offsets(cea, &start, &end))
return 0;
+
+ for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+ dbl = cea_db_payload_len(db);
+
+ if (cea_db_tag(db) == VIDEO_BLOCK) {
+ video = db + 1;
+ video_len = dbl;
+ modes += do_cea_modes(connector, video, dbl);
+			} else if (cea_db_is_hdmi_vsdb(db)) {
+ hdmi = db;
+ hdmi_len = dbl;
+ }
+ }
}
- /* repeated checksum failures; warn, but carry on */
- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
- return -1;
+ /*
+	 * We parse the HDMI VSDB after having added the CEA modes, as we will
+	 * be patching their flags when the sink supports stereo 3D.
+ */
+ if (hdmi)
+ modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len, video,
+ video_len);
+
+ return modes;
+}
+
+static void
+parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
+{
+ u8 len = cea_db_payload_len(db);
+
+ if (len >= 6) {
+ connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
+ connector->dvi_dual = db[6] & 1;
+ }
+ if (len >= 7)
+ connector->max_tmds_clock = db[7] * 5;
+ if (len >= 8) {
+ connector->latency_present[0] = db[8] >> 7;
+ connector->latency_present[1] = (db[8] >> 6) & 1;
+ }
+ if (len >= 9)
+ connector->video_latency[0] = db[9];
+ if (len >= 10)
+ connector->audio_latency[0] = db[10];
+ if (len >= 11)
+ connector->video_latency[1] = db[11];
+ if (len >= 12)
+ connector->audio_latency[1] = db[12];
+
+ DRM_DEBUG_KMS("HDMI: DVI dual %d, "
+ "max TMDS clock %d, "
+ "latency present %d %d, "
+ "video latency %d %d, "
+ "audio latency %d %d\n",
+ connector->dvi_dual,
+ connector->max_tmds_clock,
+ (int) connector->latency_present[0],
+ (int) connector->latency_present[1],
+ connector->video_latency[0],
+ connector->video_latency[1],
+ connector->audio_latency[0],
+ connector->audio_latency[1]);
+}
+
+static void
+monitor_name(struct detailed_timing *t, void *data)
+{
+ if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
+ *(u8 **)data = t->data.other_data.data.str.str;
}
/**
- * drm_get_edid - get EDID data, if available
- * @connector: connector we're probing
- * @adapter: i2c adapter to use for DDC
+ * drm_edid_to_eld - build ELD from EDID
+ * @connector: connector corresponding to the HDMI/DP sink
+ * @edid: EDID to parse
*
- * Poke the given connector's i2c channel to grab EDID data if possible.
+ * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The
+ * Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to
+ * fill in.
+ */
+void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
+{
+ uint8_t *eld = connector->eld;
+ u8 *cea;
+ u8 *name;
+ u8 *db;
+ int sad_count = 0;
+ int mnl;
+ int dbl;
+
+ memset(eld, 0, sizeof(connector->eld));
+
+ cea = drm_find_cea_extension(edid);
+ if (!cea) {
+ DRM_DEBUG_KMS("ELD: no CEA Extension found\n");
+ return;
+ }
+
+ name = NULL;
+ drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
+ for (mnl = 0; name && mnl < 13; mnl++) {
+ if (name[mnl] == 0x0a)
+ break;
+ eld[20 + mnl] = name[mnl];
+ }
+ eld[4] = (cea[1] << 5) | mnl;
+ DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20);
+
+ eld[0] = 2 << 3; /* ELD version: 2 */
+
+ eld[16] = edid->mfg_id[0];
+ eld[17] = edid->mfg_id[1];
+ eld[18] = edid->prod_code[0];
+ eld[19] = edid->prod_code[1];
+
+ if (cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (cea_db_offsets(cea, &start, &end)) {
+ start = 0;
+ end = 0;
+ }
+
+ for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+ dbl = cea_db_payload_len(db);
+
+ switch (cea_db_tag(db)) {
+ case AUDIO_BLOCK:
+ /* Audio Data Block, contains SADs */
+ sad_count = dbl / 3;
+ if (dbl >= 1)
+ memcpy(eld + 20 + mnl, &db[1], dbl);
+ break;
+ case SPEAKER_BLOCK:
+ /* Speaker Allocation Data Block */
+ if (dbl >= 1)
+ eld[7] = db[1];
+ break;
+ case VENDOR_BLOCK:
+ /* HDMI Vendor-Specific Data Block */
+ if (cea_db_is_hdmi_vsdb(db))
+ parse_hdmi_vsdb(connector, db);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ eld[5] |= sad_count << 4;
+ eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
+
+ DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
+}
+EXPORT_SYMBOL(drm_edid_to_eld);
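For reference, a sketch of the baseline ELD bytes the function above fills in (Conn_Type, HDCP and Port_ID are left to the graphics driver, as noted):

/* eld[0]       ELD version (2)
 * eld[2]       baseline block length in 4-byte units
 * eld[4]       CEA EDID version | monitor name length (mnl)
 * eld[5]       SAD count << 4, plus the Supports_AI bit from the VSDB
 * eld[7]       speaker allocation
 * eld[16..19]  EDID manufacturer and product IDs
 * eld[20..]    monitor name, followed by the SADs copied verbatim
 */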
+
+/**
+ * drm_edid_to_sad - extracts SADs from EDID
+ * @edid: EDID to parse
+ * @sads: pointer that will be set to the extracted SADs
+ *
+ * Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it.
*
- * Return edid data or NULL if we couldn't find any.
+ * Note: The returned pointer needs to be freed using kfree().
+ *
+ * Return: The number of found SADs or negative number on error.
*/
-struct edid *drm_get_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter)
+int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
{
- int ret;
- struct edid *edid;
+ int count = 0;
+ int i, start, end, dbl;
+ u8 *cea;
+
+ cea = drm_find_cea_extension(edid);
+ if (!cea) {
+ DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
+ return -ENOENT;
+ }
- edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1),
- GFP_KERNEL);
- if (edid == NULL) {
- dev_warn(&connector->dev->pdev->dev,
- "Failed to allocate EDID\n");
- goto end;
+ if (cea_revision(cea) < 3) {
+ DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
+ return -ENOTSUPP;
}
- /* Read first EDID block */
- ret = drm_ddc_read_edid(connector, adapter,
- (unsigned char *)edid, EDID_LENGTH);
- if (ret != 0)
- goto clean_up;
-
- /* There are EDID extensions to be read */
- if (edid->extensions != 0) {
- int edid_ext_num = edid->extensions;
-
- if (edid_ext_num > MAX_EDID_EXT_NUM) {
- dev_warn(&connector->dev->pdev->dev,
- "The number of extension(%d) is "
- "over max (%d), actually read number (%d)\n",
- edid_ext_num, MAX_EDID_EXT_NUM,
- MAX_EDID_EXT_NUM);
- /* Reset EDID extension number to be read */
- edid_ext_num = MAX_EDID_EXT_NUM;
+ if (cea_db_offsets(cea, &start, &end)) {
+ DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
+ return -EPROTO;
+ }
+
+ for_each_cea_db(cea, i, start, end) {
+ u8 *db = &cea[i];
+
+ if (cea_db_tag(db) == AUDIO_BLOCK) {
+ int j;
+ dbl = cea_db_payload_len(db);
+
+ count = dbl / 3; /* SAD is 3B */
+ *sads = kcalloc(count, sizeof(**sads), GFP_KERNEL);
+ if (!*sads)
+ return -ENOMEM;
+ for (j = 0; j < count; j++) {
+ u8 *sad = &db[1 + j * 3];
+
+ (*sads)[j].format = (sad[0] & 0x78) >> 3;
+ (*sads)[j].channels = sad[0] & 0x7;
+ (*sads)[j].freq = sad[1] & 0x7F;
+ (*sads)[j].byte2 = sad[2];
+ }
+ break;
}
- /* Read EDID including extensions too */
- ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
- EDID_LENGTH * (edid_ext_num + 1));
- if (ret != 0)
- goto clean_up;
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(drm_edid_to_sad);
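A minimal caller sketch, following the kfree() contract noted above; the fields are printed raw, exactly as stored from the SAD bytes:

	struct cea_sad *sads;
	int i, count;

	count = drm_edid_to_sad(edid, &sads);
	if (count <= 0)
		return;		/* no CEA audio block, or an error code */

	for (i = 0; i < count; i++)
		DRM_DEBUG_KMS("SAD %d: format %d channels %d freq 0x%02x\n",
			      i, sads[i].format, sads[i].channels, sads[i].freq);
	kfree(sads);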
+
+/**
+ * drm_edid_to_speaker_allocation - extracts Speaker Allocation Data Blocks from EDID
+ * @edid: EDID to parse
+ * @sadb: pointer to the speaker block
+ *
+ * Looks for CEA EDID block and extracts the Speaker Allocation Data Block from it.
+ *
+ * Note: The returned pointer needs to be freed using kfree().
+ *
+ * Return: The number of found Speaker Allocation Blocks or negative number on
+ * error.
+ */
+int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
+{
+ int count = 0;
+ int i, start, end, dbl;
+ const u8 *cea;
+
+ cea = drm_find_cea_extension(edid);
+ if (!cea) {
+ DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
+ return -ENOENT;
+ }
+ if (cea_revision(cea) < 3) {
+ DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
+ return -ENOTSUPP;
}
- connector->display_info.raw_edid = (char *)edid;
- goto end;
+ if (cea_db_offsets(cea, &start, &end)) {
+ DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
+ return -EPROTO;
+ }
-clean_up:
- kfree(edid);
- edid = NULL;
-end:
- return edid;
+ for_each_cea_db(cea, i, start, end) {
+ const u8 *db = &cea[i];
+
+ if (cea_db_tag(db) == SPEAKER_BLOCK) {
+ dbl = cea_db_payload_len(db);
+ /* Speaker Allocation Data Block */
+ if (dbl == 3) {
+ *sadb = kmemdup(&db[1], dbl, GFP_KERNEL);
+ if (!*sadb)
+ return -ENOMEM;
+ count = dbl;
+ break;
+ }
+ }
+ }
+
+ return count;
}
-EXPORT_SYMBOL(drm_get_edid);
+EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
+
+/**
+ * drm_av_sync_delay - compute the HDMI/DP sink audio-video sync delay
+ * @connector: connector associated with the HDMI/DP sink
+ * @mode: the display mode
+ *
+ * Return: The HDMI/DP sink's audio-video sync delay in milliseconds or 0 if
+ * the sink doesn't support audio or video.
+ */
+int drm_av_sync_delay(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ int a, v;
+
+ if (!connector->latency_present[0])
+ return 0;
+ if (!connector->latency_present[1])
+ i = 0;
+
+ a = connector->audio_latency[i];
+ v = connector->video_latency[i];
+
+ /*
+ * HDMI/DP sink doesn't support audio or video?
+ */
+ if (a == 255 || v == 255)
+ return 0;
+
+ /*
+	 * Convert raw EDID values to milliseconds.
+ * Treat unknown latency as 0ms.
+ */
+ if (a)
+ a = min(2 * (a - 1), 500);
+ if (v)
+ v = min(2 * (v - 1), 500);
+
+ return max(v - a, 0);
+}
+EXPORT_SYMBOL(drm_av_sync_delay);
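A worked example of the conversion above: raw EDID latency values of 21 (video) and 11 (audio) decode to 2 * (21 - 1) = 40 ms and 2 * (11 - 1) = 20 ms, so drm_av_sync_delay() reports max(40 - 20, 0) = 20 ms. A raw value of 255 in either field means the sink lacks that capability, and 0 is returned.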
+
+/**
+ * drm_select_eld - select one ELD from multiple HDMI/DP sinks
+ * @encoder: the encoder just changed display mode
+ * @mode: the adjusted display mode
+ *
+ * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
+ * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
+ *
+ * Return: The connector associated with the first HDMI/DP sink that has ELD
+ * attached to it.
+ */
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder && connector->eld[0])
+ return connector;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_select_eld);
-#define HDMI_IDENTIFIER 0x000C03
-#define VENDOR_BLOCK 0x03
/**
- * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
+ * drm_detect_hdmi_monitor - detect whether monitor is HDMI
* @edid: monitor EDID information
*
* Parse the CEA extension according to CEA-861-B.
- * Return true if HDMI, false if not or unknown.
+ *
+ * Return: True if the monitor is HDMI, false if not or unknown.
*/
bool drm_detect_hdmi_monitor(struct edid *edid)
{
- char *edid_ext = NULL;
- int i, hdmi_id, edid_ext_num;
+ u8 *edid_ext;
+ int i;
int start_offset, end_offset;
- bool is_hdmi = false;
- /* No EDID or EDID extensions */
- if (edid == NULL || edid->extensions == 0)
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return false;
+
+ if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+ return false;
+
+ /*
+ * Because HDMI identifier is in Vendor Specific Block,
+ * search it from all data blocks of CEA extension.
+ */
+ for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+ if (cea_db_is_hdmi_vsdb(&edid_ext[i]))
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_detect_hdmi_monitor);
+
+/**
+ * drm_detect_monitor_audio - check monitor audio capability
+ * @edid: EDID block to scan
+ *
+ * Monitor should have CEA extension block.
+ * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic
+ * audio' only. If there is any audio extension block and supported
+ * audio format, assume at least 'basic audio' support, even if 'basic
+ * audio' is not defined in EDID.
+ *
+ * Return: True if the monitor supports audio, false otherwise.
+ */
+bool drm_detect_monitor_audio(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i, j;
+ bool has_audio = false;
+ int start_offset, end_offset;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
goto end;
- /* Chose real EDID extension number */
- edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
- MAX_EDID_EXT_NUM : edid->extensions;
+ has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
- /* Find CEA extension */
- for (i = 0; i < edid_ext_num; i++) {
- edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
- /* This block is CEA extension */
- if (edid_ext[0] == 0x02)
- break;
+ if (has_audio) {
+ DRM_DEBUG_KMS("Monitor has basic audio support\n");
+ goto end;
}
- if (i == edid_ext_num)
+ if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
goto end;
- /* Data block offset in CEA extension block */
- start_offset = 4;
- end_offset = edid_ext[2];
+ for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+ if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) {
+ has_audio = true;
+ for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3)
+ DRM_DEBUG_KMS("CEA audio format %d\n",
+ (edid_ext[i + j] >> 3) & 0xf);
+ goto end;
+ }
+ }
+end:
+ return has_audio;
+}
+EXPORT_SYMBOL(drm_detect_monitor_audio);
+
+/**
+ * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
+ * @edid: EDID block to scan
+ *
+ * Check whether the monitor reports the RGB quantization range selection
+ * as supported. The AVI infoframe can then be used to inform the monitor
+ * which quantization range (full or limited) is used.
+ *
+ * Return: True if the RGB quantization range is selectable, false otherwise.
+ */
+bool drm_rgb_quant_range_selectable(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i, start, end;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return false;
+
+ if (cea_db_offsets(edid_ext, &start, &end))
+ return false;
+
+ for_each_cea_db(edid_ext, i, start, end) {
+ if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK &&
+ cea_db_payload_len(&edid_ext[i]) == 2) {
+ DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
+ return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
+ }
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
+
+/**
+ * drm_assign_hdmi_deep_color_info - detect whether monitor supports
+ * HDMI deep color modes and update drm_display_info if so.
+ *
+ * @edid: monitor EDID information
+ * @info: updated with the maximum supported deep color bpc and color format
+ * if deep color is supported
+ * @connector: connector owning the EDID, used for debug output
+ *
+ * Parse the CEA extension according to CEA-861-B.
+ * Return true if HDMI deep color supported, false if not or unknown.
+ */
+static bool drm_assign_hdmi_deep_color_info(struct edid *edid,
+ struct drm_display_info *info,
+ struct drm_connector *connector)
+{
+ u8 *edid_ext, *hdmi;
+ int i;
+ int start_offset, end_offset;
+ unsigned int dc_bpc = 0;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return false;
+
+ if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+ return false;
/*
* Because HDMI identifier is in Vendor Specific Block,
* search it from all data blocks of CEA extension.
*/
- for (i = start_offset; i < end_offset;
- /* Increased by data block len */
- i += ((edid_ext[i] & 0x1f) + 1)) {
- /* Find vendor specific block */
- if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
- hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
- edid_ext[i + 3] << 16;
- /* Find HDMI identifier */
- if (hdmi_id == HDMI_IDENTIFIER)
- is_hdmi = true;
- break;
+ for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+ if (cea_db_is_hdmi_vsdb(&edid_ext[i])) {
+ /* HDMI supports at least 8 bpc */
+ info->bpc = 8;
+
+ hdmi = &edid_ext[i];
+ if (cea_db_payload_len(hdmi) < 6)
+ return false;
+
+ if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
+ dc_bpc = 10;
+ info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_30;
+ DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
+ connector->name);
+ }
+
+ if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
+ dc_bpc = 12;
+ info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_36;
+ DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
+ connector->name);
+ }
+
+ if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
+ dc_bpc = 16;
+ info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_48;
+ DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
+ connector->name);
+ }
+
+ if (dc_bpc > 0) {
+ DRM_DEBUG("%s: Assigning HDMI sink color depth as %d bpc.\n",
+ connector->name, dc_bpc);
+ info->bpc = dc_bpc;
+
+ /*
+ * Deep color support mandates RGB444 support for all video
+ * modes and forbids YCRCB422 support for all video modes per
+ * HDMI 1.3 spec.
+ */
+ info->color_formats = DRM_COLOR_FORMAT_RGB444;
+
+ /* YCRCB444 is optional according to spec. */
+ if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
+ connector->name);
+ }
+
+ /*
+ * Spec says that if any deep color mode is supported at all,
+ * then deep color 36 bit must be supported.
+ */
+ if (!(hdmi[6] & DRM_EDID_HDMI_DC_36)) {
+ DRM_DEBUG("%s: HDMI sink should do DC_36, but does not!\n",
+ connector->name);
+ }
+
+ return true;
+		} else {
+ DRM_DEBUG("%s: No deep color support on this HDMI sink.\n",
+ connector->name);
+ }
}
}
-end:
- return is_hdmi;
+ return false;
+}
+
+/**
+ * drm_add_display_info - pull display info out if present
+ * @edid: EDID data
+ * @info: display info (attached to connector)
+ * @connector: connector whose edid is used to build display info
+ *
+ * Grab any available display info and stuff it into the drm_display_info
+ * structure that's part of the connector. Useful for tracking bpp and
+ * color spaces.
+ */
+static void drm_add_display_info(struct edid *edid,
+ struct drm_display_info *info,
+ struct drm_connector *connector)
+{
+ u8 *edid_ext;
+
+ info->width_mm = edid->width_cm * 10;
+ info->height_mm = edid->height_cm * 10;
+
+ /* driver figures it out in this case */
+ info->bpc = 0;
+ info->color_formats = 0;
+
+ if (edid->revision < 3)
+ return;
+
+ if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
+ return;
+
+ /* Get data from CEA blocks if present */
+ edid_ext = drm_find_cea_extension(edid);
+ if (edid_ext) {
+ info->cea_rev = edid_ext[1];
+
+ /* The existence of a CEA block should imply RGB support */
+ info->color_formats = DRM_COLOR_FORMAT_RGB444;
+ if (edid_ext[3] & EDID_CEA_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid_ext[3] & EDID_CEA_YCRCB422)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+ }
+
+ /* HDMI deep color modes supported? Assign to info, if so */
+ drm_assign_hdmi_deep_color_info(edid, info, connector);
+
+ /* Only defined for 1.4 with digital displays */
+ if (edid->revision < 4)
+ return;
+
+ switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
+ case DRM_EDID_DIGITAL_DEPTH_6:
+ info->bpc = 6;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_8:
+ info->bpc = 8;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_10:
+ info->bpc = 10;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_12:
+ info->bpc = 12;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_14:
+ info->bpc = 14;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_16:
+ info->bpc = 16;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_UNDEF:
+ default:
+ info->bpc = 0;
+ break;
+ }
+
+ DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
+ connector->name, info->bpc);
+
+ info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
-EXPORT_SYMBOL(drm_detect_hdmi_monitor);
/**
* drm_add_edid_modes - add modes from EDID data, if available
* @connector: connector we're probing
- * @edid: edid data
+ * @edid: EDID data
*
* Add the specified modes to the connector's mode list.
*
- * Return number of modes added or 0 if we couldn't find any.
+ * Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
{
@@ -1304,39 +3631,47 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
if (edid == NULL) {
return 0;
}
- if (!edid_is_valid(edid)) {
- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
+ if (!drm_edid_is_valid(edid)) {
+ dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
+ connector->name);
return 0;
}
quirks = edid_get_quirks(edid);
- num_modes += add_established_modes(connector, edid);
+ /*
+ * EDID spec says modes should be preferred in this order:
+ * - preferred detailed mode
+ * - other detailed modes from base block
+ * - detailed modes from extension blocks
+ * - CVT 3-byte code modes
+ * - standard timing codes
+ * - established timing codes
+ * - modes inferred from GTF or CVT range information
+ *
+ * We get this pretty much right.
+ *
+ * XXX order for additional mode types in extension blocks?
+ */
+ num_modes += add_detailed_modes(connector, edid, quirks);
+ num_modes += add_cvt_modes(connector, edid);
num_modes += add_standard_modes(connector, edid);
- num_modes += add_detailed_info(connector, edid, quirks);
- num_modes += add_detailed_info_eedid(connector, edid, quirks);
+ num_modes += add_established_modes(connector, edid);
+ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ num_modes += add_inferred_modes(connector, edid);
+ num_modes += add_cea_modes(connector, edid);
+ num_modes += add_alternate_cea_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
- connector->display_info.serration_vsync = (edid->input & DRM_EDID_INPUT_SERRATION_VSYNC) ? 1 : 0;
- connector->display_info.sync_on_green = (edid->input & DRM_EDID_INPUT_SYNC_ON_GREEN) ? 1 : 0;
- connector->display_info.composite_sync = (edid->input & DRM_EDID_INPUT_COMPOSITE_SYNC) ? 1 : 0;
- connector->display_info.separate_syncs = (edid->input & DRM_EDID_INPUT_SEPARATE_SYNCS) ? 1 : 0;
- connector->display_info.blank_to_black = (edid->input & DRM_EDID_INPUT_BLANK_TO_BLACK) ? 1 : 0;
- connector->display_info.video_level = (edid->input & DRM_EDID_INPUT_VIDEO_LEVEL) >> 5;
- connector->display_info.digital = (edid->input & DRM_EDID_INPUT_DIGITAL) ? 1 : 0;
- connector->display_info.width_mm = edid->width_cm * 10;
- connector->display_info.height_mm = edid->height_cm * 10;
- connector->display_info.gamma = edid->gamma;
- connector->display_info.gtf_supported = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) ? 1 : 0;
- connector->display_info.standard_color = (edid->features & DRM_EDID_FEATURE_STANDARD_COLOR) ? 1 : 0;
- connector->display_info.display_type = (edid->features & DRM_EDID_FEATURE_DISPLAY_TYPE) >> 3;
- connector->display_info.active_off_supported = (edid->features & DRM_EDID_FEATURE_PM_ACTIVE_OFF) ? 1 : 0;
- connector->display_info.suspend_supported = (edid->features & DRM_EDID_FEATURE_PM_SUSPEND) ? 1 : 0;
- connector->display_info.standby_supported = (edid->features & DRM_EDID_FEATURE_PM_STANDBY) ? 1 : 0;
- connector->display_info.gamma = edid->gamma;
+ drm_add_display_info(edid, &connector->display_info, connector);
+
+ if (quirks & EDID_QUIRK_FORCE_8BPC)
+ connector->display_info.bpc = 8;
+
+ if (quirks & EDID_QUIRK_FORCE_12BPC)
+ connector->display_info.bpc = 12;
return num_modes;
}
@@ -1351,13 +3686,13 @@ EXPORT_SYMBOL(drm_add_edid_modes);
 * Add the specified modes to the connector's mode list. A mode is only
 * added when its hdisplay/vdisplay do not exceed the given limits.
*
- * Return number of modes added or 0 if we couldn't find any.
+ * Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay)
{
int i, count, num_modes = 0;
- struct drm_display_mode *mode, *ptr;
+ struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
@@ -1367,7 +3702,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
vdisplay = 0;
for (i = 0; i < count; i++) {
- ptr = &drm_dmt_modes[i];
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hdisplay && vdisplay) {
/*
* Only when two are valid, they will be used to check
@@ -1389,3 +3724,137 @@ int drm_add_modes_noedid(struct drm_connector *connector,
return num_modes;
}
EXPORT_SYMBOL(drm_add_modes_noedid);
+
+/**
+ * drm_set_preferred_mode - Sets the preferred mode of a connector
+ * @connector: connector whose mode list should be processed
+ * @hpref: horizontal resolution of preferred mode
+ * @vpref: vertical resolution of preferred mode
+ *
+ * Marks a mode as preferred if it matches the resolution specified by @hpref
+ * and @vpref.
+ */
+void drm_set_preferred_mode(struct drm_connector *connector,
+ int hpref, int vpref)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ if (mode->hdisplay == hpref &&
+ mode->vdisplay == vpref)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+}
+EXPORT_SYMBOL(drm_set_preferred_mode);
+
+/**
+ * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
+ * data from a DRM display mode
+ * @frame: HDMI AVI infoframe
+ * @mode: DRM display mode
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int
+drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
+ const struct drm_display_mode *mode)
+{
+ int err;
+
+ if (!frame || !mode)
+ return -EINVAL;
+
+ err = hdmi_avi_infoframe_init(frame);
+ if (err < 0)
+ return err;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ frame->pixel_repeat = 1;
+
+ frame->video_code = drm_match_cea_mode(mode);
+
+ frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
+
+ /* Populate picture aspect ratio from CEA mode list */
+ if (frame->video_code > 0)
+ frame->picture_aspect = drm_get_cea_aspect_ratio(
+ frame->video_code);
+
+ frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
+ frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
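A hedged usage sketch: hdmi_avi_infoframe_pack() is the packing helper from <linux/hdmi.h>, while write_infoframe() stands in for whatever hypothetical hardware hook the encoder provides:

	struct hdmi_avi_infoframe frame;
	u8 buffer[17];	/* 4-byte header + up to 13 bytes of AVI payload */
	ssize_t len;

	if (drm_hdmi_avi_infoframe_from_display_mode(&frame, mode) < 0)
		return;

	len = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (len > 0)
		write_infoframe(encoder, buffer, len);	/* hypothetical hook */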
+
+static enum hdmi_3d_structure
+s3d_structure_from_display_mode(const struct drm_display_mode *mode)
+{
+ u32 layout = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+ switch (layout) {
+ case DRM_MODE_FLAG_3D_FRAME_PACKING:
+ return HDMI_3D_STRUCTURE_FRAME_PACKING;
+ case DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE:
+ return HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE;
+ case DRM_MODE_FLAG_3D_LINE_ALTERNATIVE:
+ return HDMI_3D_STRUCTURE_LINE_ALTERNATIVE;
+ case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL:
+ return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
+ case DRM_MODE_FLAG_3D_L_DEPTH:
+ return HDMI_3D_STRUCTURE_L_DEPTH;
+ case DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH:
+ return HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH;
+ case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
+ return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
+ case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
+ return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
+ default:
+ return HDMI_3D_STRUCTURE_INVALID;
+ }
+}
+
+/**
+ * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
+ * data from a DRM display mode
+ * @frame: HDMI vendor infoframe
+ * @mode: DRM display mode
+ *
+ * Note that HDMI vendor infoframes are only needed when using a 4k or
+ * stereoscopic 3D mode. For any other mode this function returns -EINVAL,
+ * an error that can be safely ignored.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int
+drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
+ const struct drm_display_mode *mode)
+{
+ int err;
+ u32 s3d_flags;
+ u8 vic;
+
+ if (!frame || !mode)
+ return -EINVAL;
+
+ vic = drm_match_hdmi_mode(mode);
+ s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+ if (!vic && !s3d_flags)
+ return -EINVAL;
+
+ if (vic && s3d_flags)
+ return -EINVAL;
+
+ err = hdmi_vendor_infoframe_init(frame);
+ if (err < 0)
+ return err;
+
+ if (vic)
+ frame->vic = vic;
+ else
+ frame->s3d_struct = s3d_structure_from_display_mode(mode);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
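A matching sketch for the vendor infoframe; since -EINVAL simply means the mode needs no vendor infoframe, it is ignored rather than propagated (send_vendor_infoframe() is a hypothetical stand-in):

	struct hdmi_vendor_infoframe frame;

	if (drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode) == 0)
		send_vendor_infoframe(encoder, &frame);	/* hypothetical hook */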
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
new file mode 100644
index 00000000000..0a235fe61c9
--- /dev/null
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -0,0 +1,294 @@
+/*
+ drm_edid_load.c: use a built-in EDID data set or load it via the firmware
+ interface
+
+ Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org>
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*/
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+static char edid_firmware[PATH_MAX];
+module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
+MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
+ "from built-in data or /lib/firmware instead. ");
+
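Assuming this file is built into drm.ko (so the parameter lives under the drm module), a monitor can be overridden with one of the built-in blobs from the kernel command line, e.g.:

	drm.edid_firmware=edid/1024x768.bin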
+#define GENERIC_EDIDS 6
+static const char *generic_edid_name[GENERIC_EDIDS] = {
+ "edid/800x600.bin",
+ "edid/1024x768.bin",
+ "edid/1280x1024.bin",
+ "edid/1600x1200.bin",
+ "edid/1680x1050.bin",
+ "edid/1920x1080.bin",
+};
+
+static const u8 generic_edid[GENERIC_EDIDS][128] = {
+ {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x16, 0x01, 0x03, 0x6d, 0x1b, 0x14, 0x78,
+ 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+ 0x20, 0x50, 0x54, 0x01, 0x00, 0x00, 0x45, 0x40,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0xa0, 0x0f,
+ 0x20, 0x00, 0x31, 0x58, 0x1c, 0x20, 0x28, 0x80,
+ 0x14, 0x00, 0x15, 0xd0, 0x10, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+ 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+ 0x3d, 0x24, 0x26, 0x05, 0x00, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+ 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53,
+ 0x56, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xc2,
+ },
+ {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x16, 0x01, 0x03, 0x6d, 0x23, 0x1a, 0x78,
+ 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+ 0x20, 0x50, 0x54, 0x00, 0x08, 0x00, 0x61, 0x40,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x64, 0x19,
+ 0x00, 0x40, 0x41, 0x00, 0x26, 0x30, 0x08, 0x90,
+ 0x36, 0x00, 0x63, 0x0a, 0x11, 0x00, 0x00, 0x18,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+ 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+ 0x3d, 0x2f, 0x31, 0x07, 0x00, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+ 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x58,
+ 0x47, 0x41, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x55,
+ },
+ {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x16, 0x01, 0x03, 0x6d, 0x2c, 0x23, 0x78,
+ 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+ 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0x81, 0x80,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x30, 0x2a,
+ 0x00, 0x98, 0x51, 0x00, 0x2a, 0x40, 0x30, 0x70,
+ 0x13, 0x00, 0xbc, 0x63, 0x11, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+ 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+ 0x3d, 0x3e, 0x40, 0x0b, 0x00, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+ 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53,
+ 0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xa0,
+ },
+ {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x16, 0x01, 0x03, 0x6d, 0x37, 0x29, 0x78,
+ 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+ 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xa9, 0x40,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x48, 0x3f,
+ 0x40, 0x30, 0x62, 0xb0, 0x32, 0x40, 0x40, 0xc0,
+ 0x13, 0x00, 0x2b, 0xa0, 0x21, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+ 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+ 0x3d, 0x4a, 0x4c, 0x11, 0x00, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+ 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x55,
+ 0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0x9d,
+ },
+ {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x16, 0x01, 0x03, 0x6d, 0x2b, 0x1b, 0x78,
+ 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+ 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xb3, 0x00,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x21, 0x39,
+ 0x90, 0x30, 0x62, 0x1a, 0x27, 0x40, 0x68, 0xb0,
+ 0x36, 0x00, 0xb5, 0x11, 0x11, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+ 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+ 0x3d, 0x40, 0x42, 0x0f, 0x00, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+ 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x57,
+ 0x53, 0x58, 0x47, 0x41, 0x0a, 0x20, 0x00, 0x26,
+ },
+ {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x16, 0x01, 0x03, 0x6d, 0x32, 0x1c, 0x78,
+ 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+ 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xd1, 0xc0,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a,
+ 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
+ 0x45, 0x00, 0xf4, 0x19, 0x11, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+ 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+ 0x3d, 0x42, 0x44, 0x0f, 0x00, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+ 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x46,
+ 0x48, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x05,
+ },
+};
+
+static int edid_size(const u8 *edid, int data_size)
+{
+ if (data_size < EDID_LENGTH)
+ return 0;
+
+ return (edid[0x7e] + 1) * EDID_LENGTH;
+}
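As a worked example of the size check below: byte 0x7e of the base block holds the EDID extension count, so a blob with two extension blocks must be exactly (2 + 1) * 128 = 384 bytes long, and edid_load() rejects any firmware image whose actual length disagrees with that computed value.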
+
+static void *edid_load(struct drm_connector *connector, const char *name,
+ const char *connector_name)
+{
+ const struct firmware *fw = NULL;
+ const u8 *fwdata;
+ u8 *edid;
+ int fwsize, builtin;
+ int i, valid_extensions = 0;
+ bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
+
+ builtin = 0;
+ for (i = 0; i < GENERIC_EDIDS; i++) {
+ if (strcmp(name, generic_edid_name[i]) == 0) {
+ fwdata = generic_edid[i];
+ fwsize = sizeof(generic_edid[i]);
+ builtin = 1;
+ break;
+ }
+ }
+ if (!builtin) {
+ struct platform_device *pdev;
+ int err;
+
+ pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ DRM_ERROR("Failed to register EDID firmware platform device "
+ "for connector \"%s\"\n", connector_name);
+ return ERR_CAST(pdev);
+ }
+
+ err = request_firmware(&fw, name, &pdev->dev);
+ platform_device_unregister(pdev);
+ if (err) {
+ DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
+ name, err);
+ return ERR_PTR(err);
+ }
+
+ fwdata = fw->data;
+ fwsize = fw->size;
+ }
+
+ if (edid_size(fwdata, fwsize) != fwsize) {
+ DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
+ "(expected %d, got %d\n", name,
+ edid_size(fwdata, fwsize), (int)fwsize);
+ edid = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
+ if (edid == NULL) {
+ edid = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
+ connector->bad_edid_counter++;
+ DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
+ name);
+ kfree(edid);
+ edid = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ for (i = 1; i <= edid[0x7e]; i++) {
+ if (i != valid_extensions + 1)
+ memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
+ edid + i * EDID_LENGTH, EDID_LENGTH);
+ if (drm_edid_block_valid(edid + i * EDID_LENGTH, i, print_bad_edid))
+ valid_extensions++;
+ }
+
+ if (valid_extensions != edid[0x7e]) {
+ u8 *new_edid;
+
+ edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
+ DRM_INFO("Found %d valid extensions instead of %d in EDID data "
+ "\"%s\" for connector \"%s\"\n", valid_extensions,
+ edid[0x7e], name, connector_name);
+ edid[0x7e] = valid_extensions;
+
+ new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
+ GFP_KERNEL);
+ if (new_edid)
+ edid = new_edid;
+ }
+
+ DRM_INFO("Got %s EDID base block and %d extension%s from "
+ "\"%s\" for connector \"%s\"\n", builtin ? "built-in" :
+ "external", valid_extensions, valid_extensions == 1 ? "" : "s",
+ name, connector_name);
+
+out:
+ if (fw)
+ release_firmware(fw);
+ return edid;
+}
+
+int drm_load_edid_firmware(struct drm_connector *connector)
+{
+ const char *connector_name = connector->name;
+ char *edidname = edid_firmware, *last, *colon;
+ int ret;
+ struct edid *edid;
+
+ if (*edidname == '\0')
+ return 0;
+
+ colon = strchr(edidname, ':');
+ if (colon != NULL) {
+ if (strncmp(connector_name, edidname, colon - edidname))
+ return 0;
+ edidname = colon + 1;
+ if (*edidname == '\0')
+ return 0;
+ }
+
+ last = edidname + strlen(edidname) - 1;
+ if (*last == '\n')
+ *last = '\0';
+
+ edid = edid_load(connector, edidname, connector_name);
+ if (IS_ERR_OR_NULL(edid))
+ return 0;
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
+}
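Tying the parsing above together: the parameter value is either a bare path applied to every connector, or a connector:path pair matched against connector->name by the strncmp above. Assuming this file is linked into the drm_kms_helper module (a build-dependent assumption), kernel command line usage would look like:

    drm_kms_helper.edid_firmware=edid/1280x1024.bin
    drm_kms_helper.edid_firmware=DVI-I-1:edid/1920x1080.bin

The first form resolves to one of the built-in blobs in the table above; any other name is fetched from /lib/firmware via request_firmware() instead.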
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index f0184696edf..d18b88b755c 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -24,7 +24,9 @@
*
*/
-#include "drm_encoder_slave.h"
+#include <linux/module.h>
+
+#include <drm/drm_encoder_slave.h>
/**
* drm_i2c_encoder_init - Initialize an I2C slave encoder
@@ -41,6 +43,9 @@
* &drm_encoder_slave. The @slave_funcs field will be initialized with
* the hooks provided by the slave driver.
*
+ * If @info->platform_data is non-NULL it will be used as the initial
+ * slave config.
+ *
* Returns 0 on success or a negative errno on failure, in particular,
* -ENODEV is returned when no matching driver is found.
*/
@@ -49,16 +54,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
struct i2c_adapter *adap,
const struct i2c_board_info *info)
{
- char modalias[sizeof(I2C_MODULE_PREFIX)
- + I2C_NAME_SIZE];
struct module *module = NULL;
struct i2c_client *client;
struct drm_i2c_encoder_driver *encoder_drv;
int err = 0;
- snprintf(modalias, sizeof(modalias),
- "%s%s", I2C_MODULE_PREFIX, info->type);
- request_module(modalias);
+ request_module("%s%s", I2C_MODULE_PREFIX, info->type);
client = i2c_new_device(adap, info);
if (!client) {
@@ -66,12 +67,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
goto fail;
}
- if (!client->driver) {
+ if (!client->dev.driver) {
err = -ENODEV;
goto fail_unregister;
}
- module = client->driver->driver.owner;
+ module = client->dev.driver->owner;
if (!try_module_get(module)) {
err = -ENODEV;
goto fail_unregister;
@@ -79,12 +80,16 @@ int drm_i2c_encoder_init(struct drm_device *dev,
encoder->bus_priv = client;
- encoder_drv = to_drm_i2c_encoder_driver(client->driver);
+ encoder_drv = to_drm_i2c_encoder_driver(to_i2c_driver(client->dev.driver));
err = encoder_drv->encoder_init(client, dev, encoder);
if (err)
goto fail_unregister;
+ if (info->platform_data)
+ encoder->slave_funcs->set_config(&encoder->base,
+ info->platform_data);
+
return 0;
fail_unregister:
@@ -106,7 +111,7 @@ void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
{
struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
- struct module *module = client->driver->driver.owner;
+ struct module *module = client->dev.driver->owner;
i2c_unregister_device(client);
encoder->bus_priv = NULL;
@@ -114,3 +119,66 @@ void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
module_put(module);
}
EXPORT_SYMBOL(drm_i2c_encoder_destroy);
+
+/*
+ * Wrapper functions which can be plugged into drm_encoder_helper_funcs:
+ */
+
+static inline struct drm_encoder_slave_funcs *
+get_slave_funcs(struct drm_encoder *enc)
+{
+ return to_encoder_slave(enc)->slave_funcs;
+}
+
+void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ get_slave_funcs(encoder)->dpms(encoder, mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_dpms);
+
+bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return get_slave_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_mode_fixup);
+
+void drm_i2c_encoder_prepare(struct drm_encoder *encoder)
+{
+ drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_prepare);
+
+void drm_i2c_encoder_commit(struct drm_encoder *encoder)
+{
+ drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_commit);
+
+void drm_i2c_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_mode_set);
+
+enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ return get_slave_funcs(encoder)->detect(encoder, connector);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_detect);
+
+void drm_i2c_encoder_save(struct drm_encoder *encoder)
+{
+ get_slave_funcs(encoder)->save(encoder);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_save);
+
+void drm_i2c_encoder_restore(struct drm_encoder *encoder)
+{
+ get_slave_funcs(encoder)->restore(encoder);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_restore);
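Since every wrapper above simply forwards to slave_funcs, a driver can hand them straight to the helper layer. A hypothetical sketch (the foo_* name is illustrative):

#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder_slave.h>

/* Every hook forwards to the slave encoder's slave_funcs. */
static const struct drm_encoder_helper_funcs foo_encoder_helpers = {
	.dpms		= drm_i2c_encoder_dpms,
	.mode_fixup	= drm_i2c_encoder_mode_fixup,
	.prepare	= drm_i2c_encoder_prepare,	/* DPMS off */
	.commit		= drm_i2c_encoder_commit,	/* DPMS on */
	.mode_set	= drm_i2c_encoder_mode_set,
	.detect		= drm_i2c_encoder_detect,
	.save		= drm_i2c_encoder_save,
	.restore	= drm_i2c_encoder_restore,
};

after which the driver registers the table with drm_encoder_helper_add(&encoder_slave->base, &foo_encoder_helpers).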
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
new file mode 100644
index 00000000000..f27c883be39
--- /dev/null
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -0,0 +1,449 @@
+/*
+ * drm kms/fb cma (contiguous memory allocator) helper functions
+ *
+ * Copyright (C) 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Based on udl_fbdev.c
+ * Copyright (C) 2012 Red Hat
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <linux/module.h>
+
+struct drm_fb_cma {
+ struct drm_framebuffer fb;
+ struct drm_gem_cma_object *obj[4];
+};
+
+struct drm_fbdev_cma {
+ struct drm_fb_helper fb_helper;
+ struct drm_fb_cma *fb;
+};
+
+static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
+{
+ return container_of(helper, struct drm_fbdev_cma, fb_helper);
+}
+
+static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
+{
+ return container_of(fb, struct drm_fb_cma, fb);
+}
+
+static void drm_fb_cma_destroy(struct drm_framebuffer *fb)
+{
+ struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (fb_cma->obj[i])
+ drm_gem_object_unreference_unlocked(&fb_cma->obj[i]->base);
+ }
+
+ drm_framebuffer_cleanup(fb);
+ kfree(fb_cma);
+}
+
+static int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int *handle)
+{
+ struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+
+ return drm_gem_handle_create(file_priv,
+ &fb_cma->obj[0]->base, handle);
+}
+
+static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
+ .destroy = drm_fb_cma_destroy,
+ .create_handle = drm_fb_cma_create_handle,
+};
+
+static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj,
+ unsigned int num_planes)
+{
+ struct drm_fb_cma *fb_cma;
+ int ret;
+ int i;
+
+ fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL);
+ if (!fb_cma)
+ return ERR_PTR(-ENOMEM);
+
+ drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
+
+ for (i = 0; i < num_planes; i++)
+ fb_cma->obj[i] = obj[i];
+
+ ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs);
+ if (ret) {
+ dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
+ kfree(fb_cma);
+ return ERR_PTR(ret);
+ }
+
+ return fb_cma;
+}
+
+/**
+ * drm_fb_cma_create() - (struct drm_mode_config_funcs *)->fb_create callback function
+ *
+ * If your hardware has special alignment or pitch requirements these should be
+ * checked before calling this function.
+ */
+struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
+ struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_fb_cma *fb_cma;
+ struct drm_gem_cma_object *objs[4];
+ struct drm_gem_object *obj;
+ unsigned int hsub;
+ unsigned int vsub;
+ int ret;
+ int i;
+
+ hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+
+ for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+ unsigned int width = mode_cmd->width / (i ? hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int min_size;
+
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[i]);
+ if (!obj) {
+ dev_err(dev->dev, "Failed to lookup GEM object\n");
+ ret = -ENXIO;
+ goto err_gem_object_unreference;
+ }
+
+ min_size = (height - 1) * mode_cmd->pitches[i]
+ + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+ + mode_cmd->offsets[i];
+
+ if (obj->size < min_size) {
+ drm_gem_object_unreference_unlocked(obj);
+ ret = -EINVAL;
+ goto err_gem_object_unreference;
+ }
+ objs[i] = to_drm_gem_cma_obj(obj);
+ }
+
+ fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i);
+ if (IS_ERR(fb_cma)) {
+ ret = PTR_ERR(fb_cma);
+ goto err_gem_object_unreference;
+ }
+
+ return &fb_cma->fb;
+
+err_gem_object_unreference:
+ for (i--; i >= 0; i--)
+ drm_gem_object_unreference_unlocked(&objs[i]->base);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_create);
+
+/**
+ * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
+ * @fb: The framebuffer
+ * @plane: Which plane
+ *
+ * Return the CMA GEM object for the given framebuffer.
+ *
+ * This function will usually be called from the CRTC callback functions.
+ */
+struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
+ unsigned int plane)
+{
+ struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+
+ if (plane >= 4)
+ return NULL;
+
+ return fb_cma->obj[plane];
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
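A sketch of the CRTC-callback usage mentioned above: fetch plane 0's backing object and compute the DMA address of the first visible pixel. The foo_* name is hypothetical; the arithmetic mirrors the min_size check in drm_fb_cma_create():

#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>

/* Illustrative only: DMA address of the pixel at (x, y) in plane 0. */
static dma_addr_t foo_fb_scanout_address(struct drm_framebuffer *fb,
					 unsigned int x, unsigned int y)
{
	struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, 0);

	/* paddr is the physical/DMA base address of the CMA buffer */
	return gem->paddr + fb->offsets[0] + y * fb->pitches[0] +
	       x * drm_format_plane_cpp(fb->pixel_format, 0);
}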
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * drm_fb_cma_describe() - Helper to dump information about a single
+ * CMA framebuffer object
+ */
+static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
+{
+ struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+ int i, n = drm_format_num_planes(fb->pixel_format);
+
+ seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
+ (char *)&fb->pixel_format);
+
+ for (i = 0; i < n; i++) {
+ seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
+ i, fb->offsets[i], fb->pitches[i]);
+ drm_gem_cma_describe(fb_cma->obj[i], m);
+ }
+}
+
+/**
+ * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
+ * in debugfs.
+ */
+int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret) {
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+ }
+
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head)
+ drm_fb_cma_describe(fb, m);
+
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
+#endif
+
+static struct fb_ops drm_fbdev_cma_ops = {
+ .owner = THIS_MODULE,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_device *dev = helper->dev;
+ struct drm_gem_cma_object *obj;
+ struct drm_framebuffer *fb;
+ unsigned int bytes_per_pixel;
+ unsigned long offset;
+ struct fb_info *fbi;
+ size_t size;
+ int ret;
+
+ DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ obj = drm_gem_cma_create(dev, size);
+ if (IS_ERR(obj))
+ return -ENOMEM;
+
+ fbi = framebuffer_alloc(0, dev->dev);
+ if (!fbi) {
+ dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
+ ret = -ENOMEM;
+ goto err_drm_gem_cma_free_object;
+ }
+
+ fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1);
+ if (IS_ERR(fbdev_cma->fb)) {
+ dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
+ ret = PTR_ERR(fbdev_cma->fb);
+ goto err_framebuffer_release;
+ }
+
+ fb = &fbdev_cma->fb->fb;
+ helper->fb = fb;
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->fbops = &drm_fbdev_cma_ops;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ dev_err(dev->dev, "Failed to allocate color map.\n");
+ goto err_drm_fb_cma_destroy;
+ }
+
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+
+ offset = fbi->var.xoffset * bytes_per_pixel;
+ offset += fbi->var.yoffset * fb->pitches[0];
+
+ dev->mode_config.fb_base = (resource_size_t)obj->paddr;
+ fbi->screen_base = obj->vaddr + offset;
+ fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+ fbi->screen_size = size;
+ fbi->fix.smem_len = size;
+
+ return 0;
+
+err_drm_fb_cma_destroy:
+ drm_framebuffer_unregister_private(fb);
+ drm_fb_cma_destroy(fb);
+err_framebuffer_release:
+ framebuffer_release(fbi);
+err_drm_gem_cma_free_object:
+ drm_gem_cma_free_object(&obj->base);
+ return ret;
+}
+
+static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
+ .fb_probe = drm_fbdev_cma_create,
+};
+
+/**
+ * drm_fbdev_cma_init() - Allocate and initialize a drm_fbdev_cma struct
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device
+ * @num_crtc: Number of CRTCs
+ * @max_conn_count: Maximum number of connectors
+ *
+ * Returns a newly allocated drm_fbdev_cma struct or an ERR_PTR.
+ */
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+ unsigned int preferred_bpp, unsigned int num_crtc,
+ unsigned int max_conn_count)
+{
+ struct drm_fbdev_cma *fbdev_cma;
+ struct drm_fb_helper *helper;
+ int ret;
+
+ fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
+ if (!fbdev_cma) {
+ dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fbdev_cma->fb_helper.funcs = &drm_fb_cma_helper_funcs;
+ helper = &fbdev_cma->fb_helper;
+
+ ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
+ goto err_free;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(helper);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to add connectors.\n");
+ goto err_drm_fb_helper_fini;
+
+ }
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(helper, preferred_bpp);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to set initial hw configuration.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ return fbdev_cma;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(helper);
+err_free:
+ kfree(fbdev_cma);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
+
+/**
+ * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
+ * @fbdev_cma: The drm_fbdev_cma struct
+ */
+void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
+{
+ if (fbdev_cma->fb_helper.fbdev) {
+ struct fb_info *info;
+ int ret;
+
+ info = fbdev_cma->fb_helper.fbdev;
+ ret = unregister_framebuffer(info);
+ if (ret < 0)
+ DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
+
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+
+ framebuffer_release(info);
+ }
+
+ if (fbdev_cma->fb) {
+ drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
+ drm_fb_cma_destroy(&fbdev_cma->fb->fb);
+ }
+
+ drm_fb_helper_fini(&fbdev_cma->fb_helper);
+ kfree(fbdev_cma);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
+
+/**
+ * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ *
+ * This function is usually called from the DRM driver's lastclose callback.
+ */
+void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
+{
+ if (fbdev_cma)
+ drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev_cma->fb_helper);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
+
+/**
+ * drm_fbdev_cma_hotplug_event() - Poll for hotplug events
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ *
+ * This function is usually called from the DRM driver's output_poll_changed
+ * callback.
+ */
+void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
+{
+ if (fbdev_cma)
+ drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
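Putting the four exported entry points together, a CMA-based KMS driver would typically wire them up roughly as follows (foo_* names hypothetical; CRTC and connector counts taken from mode_config):

#include <linux/err.h>
#include <drm/drm_fb_cma_helper.h>

static struct drm_fbdev_cma *foo_fbdev;

static void foo_lastclose(struct drm_device *dev)
{
	drm_fbdev_cma_restore_mode(foo_fbdev);		/* NULL-safe */
}

static void foo_output_poll_changed(struct drm_device *dev)
{
	drm_fbdev_cma_hotplug_event(foo_fbdev);		/* NULL-safe */
}

static int foo_fbdev_setup(struct drm_device *dev)
{
	foo_fbdev = drm_fbdev_cma_init(dev, 32 /* preferred bpp */,
				       dev->mode_config.num_crtc,
				       dev->mode_config.num_connector);
	if (IS_ERR(foo_fbdev))
		return PTR_ERR(foo_fbdev);
	return 0;
}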
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 100ee48760b..d5d8cea1a67 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -27,215 +27,323 @@
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
#include <linux/sysrq.h>
+#include <linux/slab.h>
#include <linux/fb.h>
-#include "drmP.h"
-#include "drm_crtc.h"
-#include "drm_fb_helper.h"
-#include "drm_crtc_helper.h"
-
-MODULE_AUTHOR("David Airlie, Jesse Barnes");
-MODULE_DESCRIPTION("DRM KMS helper");
-MODULE_LICENSE("GPL and additional rights");
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
static LIST_HEAD(kernel_fb_helper_list);
-int drm_fb_helper_add_connector(struct drm_connector *connector)
+/**
+ * DOC: fbdev helpers
+ *
+ * The fb helper functions are useful to provide an fbdev on top of a drm kernel
+ * mode setting driver. They can be used mostly independently from the crtc
+ * helper functions used by many drivers to implement the kernel mode setting
+ * interfaces.
+ *
+ * Initialization is done as a three-step process with drm_fb_helper_init(),
+ * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
+ * Drivers with fancier requirements than the default behaviour can override the
+ * second step with their own code. Teardown is done with drm_fb_helper_fini().
+ *
+ * At runtime drivers should restore the fbdev console by calling
+ * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
+ * should also notify the fb helper code of updates to the output
+ * configuration by calling drm_fb_helper_hotplug_event(). For easier
+ * integration with the output polling code in drm_crtc_helper.c the modeset
+ * code provides a ->output_poll_changed callback.
+ *
+ * All other functions exported by the fb helper library can be used to
+ * implement the fbdev driver interface by the driver.
+ */
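Condensed into code, the three-step sequence described above reads roughly as below. The foo_fb_helper_funcs table is an assumed driver vtable supplying ->fb_probe; the error handling mirrors drm_fbdev_cma_init() in drm_fb_cma_helper.c earlier in this patch:

#include <drm/drm_fb_helper.h>

static int foo_fbdev_init(struct drm_device *dev, struct drm_fb_helper *helper)
{
	int ret;

	helper->funcs = &foo_fb_helper_funcs;	/* assumed: provides ->fb_probe */

	ret = drm_fb_helper_init(dev, helper,
				 dev->mode_config.num_crtc,
				 dev->mode_config.num_connector);
	if (ret < 0)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret < 0)
		goto err_fini;

	ret = drm_fb_helper_initial_config(helper, 32 /* preferred bpp */);
	if (ret < 0)
		goto err_fini;

	return 0;

err_fini:
	drm_fb_helper_fini(helper);
	return ret;
}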
+
+/**
+ * drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
+ * emulation helper
+ * @fb_helper: fbdev initialized with drm_fb_helper_init
+ *
+ * This function adds all the available connectors for use with the given
+ * fb_helper. This is a separate step to allow drivers to freely assign
+ * connectors to the fbdev, e.g. if some are reserved for special purposes or
+ * not adequate to be used for the fbcon.
+ *
+ * Since this is part of the initial setup before the fbdev is published, no
+ * locking is required.
+ */
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
- connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
- if (!connector->fb_helper_private)
- return -ENOMEM;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ int i;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct drm_fb_helper_connector *fb_helper_connector;
+
+ fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+ if (!fb_helper_connector)
+ goto fail;
+
+ fb_helper_connector->connector = connector;
+ fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+ }
return 0;
+fail:
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ kfree(fb_helper->connector_info[i]);
+ fb_helper->connector_info[i] = NULL;
+ }
+ fb_helper->connector_count = 0;
+ return -ENOMEM;
}
-EXPORT_SYMBOL(drm_fb_helper_add_connector);
+EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
-static int my_atoi(const char *name)
+static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
{
- int val = 0;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
- for (;; name++) {
- switch (*name) {
- case '0' ... '9':
- val = 10*val+(*name-'0');
- break;
- default:
- return val;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_cmdline_mode *mode;
+ struct drm_connector *connector;
+ char *option = NULL;
+
+ fb_helper_conn = fb_helper->connector_info[i];
+ connector = fb_helper_conn->connector;
+ mode = &fb_helper_conn->cmdline_mode;
+
+ /* do something on return - turn off connector maybe */
+ if (fb_get_options(connector->name, &option))
+ continue;
+
+ if (drm_mode_parse_command_line_for_connector(option,
+ connector,
+ mode)) {
+ if (mode->force) {
+ const char *s;
+ switch (mode->force) {
+ case DRM_FORCE_OFF:
+ s = "OFF";
+ break;
+ case DRM_FORCE_ON_DIGITAL:
+ s = "ON - dig";
+ break;
+ default:
+ case DRM_FORCE_ON:
+ s = "ON";
+ break;
+ }
+
+ DRM_INFO("forcing %s connector %s\n",
+ connector->name, s);
+ connector->force = mode->force;
+ }
+
+ DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
+ connector->name,
+ mode->xres, mode->yres,
+ mode->refresh_specified ? mode->refresh : 60,
+ mode->rb ? " reduced blanking" : "",
+ mode->margins ? " with margins" : "",
+ mode->interlace ? " interlaced" : "");
}
+
}
+ return 0;
+}
+
+static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
+{
+ uint16_t *r_base, *g_base, *b_base;
+ int i;
+
+ if (helper->funcs->gamma_get == NULL)
+ return;
+
+ r_base = crtc->gamma_store;
+ g_base = r_base + crtc->gamma_size;
+ b_base = g_base + crtc->gamma_size;
+
+ for (i = 0; i < crtc->gamma_size; i++)
+ helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i);
+}
+
+static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
+{
+ uint16_t *r_base, *g_base, *b_base;
+
+ if (crtc->funcs->gamma_set == NULL)
+ return;
+
+ r_base = crtc->gamma_store;
+ g_base = r_base + crtc->gamma_size;
+ b_base = g_base + crtc->gamma_size;
+
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
}
/**
- * drm_fb_helper_connector_parse_command_line - parse command line for connector
- * @connector - connector to parse line for
- * @mode_option - per connector mode option
- *
- * This parses the connector specific then generic command lines for
- * modes and options to configure the connector.
- *
- * This uses the same parameters as the fb modedb.c, except for extra
- * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
- *
- * enable/enable Digital/disable bit at the end
+ * drm_fb_helper_debug_enter - implementation for ->fb_debug_enter
+ * @info: fbdev registered by the helper
*/
-static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
- const char *mode_option)
+int drm_fb_helper_debug_enter(struct fb_info *info)
{
- const char *name;
- unsigned int namelen;
- int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
- unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
- int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
+ struct drm_fb_helper *helper = info->par;
+ struct drm_crtc_helper_funcs *funcs;
int i;
- enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
- if (!fb_help_conn)
+ if (list_empty(&kernel_fb_helper_list))
return false;
- cmdline_mode = &fb_help_conn->cmdline_mode;
- if (!mode_option)
- mode_option = fb_mode_option;
+ list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
+ for (i = 0; i < helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set =
+ &helper->crtc_info[i].mode_set;
- if (!mode_option) {
- cmdline_mode->specified = false;
- return false;
- }
+ if (!mode_set->crtc->enabled)
+ continue;
- name = mode_option;
- namelen = strlen(name);
- for (i = namelen-1; i >= 0; i--) {
- switch (name[i]) {
- case '@':
- namelen = i;
- if (!refresh_specified && !bpp_specified &&
- !yres_specified) {
- refresh = my_atoi(&name[i+1]);
- refresh_specified = 1;
- if (cvt || rb)
- cvt = 0;
- } else
- goto done;
- break;
- case '-':
- namelen = i;
- if (!bpp_specified && !yres_specified) {
- bpp = my_atoi(&name[i+1]);
- bpp_specified = 1;
- if (cvt || rb)
- cvt = 0;
- } else
- goto done;
- break;
- case 'x':
- if (!yres_specified) {
- yres = my_atoi(&name[i+1]);
- yres_specified = 1;
- } else
- goto done;
- case '0' ... '9':
- break;
- case 'M':
- if (!yres_specified)
- cvt = 1;
- break;
- case 'R':
- if (!cvt)
- rb = 1;
- break;
- case 'm':
- if (!cvt)
- margins = 1;
- break;
- case 'i':
- if (!cvt)
- interlace = 1;
- break;
- case 'e':
- force = DRM_FORCE_ON;
- break;
- case 'D':
- if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
- (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
- force = DRM_FORCE_ON;
- else
- force = DRM_FORCE_ON_DIGITAL;
- break;
- case 'd':
- force = DRM_FORCE_OFF;
- break;
- default:
- goto done;
+ funcs = mode_set->crtc->helper_private;
+ drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
+ funcs->mode_set_base_atomic(mode_set->crtc,
+ mode_set->fb,
+ mode_set->x,
+ mode_set->y,
+ ENTER_ATOMIC_MODE_SET);
}
}
- if (i < 0 && yres_specified) {
- xres = my_atoi(name);
- res_specified = 1;
- }
-done:
-
- DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
- drm_get_connector_name(connector), xres, yres,
- (refresh) ? refresh : 60, (rb) ? " reduced blanking" :
- "", (margins) ? " with margins" : "", (interlace) ?
- " interlaced" : "");
-
- if (force) {
- const char *s;
- switch (force) {
- case DRM_FORCE_OFF: s = "OFF"; break;
- case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
- default:
- case DRM_FORCE_ON: s = "ON"; break;
- }
- DRM_INFO("forcing %s connector %s\n",
- drm_get_connector_name(connector), s);
- connector->force = force;
- }
-
- if (res_specified) {
- cmdline_mode->specified = true;
- cmdline_mode->xres = xres;
- cmdline_mode->yres = yres;
- }
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_debug_enter);
- if (refresh_specified) {
- cmdline_mode->refresh_specified = true;
- cmdline_mode->refresh = refresh;
- }
+/* Find the real fb for a given fb helper CRTC */
+static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *c;
- if (bpp_specified) {
- cmdline_mode->bpp_specified = true;
- cmdline_mode->bpp = bpp;
+ list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+ if (crtc->base.id == c->base.id)
+ return c->primary->fb;
}
- cmdline_mode->rb = rb ? true : false;
- cmdline_mode->cvt = cvt ? true : false;
- cmdline_mode->interlace = interlace ? true : false;
- return true;
+ return NULL;
}
-int drm_fb_helper_parse_command_line(struct drm_device *dev)
+/**
+ * drm_fb_helper_debug_leave - implementation for ->fb_debug_leave
+ * @info: fbdev registered by the helper
+ */
+int drm_fb_helper_debug_leave(struct fb_info *info)
{
- struct drm_connector *connector;
+ struct drm_fb_helper *helper = info->par;
+ struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_framebuffer *fb;
+ int i;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- char *option = NULL;
+ for (i = 0; i < helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
+ crtc = mode_set->crtc;
+ funcs = crtc->helper_private;
+ fb = drm_mode_config_fb(crtc);
- /* do something on return - turn off connector maybe */
- if (fb_get_options(drm_get_connector_name(connector), &option))
+ if (!crtc->enabled)
+ continue;
+
+ if (!fb) {
+ DRM_ERROR("no fb to restore??\n");
continue;
+ }
- drm_fb_helper_connector_parse_command_line(connector, option);
+ drm_fb_helper_restore_lut_atomic(mode_set->crtc);
+ funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
+ crtc->y, LEAVE_ATOMIC_MODE_SET);
}
+
return 0;
}
+EXPORT_SYMBOL(drm_fb_helper_debug_leave);
-bool drm_fb_helper_force_kernel_mode(void)
+static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_plane *plane;
+ bool error = false;
+ int i;
+
+ drm_warn_on_modeset_not_all_locked(dev);
+
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+ if (plane->type != DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_force_disable(plane);
+
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+ struct drm_crtc *crtc = mode_set->crtc;
+ int ret;
+
+ if (crtc->funcs->cursor_set) {
+ ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
+ if (ret)
+ error = true;
+ }
+
+ ret = drm_mode_set_config_internal(mode_set);
+ if (ret)
+ error = true;
+ }
+ return error;
+}
+/**
+ * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
+ * @fb_helper: fbcon to restore
+ *
+ * This should be called from the driver's drm ->lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ *
+ * Use this variant if you need to bypass locking (panic), or already
+ * hold all modeset locks. Otherwise use drm_fb_helper_restore_fbdev_mode_unlocked().
+ */
+static bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+{
+ return restore_fbdev_mode(fb_helper);
+}
+
+/**
+ * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
+ * @fb_helper: fbcon to restore
+ *
+ * This should be called from the driver's drm ->lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ */
+bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ bool ret;
+ drm_modeset_lock_all(dev);
+ ret = restore_fbdev_mode(fb_helper);
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
+
+/*
+ * restore fbcon display for all kms drivers using this helper, used for sysrq
+ * and panic handling.
+ */
+static bool drm_fb_helper_force_kernel_mode(void)
{
- int i = 0;
bool ret, error = false;
struct drm_fb_helper *helper;
@@ -243,51 +351,82 @@ bool drm_fb_helper_force_kernel_mode(void)
return false;
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
- for (i = 0; i < helper->crtc_count; i++) {
- struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
- ret = drm_crtc_helper_set_config(mode_set);
- if (ret)
- error = true;
+ struct drm_device *dev = helper->dev;
+
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ continue;
+
+ /* NOTE: we use lockless flag below to avoid grabbing other
+ * modeset locks. So just trylock the underlying mutex
+ * directly:
+ */
+ if (!mutex_trylock(&dev->mode_config.mutex)) {
+ error = true;
+ continue;
}
+
+ ret = drm_fb_helper_restore_fbdev_mode(helper);
+ if (ret)
+ error = true;
+
+ mutex_unlock(&dev->mode_config.mutex);
}
return error;
}
-int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
+static int drm_fb_helper_panic(struct notifier_block *n, unsigned long unused,
void *panic_str)
{
- DRM_ERROR("panic occurred, switching back to text console\n");
+ /*
+ * It's a waste of time and effort to switch back to text console
+ * if the kernel should reboot before panic messages can be seen.
+ */
+ if (panic_timeout < 0)
+ return 0;
+
+ pr_err("panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
- return 0;
}
-EXPORT_SYMBOL(drm_fb_helper_panic);
static struct notifier_block paniced = {
.notifier_call = drm_fb_helper_panic,
};
-/**
- * drm_fb_helper_restore - restore the framebuffer console (kernel) config
- *
- * Restore's the kernel's fbcon mode, used for lastclose & panic paths.
- */
-void drm_fb_helper_restore(void)
+static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
{
- bool ret;
- ret = drm_fb_helper_force_kernel_mode();
- if (ret == true)
- DRM_ERROR("Failed to restore crtc configuration\n");
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc *crtc;
+ int bound = 0, crtcs_bound = 0;
+
+ /* Sometimes user space wants everything disabled, so don't steal the
+ * display if there's a master. */
+ if (dev->primary->master)
+ return false;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->primary->fb)
+ crtcs_bound++;
+ if (crtc->primary->fb == fb_helper->fb)
+ bound++;
+ }
+
+ if (bound < crtcs_bound)
+ return false;
+
+ return true;
}
-EXPORT_SYMBOL(drm_fb_helper_restore);
#ifdef CONFIG_MAGIC_SYSRQ
static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
- drm_fb_helper_restore();
+ bool ret;
+ ret = drm_fb_helper_force_kernel_mode();
+ if (ret == true)
+ DRM_ERROR("Failed to restore crtc configuration\n");
}
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
-static void drm_fb_helper_sysrq(int dummy1, struct tty_struct *dummy3)
+static void drm_fb_helper_sysrq(int dummy1)
{
schedule_work(&drm_fb_helper_restore_work);
}
@@ -297,111 +436,79 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
.help_msg = "force-fb(V)",
.action_msg = "Restore framebuffer console",
};
+#else
+static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif
-static void drm_fb_helper_on(struct fb_info *info)
+static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- int i;
+ struct drm_connector *connector;
+ int i, j;
/*
- * For each CRTC in this fb, turn the crtc on then,
- * find all associated encoders and turn them on.
+ * fbdev->blank can be called from irq context in case of a panic.
+ * Since we already have our own special panic handler which will
+ * restore the fbdev console mode completely, just bail out early.
*/
- for (i = 0; i < fb_helper->crtc_count; i++) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs =
- crtc->helper_private;
-
- /* Only mess with CRTCs in this fb */
- if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
- !crtc->enabled)
- continue;
-
- mutex_lock(&dev->mode_config.mutex);
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- mutex_unlock(&dev->mode_config.mutex);
-
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
-
- encoder_funcs = encoder->helper_private;
- mutex_lock(&dev->mode_config.mutex);
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
- mutex_unlock(&dev->mode_config.mutex);
- }
- }
- }
- }
-}
-
-static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- int i;
+ if (oops_in_progress)
+ return;
/*
- * For each CRTC in this fb, find all associated encoders
- * and turn them off, then turn off the CRTC.
+ * For each CRTC in this fb, turn the connectors on/off.
*/
- for (i = 0; i < fb_helper->crtc_count; i++) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs =
- crtc->helper_private;
+ drm_modeset_lock_all(dev);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
+ drm_modeset_unlock_all(dev);
+ return;
+ }
- /* Only mess with CRTCs in this fb */
- if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
- !crtc->enabled)
- continue;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
+ if (!crtc->enabled)
+ continue;
- encoder_funcs = encoder->helper_private;
- mutex_lock(&dev->mode_config.mutex);
- encoder_funcs->dpms(encoder, dpms_mode);
- mutex_unlock(&dev->mode_config.mutex);
- }
- }
- mutex_lock(&dev->mode_config.mutex);
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
- mutex_unlock(&dev->mode_config.mutex);
+ /* Walk the connectors & encoders on this fb turning them on/off */
+ for (j = 0; j < fb_helper->connector_count; j++) {
+ connector = fb_helper->connector_info[j]->connector;
+ connector->funcs->dpms(connector, dpms_mode);
+ drm_object_property_set_value(&connector->base,
+ dev->mode_config.dpms_property, dpms_mode);
}
}
+ drm_modeset_unlock_all(dev);
}
+/**
+ * drm_fb_helper_blank - implementation for ->fb_blank
+ * @blank: desired blanking state
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
switch (blank) {
/* Display: On; HSync: On, VSync: On */
case FB_BLANK_UNBLANK:
- drm_fb_helper_on(info);
+ drm_fb_helper_dpms(info, DRM_MODE_DPMS_ON);
break;
/* Display: Off; HSync: On, VSync: On */
case FB_BLANK_NORMAL:
- drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
+ drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
break;
/* Display: Off; HSync: Off, VSync: On */
case FB_BLANK_HSYNC_SUSPEND:
- drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+ drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
break;
/* Display: Off; HSync: On, VSync: Off */
case FB_BLANK_VSYNC_SUSPEND:
- drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
+ drm_fb_helper_dpms(info, DRM_MODE_DPMS_SUSPEND);
break;
/* Display: Off; HSync: Off, VSync: Off */
case FB_BLANK_POWERDOWN:
- drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
+ drm_fb_helper_dpms(info, DRM_MODE_DPMS_OFF);
break;
}
return 0;
@@ -412,50 +519,101 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
{
int i;
- for (i = 0; i < helper->crtc_count; i++)
+ for (i = 0; i < helper->connector_count; i++)
+ kfree(helper->connector_info[i]);
+ kfree(helper->connector_info);
+ for (i = 0; i < helper->crtc_count; i++) {
kfree(helper->crtc_info[i].mode_set.connectors);
+ if (helper->crtc_info[i].mode_set.mode)
+ drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
+ }
kfree(helper->crtc_info);
}
-int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn_count)
+/**
+ * drm_fb_helper_init - initialize a drm_fb_helper structure
+ * @dev: drm device
+ * @fb_helper: driver-allocated fbdev helper structure to initialize
+ * @crtc_count: maximum number of crtcs to support in this fbdev emulation
+ * @max_conn_count: max connector count
+ *
+ * This allocates the structures for the fbdev helper with the given limits.
+ * Note that this won't yet touch the hardware (through the driver interfaces)
+ * nor register the fbdev. This is only done in drm_fb_helper_initial_config()
+ * to allow driver writers more control over the exact init sequence.
+ *
+ * Drivers must set fb_helper->funcs before calling
+ * drm_fb_helper_initial_config().
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper,
+ int crtc_count, int max_conn_count)
{
- struct drm_device *dev = helper->dev;
struct drm_crtc *crtc;
- int ret = 0;
int i;
- helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
- if (!helper->crtc_info)
+ if (!max_conn_count)
+ return -EINVAL;
+
+ fb_helper->dev = dev;
+
+ INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
+
+ fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+ if (!fb_helper->crtc_info)
return -ENOMEM;
- helper->crtc_count = crtc_count;
+ fb_helper->crtc_count = crtc_count;
+ fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
+ if (!fb_helper->connector_info) {
+ kfree(fb_helper->crtc_info);
+ return -ENOMEM;
+ }
+ fb_helper->connector_count = 0;
for (i = 0; i < crtc_count; i++) {
- helper->crtc_info[i].mode_set.connectors =
+ fb_helper->crtc_info[i].mode_set.connectors =
kcalloc(max_conn_count,
sizeof(struct drm_connector *),
GFP_KERNEL);
- if (!helper->crtc_info[i].mode_set.connectors) {
- ret = -ENOMEM;
+ if (!fb_helper->crtc_info[i].mode_set.connectors)
goto out_free;
- }
- helper->crtc_info[i].mode_set.num_connectors = 0;
+ fb_helper->crtc_info[i].mode_set.num_connectors = 0;
}
i = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- helper->crtc_info[i].crtc_id = crtc->base.id;
- helper->crtc_info[i].mode_set.crtc = crtc;
+ fb_helper->crtc_info[i].mode_set.crtc = crtc;
i++;
}
- helper->conn_limit = max_conn_count;
+
return 0;
out_free:
- drm_fb_helper_crtc_free(helper);
+ drm_fb_helper_crtc_free(fb_helper);
return -ENOMEM;
}
-EXPORT_SYMBOL(drm_fb_helper_init_crtc_count);
+EXPORT_SYMBOL(drm_fb_helper_init);
+
+void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
+{
+ if (!list_empty(&fb_helper->kernel_fb_list)) {
+ list_del(&fb_helper->kernel_fb_list);
+ if (list_empty(&kernel_fb_helper_list)) {
+ pr_info("drm: unregistered panic notifier\n");
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &paniced);
+ unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+ }
+ }
+
+ drm_fb_helper_crtc_free(fb_helper);
+
+}
+EXPORT_SYMBOL(drm_fb_helper_fini);
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
@@ -477,10 +635,23 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
value = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
+ if (info->var.transp.length > 0) {
+ u32 mask = (1 << info->var.transp.length) - 1;
+ mask <<= info->var.transp.offset;
+ value |= mask;
+ }
palette[regno] = value;
return 0;
}
+ /*
+ * The driver really shouldn't advertise pseudo/directcolor
+ * visuals if it can't deal with the palette.
+ */
+ if (WARN_ON(!fb_helper->funcs->gamma_set ||
+ !fb_helper->funcs->gamma_get))
+ return -EINVAL;
+
pindex = regno;
if (fb->bits_per_pixel == 16) {
@@ -516,23 +687,30 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
return 0;
}
+/**
+ * drm_fb_helper_setcmap - implementation for ->fb_setcmap
+ * @cmap: cmap to set
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
- int i, rc = 0;
+ int i, j, rc = 0;
int start;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
+ drm_modeset_lock_all(dev);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
+ drm_modeset_unlock_all(dev);
+ return -EBUSY;
+ }
+
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
red = cmap->red;
green = cmap->green;
@@ -540,7 +718,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
transp = cmap->transp;
start = cmap->start;
- for (i = 0; i < cmap->len; i++) {
+ for (j = 0; j < cmap->len; j++) {
u16 hred, hgreen, hblue, htransp = 0xffff;
hred = *red++;
@@ -552,49 +730,22 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
if (rc)
- return rc;
+ goto out;
}
- crtc_funcs->load_lut(crtc);
+ if (crtc_funcs->load_lut)
+ crtc_funcs->load_lut(crtc);
}
+ out:
+ drm_modeset_unlock_all(dev);
return rc;
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
-int drm_fb_helper_setcolreg(unsigned regno,
- unsigned red,
- unsigned green,
- unsigned blue,
- unsigned transp,
- struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
- int i;
- int ret;
-
- if (regno > 255)
- return 1;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
-
- ret = setcolreg(crtc, red, green, blue, regno, info);
- if (ret)
- return ret;
-
- crtc_funcs->load_lut(crtc);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_setcolreg);
-
+/**
+ * drm_fb_helper_check_var - implementation for ->fb_check_var
+ * @var: screeninfo to check
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -602,15 +753,18 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct drm_framebuffer *fb = fb_helper->fb;
int depth;
- if (var->pixclock != 0)
+ if (var->pixclock != 0 || in_dbg_master())
return -EINVAL;
/* Need to resize the fb object !!! */
- if (var->xres > fb->width || var->yres > fb->height) {
- DRM_ERROR("Requested width/height is greater than current fb "
- "object %dx%d > %dx%d\n", var->xres, var->yres,
- fb->width, fb->height);
- DRM_ERROR("Need resizing code.\n");
+ if (var->bits_per_pixel > fb->bits_per_pixel ||
+ var->xres > fb->width || var->yres > fb->height ||
+ var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
+ DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
+ "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
+ var->xres, var->yres, var->bits_per_pixel,
+ var->xres_virtual, var->yres_virtual,
+ fb->width, fb->height, fb->bits_per_pixel);
return -EINVAL;
}
@@ -684,276 +838,219 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
}
EXPORT_SYMBOL(drm_fb_helper_check_var);
-/* this will let fbcon do the mode init */
+/**
+ * drm_fb_helper_set_par - implementation for ->fb_set_par
+ * @info: fbdev registered by the helper
+ *
+ * This will let fbcon do the mode init and is called at initialization time by
+ * the fbdev core when registering the driver, and later on through the hotplug
+ * callback.
+ */
int drm_fb_helper_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
struct fb_var_screeninfo *var = &info->var;
- struct drm_crtc *crtc;
- int ret;
- int i;
if (var->pixclock != 0) {
- DRM_ERROR("PIXEL CLCOK SET\n");
+ DRM_ERROR("PIXEL CLOCK SET\n");
return -EINVAL;
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
+ drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
- if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) {
- mutex_lock(&dev->mode_config.mutex);
- ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
- mutex_unlock(&dev->mode_config.mutex);
- if (ret)
- return ret;
- }
+ if (fb_helper->delayed_hotplug) {
+ fb_helper->delayed_hotplug = false;
+ drm_fb_helper_hotplug_event(fb_helper);
}
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
+/**
+ * drm_fb_helper_pan_display - implementation for ->fb_pan_display
+ * @var: updated screen information
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_mode_set *modeset;
- struct drm_crtc *crtc;
int ret = 0;
int i;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
-
- if (i == fb_helper->crtc_count)
- continue;
+ drm_modeset_lock_all(dev);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
+ drm_modeset_unlock_all(dev);
+ return -EBUSY;
+ }
+ for (i = 0; i < fb_helper->crtc_count; i++) {
modeset = &fb_helper->crtc_info[i].mode_set;
modeset->x = var->xoffset;
modeset->y = var->yoffset;
if (modeset->num_connectors) {
- mutex_lock(&dev->mode_config.mutex);
- ret = crtc->funcs->set_config(modeset);
- mutex_unlock(&dev->mode_config.mutex);
+ ret = drm_mode_set_config_internal(modeset);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
}
}
}
+ drm_modeset_unlock_all(dev);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
-int drm_fb_helper_single_fb_probe(struct drm_device *dev,
- int preferred_bpp,
- int (*fb_create)(struct drm_device *dev,
- uint32_t fb_width,
- uint32_t fb_height,
- uint32_t surface_width,
- uint32_t surface_height,
- uint32_t surface_depth,
- uint32_t surface_bpp,
- struct drm_framebuffer **fb_ptr))
+/*
+ * Allocates the backing storage and sets up the fbdev info structure through
+ * the ->fb_probe callback and then registers the fbdev and sets up the panic
+ * notifier.
+ */
+static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+ int preferred_bpp)
{
- struct drm_crtc *crtc;
- struct drm_connector *connector;
- unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
- unsigned int surface_width = 0, surface_height = 0;
- int new_fb = 0;
+ int ret = 0;
int crtc_count = 0;
- int ret, i, conn_count = 0;
+ int i;
struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_mode_set *modeset = NULL;
- struct drm_fb_helper *fb_helper;
- uint32_t surface_depth = 24, surface_bpp = 32;
+ struct drm_fb_helper_surface_size sizes;
+ int gamma_size = 0;
+
+ memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
+ sizes.fb_width = (unsigned)-1;
+ sizes.fb_height = (unsigned)-1;
/* if driver picks 8 or 16 by default use that
for both depth/bpp */
- if (preferred_bpp != surface_bpp) {
- surface_depth = surface_bpp = preferred_bpp;
- }
- /* first up get a count of crtcs now in use and new min/maxes width/heights */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
+ if (preferred_bpp != sizes.surface_bpp)
+ sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ /* first up get a count of crtcs now in use and new min/maxes width/heights */
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
+ struct drm_cmdline_mode *cmdline_mode;
- if (!fb_help_conn)
- continue;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
if (cmdline_mode->bpp_specified) {
switch (cmdline_mode->bpp) {
case 8:
- surface_depth = surface_bpp = 8;
+ sizes.surface_depth = sizes.surface_bpp = 8;
break;
case 15:
- surface_depth = 15;
- surface_bpp = 16;
+ sizes.surface_depth = 15;
+ sizes.surface_bpp = 16;
break;
case 16:
- surface_depth = surface_bpp = 16;
+ sizes.surface_depth = sizes.surface_bpp = 16;
break;
case 24:
- surface_depth = surface_bpp = 24;
+ sizes.surface_depth = sizes.surface_bpp = 24;
break;
case 32:
- surface_depth = 24;
- surface_bpp = 32;
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
break;
}
break;
}
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (drm_helper_crtc_in_use(crtc)) {
- if (crtc->desired_mode) {
- if (crtc->desired_mode->hdisplay < fb_width)
- fb_width = crtc->desired_mode->hdisplay;
-
- if (crtc->desired_mode->vdisplay < fb_height)
- fb_height = crtc->desired_mode->vdisplay;
-
- if (crtc->desired_mode->hdisplay > surface_width)
- surface_width = crtc->desired_mode->hdisplay;
-
- if (crtc->desired_mode->vdisplay > surface_height)
- surface_height = crtc->desired_mode->vdisplay;
- }
+ crtc_count = 0;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_display_mode *desired_mode;
+ desired_mode = fb_helper->crtc_info[i].desired_mode;
+
+ if (desired_mode) {
+ if (gamma_size == 0)
+ gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+ if (desired_mode->hdisplay < sizes.fb_width)
+ sizes.fb_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay < sizes.fb_height)
+ sizes.fb_height = desired_mode->vdisplay;
+ if (desired_mode->hdisplay > sizes.surface_width)
+ sizes.surface_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay > sizes.surface_height)
+ sizes.surface_height = desired_mode->vdisplay;
crtc_count++;
}
}
- if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
+ if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
/* hmm everyone went away - assume VGA cable just fell out
and will come back later. */
- return 0;
+ DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
+ sizes.fb_width = sizes.surface_width = 1024;
+ sizes.fb_height = sizes.surface_height = 768;
}
- /* do we have an fb already? */
- if (list_empty(&dev->mode_config.fb_kernel_list)) {
- ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
- surface_height, surface_depth, surface_bpp,
- &fb);
- if (ret)
- return -EINVAL;
- new_fb = 1;
- } else {
- fb = list_first_entry(&dev->mode_config.fb_kernel_list,
- struct drm_framebuffer, filp_head);
-
- /* if someone hotplugs something bigger than we have already allocated, we are pwned.
- As really we can't resize an fbdev that is in the wild currently due to fbdev
- not really being designed for the lower layers moving stuff around under it.
- - so in the grand style of things - punt. */
- if ((fb->width < surface_width) ||
- (fb->height < surface_height)) {
- DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
- return -EINVAL;
- }
- }
+ /* push down into drivers */
+ ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+ if (ret < 0)
+ return ret;
- info = fb->fbdev;
- fb_helper = info->par;
+ info = fb_helper->fbdev;
- crtc_count = 0;
- /* okay we need to setup new connector sets in the crtcs */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- modeset = &fb_helper->crtc_info[crtc_count].mode_set;
- modeset->fb = fb;
- conn_count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder)
- if (connector->encoder->crtc == modeset->crtc) {
- modeset->connectors[conn_count] = connector;
- conn_count++;
- if (conn_count > fb_helper->conn_limit)
- BUG();
- }
- }
-
- for (i = conn_count; i < fb_helper->conn_limit; i++)
- modeset->connectors[i] = NULL;
+ /*
+ * Set the fb pointer - usually drm_setup_crtcs does this for hotplug
+ * events, but at init time drm_setup_crtcs needs to be called before
+ * the fb is allocated (since we need to figure out the desired size of
+ * the fb before we can allocate it ...). Hence we need to fix things up
+ * here again.
+ */
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ if (fb_helper->crtc_info[i].mode_set.num_connectors)
+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
- modeset->crtc = crtc;
- crtc_count++;
- modeset->num_connectors = conn_count;
- if (modeset->crtc->desired_mode) {
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
- modeset->mode = drm_mode_duplicate(dev,
- modeset->crtc->desired_mode);
- }
- }
- fb_helper->crtc_count = crtc_count;
- fb_helper->fb = fb;
+ info->var.pixclock = 0;
+ if (register_framebuffer(info) < 0)
+ return -EINVAL;
- if (new_fb) {
- info->var.pixclock = 0;
- ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
- if (ret)
- return ret;
- if (register_framebuffer(info) < 0) {
- fb_dealloc_cmap(&info->cmap);
- return -EINVAL;
- }
- } else {
- drm_fb_helper_set_par(info);
- }
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
+ dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
+ info->node, info->fix.id);
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "registered panic notifier\n");
+ dev_info(fb_helper->dev->dev, "registered panic notifier\n");
atomic_notifier_chain_register(&panic_notifier_list,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
+
list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
-void drm_fb_helper_free(struct drm_fb_helper *helper)
-{
- list_del(&helper->kernel_fb_list);
- if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "unregistered panic notifier\n");
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &paniced);
- unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
- }
- drm_fb_helper_crtc_free(helper);
- fb_dealloc_cmap(&helper->fb->fbdev->cmap);
+ return 0;
}
-EXPORT_SYMBOL(drm_fb_helper_free);
+/**
+ * drm_fb_helper_fill_fix - initializes fixed fbdev information
+ * @info: fbdev registered by the helper
+ * @pitch: desired pitch
+ * @depth: desired depth
+ *
+ * Helper to fill in the fixed fbdev information, useful for non-accelerated
+ * fbdev emulation. Drivers which support acceleration methods that impose
+ * additional constraints need to set up their own limits.
+ *
+ * Drivers should call this (or their equivalent setup code) from their
+ * ->fb_probe callback.
+ */
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
FB_VISUAL_TRUECOLOR;
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
info->fix.type_aux = 0;
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
@@ -966,13 +1063,29 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
+/**
+ * drm_fb_helper_fill_var - initializes variable fbdev information
+ * @info: fbdev instance to set up
+ * @fb_helper: fb helper instance to use as template
+ * @fb_width: desired fb width
+ * @fb_height: desired fb height
+ *
+ * Sets up the variable fbdev metainformation from the given fb helper instance
+ * and the drm framebuffer allocated in fb_helper->fb.
+ *
+ * Drivers should call this (or their equivalent setup code) from their
+ * ->fb_probe callback after having allocated the fbdev backing
+ * storage framebuffer.
+ */
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
- info->pseudo_palette = fb->pseudo_palette;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
info->var.bits_per_pixel = fb->bits_per_pixel;
+ info->var.accel_flags = FB_ACCELF_TEXT;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
@@ -1037,3 +1150,524 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
info->var.yres = fb_height;
}
EXPORT_SYMBOL(drm_fb_helper_fill_var);
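Putting the two fill helpers together, a driver's ->fb_probe callback ends up looking roughly like the sketch below; allocation of the framebuffer and fb_info is elided, and all mydrv_* names are assumptions, not code from this patch:

static int mydrv_fb_probe(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct fb_info *info;

	/* ... allocate backing storage, create helper->fb, and obtain
	 * info from framebuffer_alloc(); error handling elided ... */

	drm_fb_helper_fill_fix(info, helper->fb->pitches[0],
			       helper->fb->depth);
	drm_fb_helper_fill_var(info, helper,
			       sizes->fb_width, sizes->fb_height);
	return 0;
}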
+
+static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
+ uint32_t maxX,
+ uint32_t maxY)
+{
+ struct drm_connector *connector;
+ int count = 0;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ count += connector->funcs->fill_modes(connector, maxX, maxY);
+ }
+
+ return count;
+}
+
+struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &fb_connector->connector->modes, head) {
+ if (mode->hdisplay > width ||
+ mode->vdisplay > height)
+ continue;
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ return mode;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(drm_has_preferred_mode);
+
+static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
+{
+ struct drm_cmdline_mode *cmdline_mode;
+ cmdline_mode = &fb_connector->cmdline_mode;
+ return cmdline_mode->specified;
+}
+
+struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+ int width, int height)
+{
+ struct drm_cmdline_mode *cmdline_mode;
+ struct drm_display_mode *mode = NULL;
+ bool prefer_non_interlace;
+
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+ if (cmdline_mode->specified == false)
+ return mode;
+
+ /* attempt to find a matching mode in the list of modes
+	 * we have gotten so far; if none matches, add a CVT mode that conforms
+ */
+ if (cmdline_mode->rb || cmdline_mode->margins)
+ goto create_mode;
+
+ prefer_non_interlace = !cmdline_mode->interlace;
+ again:
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ /* check width/height */
+ if (mode->hdisplay != cmdline_mode->xres ||
+ mode->vdisplay != cmdline_mode->yres)
+ continue;
+
+ if (cmdline_mode->refresh_specified) {
+ if (mode->vrefresh != cmdline_mode->refresh)
+ continue;
+ }
+
+ if (cmdline_mode->interlace) {
+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
+ } else if (prefer_non_interlace) {
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ continue;
+ }
+ return mode;
+ }
+
+ if (prefer_non_interlace) {
+ prefer_non_interlace = false;
+ goto again;
+ }
+
+create_mode:
+ mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
+ cmdline_mode);
+ list_add(&mode->head, &fb_helper_conn->connector->modes);
+ return mode;
+}
+EXPORT_SYMBOL(drm_pick_cmdline_mode);
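These cmdline entries originate from the video= kernel parameter, e.g. video=DVI-I-1:1280x1024@60, with an optional trailing i for interlace or R for reduced blanking; those are the fields the drm_cmdline_mode checks above decode.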
+
+static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+ bool enable;
+
+ if (strict)
+ enable = connector->status == connector_status_connected;
+ else
+ enable = connector->status != connector_status_disconnected;
+
+ return enable;
+}
+
+static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
+ bool *enabled)
+{
+ bool any_enabled = false;
+ struct drm_connector *connector;
+ int i = 0;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, true);
+ DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
+ enabled[i] ? "yes" : "no");
+ any_enabled |= enabled[i];
+ }
+
+ if (any_enabled)
+ return;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, false);
+ }
+}
+
+static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ int count, i, j;
+ bool can_clone = false;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ struct drm_display_mode *dmt_mode, *mode;
+
+ /* only contemplate cloning in the single crtc case */
+ if (fb_helper->crtc_count > 1)
+ return false;
+
+ count = 0;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (enabled[i])
+ count++;
+ }
+
+ /* only contemplate cloning if more than one connector is enabled */
+ if (count <= 1)
+ return false;
+
+ /* check the command line or if nothing common pick 1024x768 */
+ can_clone = true;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (!enabled[i])
+ continue;
+ fb_helper_conn = fb_helper->connector_info[i];
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ can_clone = false;
+ break;
+ }
+ for (j = 0; j < i; j++) {
+ if (!enabled[j])
+ continue;
+ if (!drm_mode_equal(modes[j], modes[i]))
+ can_clone = false;
+ }
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using command line\n");
+ return true;
+ }
+
+ /* try and find a 1024x768 mode on each connector */
+ can_clone = true;
+ dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false);
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+
+ if (!enabled[i])
+ continue;
+
+ fb_helper_conn = fb_helper->connector_info[i];
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ if (drm_mode_equal(mode, dmt_mode))
+ modes[i] = mode;
+ }
+ if (!modes[i])
+ can_clone = false;
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using 1024x768\n");
+ return true;
+ }
+ DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
+ return false;
+}
+
+static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ fb_helper_conn = fb_helper->connector_info[i];
+
+ if (enabled[i] == false)
+ continue;
+
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+
+		/* go for the command line mode first */
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+ modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
+ }
+ /* No preferred modes, pick one off the list */
+ if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
+ list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
+ break;
+ }
+ DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+ "none");
+ }
+ return true;
+}
+
+static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **best_crtcs,
+ struct drm_display_mode **modes,
+ int n, int width, int height)
+{
+ int c, o;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ struct drm_connector_helper_funcs *connector_funcs;
+ struct drm_encoder *encoder;
+ int my_score, best_score, score;
+ struct drm_fb_helper_crtc **crtcs, *crtc;
+ struct drm_fb_helper_connector *fb_helper_conn;
+
+ if (n == fb_helper->connector_count)
+ return 0;
+
+ fb_helper_conn = fb_helper->connector_info[n];
+ connector = fb_helper_conn->connector;
+
+ best_crtcs[n] = NULL;
+ best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
+ if (modes[n] == NULL)
+ return best_score;
+
+ crtcs = kzalloc(dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ if (!crtcs)
+ return best_score;
+
+ my_score = 1;
+ if (connector->status == connector_status_connected)
+ my_score++;
+ if (drm_has_cmdline_mode(fb_helper_conn))
+ my_score++;
+ if (drm_has_preferred_mode(fb_helper_conn, width, height))
+ my_score++;
+
+ connector_funcs = connector->helper_private;
+ encoder = connector_funcs->best_encoder(connector);
+ if (!encoder)
+ goto out;
+
+ /* select a crtc for this connector and then attempt to configure
+ remaining connectors */
+ for (c = 0; c < fb_helper->crtc_count; c++) {
+ crtc = &fb_helper->crtc_info[c];
+
+ if ((encoder->possible_crtcs & (1 << c)) == 0)
+ continue;
+
+ for (o = 0; o < n; o++)
+ if (best_crtcs[o] == crtc)
+ break;
+
+ if (o < n) {
+ /* ignore cloning unless only a single crtc */
+ if (fb_helper->crtc_count > 1)
+ continue;
+
+ if (!drm_mode_equal(modes[o], modes[n]))
+ continue;
+ }
+
+ crtcs[n] = crtc;
+ memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
+ score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
+ width, height);
+ if (score > best_score) {
+ best_score = score;
+ memcpy(best_crtcs, crtcs,
+ dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *));
+ }
+ }
+out:
+ kfree(crtcs);
+ return best_score;
+}
+
+static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_fb_helper_crtc **crtcs;
+ struct drm_display_mode **modes;
+ struct drm_mode_set *modeset;
+ bool *enabled;
+ int width, height;
+ int i;
+
+ DRM_DEBUG_KMS("\n");
+
+ width = dev->mode_config.max_width;
+ height = dev->mode_config.max_height;
+
+ crtcs = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ modes = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_display_mode *), GFP_KERNEL);
+ enabled = kcalloc(dev->mode_config.num_connector,
+ sizeof(bool), GFP_KERNEL);
+ if (!crtcs || !modes || !enabled) {
+ DRM_ERROR("Memory allocation failed\n");
+ goto out;
+ }
+
+ drm_enable_connectors(fb_helper, enabled);
+
+ if (!(fb_helper->funcs->initial_config &&
+ fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
+ enabled, width, height))) {
+ memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
+ memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
+
+ if (!drm_target_cloned(fb_helper,
+ modes, enabled, width, height) &&
+ !drm_target_preferred(fb_helper,
+ modes, enabled, width, height))
+ DRM_ERROR("Unable to find initial modes\n");
+
+ DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
+ width, height);
+
+ drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+ }
+
+ /* need to set the modesets up here for use later */
+ /* fill out the connector<->crtc mappings into the modesets */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ modeset->num_connectors = 0;
+ modeset->fb = NULL;
+ }
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_display_mode *mode = modes[i];
+ struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+
+		if (mode && fb_crtc) {
+			modeset = &fb_crtc->mode_set;
+ DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
+ mode->name, fb_crtc->mode_set.crtc->base.id);
+ fb_crtc->desired_mode = mode;
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = drm_mode_duplicate(dev,
+ fb_crtc->desired_mode);
+ modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ modeset->fb = fb_helper->fb;
+ }
+ }
+
+ /* Clear out any old modes if there are no more connected outputs. */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ if (modeset->num_connectors == 0) {
+ BUG_ON(modeset->fb);
+ BUG_ON(modeset->num_connectors);
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = NULL;
+ }
+ }
+out:
+ kfree(crtcs);
+ kfree(modes);
+ kfree(enabled);
+}
+
+/**
+ * drm_fb_helper_initial_config - setup a sane initial connector configuration
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
+ *
+ * Scans the CRTCs and connectors and tries to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * Note that this also registers the fbdev and so allows userspace to call into
+ * the driver through the fbdev interfaces.
+ *
+ * This function will call down into the ->fb_probe callback to let
+ * the driver allocate and initialize the fbdev info structure and the drm
+ * framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
+ * drm_fb_helper_fill_fix() are provided as helpers to setup simple default
+ * values for the fbdev info structure.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+{
+ struct drm_device *dev = fb_helper->dev;
+ int count = 0;
+
+ drm_fb_helper_parse_command_line(fb_helper);
+
+ mutex_lock(&dev->mode_config.mutex);
+ count = drm_fb_helper_probe_connector_modes(fb_helper,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ mutex_unlock(&dev->mode_config.mutex);
+ /*
+ * we shouldn't end up with no modes here.
+ */
+ if (count == 0)
+ dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");
+
+ drm_setup_crtcs(fb_helper);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_initial_config);
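Together with drm_fb_helper_init() and drm_fb_helper_single_add_all_connectors() (both outside this hunk), the driver-side fbdev setup at load time reduces to roughly the sketch below; fbdev, num_crtcs and max_connectors are illustrative names:

	ret = drm_fb_helper_init(dev, &fbdev->helper, num_crtcs, max_connectors);
	if (ret)
		return ret;
	drm_fb_helper_single_add_all_connectors(&fbdev->helper);
	/* probes modes, picks crtcs and registers the fbdev at 32 bpp */
	drm_fb_helper_initial_config(&fbdev->helper, 32);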
+
+/**
+ * drm_fb_helper_hotplug_event - respond to a hotplug notification by
+ * probing all the outputs attached to the fb
+ * @fb_helper: the drm_fb_helper
+ *
+ * Scan the connectors attached to the fb_helper and try to put together a
+ * setup after notification of a change in output configuration.
+ *
+ * Called at runtime, takes the mode config locks to be able to check/change the
+ * modeset configuration. Must be run from process context (which usually means
+ * either the output polling work or a work item launched from the driver's
+ * hotplug interrupt).
+ *
+ * Note that the driver must ensure that this is only called _after_ the fb has
+ * been fully set up, i.e. after the call to drm_fb_helper_initial_config.
+ *
+ * RETURNS:
+ * 0 on success and a non-zero error code otherwise.
+ */
+int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ u32 max_width, max_height;
+
+ if (!fb_helper->fb)
+ return 0;
+
+ mutex_lock(&fb_helper->dev->mode_config.mutex);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
+ fb_helper->delayed_hotplug = true;
+ mutex_unlock(&fb_helper->dev->mode_config.mutex);
+ return 0;
+ }
+ DRM_DEBUG_KMS("\n");
+
+ max_width = fb_helper->fb->width;
+ max_height = fb_helper->fb->height;
+
+ drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height);
+ mutex_unlock(&fb_helper->dev->mode_config.mutex);
+
+ drm_modeset_lock_all(dev);
+ drm_setup_crtcs(fb_helper);
+ drm_modeset_unlock_all(dev);
+ drm_fb_helper_set_par(fb_helper->fbdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
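In practice drivers simply forward their ->output_poll_changed mode-config hook here; a sketch with a hypothetical mydrv_private:

static void mydrv_output_poll_changed(struct drm_device *dev)
{
	struct mydrv_private *priv = dev->dev_private;

	drm_fb_helper_hotplug_event(&priv->fbdev_helper);
}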
+
+/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
+ * but the module doesn't depend on any fb console symbols. At least
+ * attempt to load fbcon to avoid leaving the system without a usable console.
+ */
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
+static int __init drm_fb_helper_modinit(void)
+{
+ const char *name = "fbcon";
+ struct module *fbcon;
+
+ mutex_lock(&module_mutex);
+ fbcon = find_module(name);
+ mutex_unlock(&module_mutex);
+
+ if (!fbcon)
+ request_module_nowait(name);
+ return 0;
+}
+
+module_init(drm_fb_helper_modinit);
+#endif
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
new file mode 100644
index 00000000000..f9c7fa3d001
--- /dev/null
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_flip_work.h"
+
+/**
+ * drm_flip_work_queue - queue work
+ * @work: the flip-work
+ * @val: the value to queue
+ *
+ * Queues work, that will later be run (passed back to drm_flip_func_t
+ * func) on a work queue after drm_flip_work_commit() is called.
+ */
+void drm_flip_work_queue(struct drm_flip_work *work, void *val)
+{
+ if (kfifo_put(&work->fifo, val)) {
+ atomic_inc(&work->pending);
+ } else {
+ DRM_ERROR("%s fifo full!\n", work->name);
+ work->func(work, val);
+ }
+}
+EXPORT_SYMBOL(drm_flip_work_queue);
+
+/**
+ * drm_flip_work_commit - commit queued work
+ * @work: the flip-work
+ * @wq: the work-queue to run the queued work on
+ *
+ * Trigger work previously queued by drm_flip_work_queue() to run
+ * on a workqueue. The typical usage would be to queue work (via
+ * drm_flip_work_queue()) at any point (from vblank irq and/or
+ * prior), and then from vblank irq commit the queued work.
+ */
+void drm_flip_work_commit(struct drm_flip_work *work,
+ struct workqueue_struct *wq)
+{
+ uint32_t pending = atomic_read(&work->pending);
+ atomic_add(pending, &work->count);
+ atomic_sub(pending, &work->pending);
+ queue_work(wq, &work->worker);
+}
+EXPORT_SYMBOL(drm_flip_work_commit);
+
+static void flip_worker(struct work_struct *w)
+{
+ struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
+ uint32_t count = atomic_read(&work->count);
+ void *val = NULL;
+
+ atomic_sub(count, &work->count);
+
+	while (count--)
+ if (!WARN_ON(!kfifo_get(&work->fifo, &val)))
+ work->func(work, val);
+}
+
+/**
+ * drm_flip_work_init - initialize flip-work
+ * @work: the flip-work to initialize
+ * @size: the max queue depth
+ * @name: debug name
+ * @func: the callback work function
+ *
+ * Initializes/allocates resources for the flip-work
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_flip_work_init(struct drm_flip_work *work, int size,
+ const char *name, drm_flip_func_t func)
+{
+ int ret;
+
+ work->name = name;
+ atomic_set(&work->count, 0);
+ atomic_set(&work->pending, 0);
+ work->func = func;
+
+ ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL);
+ if (ret) {
+ DRM_ERROR("could not allocate %s fifo\n", name);
+ return ret;
+ }
+
+ INIT_WORK(&work->worker, flip_worker);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_flip_work_init);
+
+/**
+ * drm_flip_work_cleanup - cleans up flip-work
+ * @work: the flip-work to cleanup
+ *
+ * Destroy resources allocated for the flip-work
+ */
+void drm_flip_work_cleanup(struct drm_flip_work *work)
+{
+ WARN_ON(!kfifo_is_empty(&work->fifo));
+ kfifo_free(&work->fifo);
+}
+EXPORT_SYMBOL(drm_flip_work_cleanup);
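A typical consumer of this API defers framebuffer unreferences out of the pageflip path: queue from the (possibly atomic) flip code, commit from the vblank interrupt. A sketch; the callback, plane and priv fields are illustrative:

/* runs on the workqueue, in process context */
static void unref_worker(struct drm_flip_work *work, void *val)
{
	drm_framebuffer_unreference(val);
}

/* driver init: allow up to 16 queued unrefs */
ret = drm_flip_work_init(&plane->unref_work, 16, "unref", unref_worker);

/* when a new buffer is latched, queue the old one */
drm_flip_work_queue(&plane->unref_work, old_fb);

/* from the vblank irq, once the flip has completed */
drm_flip_work_commit(&plane->unref_work, priv->wq);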
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 08d14df3bb4..021fe5d11df 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -34,72 +34,34 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include <linux/poll.h>
-#include <linux/smp_lock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
-static int drm_open_helper(struct inode *inode, struct file *filp,
- struct drm_device * dev);
+/* from BKL pushdown */
+DEFINE_MUTEX(drm_global_mutex);
+EXPORT_SYMBOL(drm_global_mutex);
+
+static int drm_open_helper(struct file *filp, struct drm_minor *minor);
static int drm_setup(struct drm_device * dev)
{
- int i;
int ret;
- if (dev->driver->firstopen) {
+ if (dev->driver->firstopen &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = dev->driver->firstopen(dev);
if (ret != 0)
return ret;
}
- atomic_set(&dev->ioctl_count, 0);
- atomic_set(&dev->vma_count, 0);
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- dev->buf_use = 0;
- atomic_set(&dev->buf_alloc, 0);
-
- i = drm_dma_setup(dev);
- if (i < 0)
- return i;
- }
-
- for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
- atomic_set(&dev->counts[i], 0);
-
- dev->sigdata.lock = NULL;
-
- dev->queue_count = 0;
- dev->queue_reserved = 0;
- dev->queue_slots = 0;
- dev->queuelist = NULL;
- dev->context_flag = 0;
- dev->interrupt_flag = 0;
- dev->dma_flag = 0;
- dev->last_context = 0;
- dev->last_switch = 0;
- dev->last_checked = 0;
- init_waitqueue_head(&dev->context_wait);
- dev->if_version = 0;
-
- dev->ctx_start = 0;
- dev->lck_start = 0;
+ ret = drm_legacy_dma_setup(dev);
+ if (ret < 0)
+ return ret;
- dev->buf_async = NULL;
- init_waitqueue_head(&dev->buf_readers);
- init_waitqueue_head(&dev->buf_writers);
DRM_DEBUG("\n");
-
- /*
- * The kernel's context could be created here, but is now created
- * in drm_dma_enqueue. This is more resource-efficient for
- * hardware that does not do DMA, but may mean that
- * drm_select_queue fails between the time the interrupt is
- * initialized and the time the queues are initialized.
- */
-
return 0;
}
@@ -116,39 +78,35 @@ static int drm_setup(struct drm_device * dev)
*/
int drm_open(struct inode *inode, struct file *filp)
{
- struct drm_device *dev = NULL;
- int minor_id = iminor(inode);
+ struct drm_device *dev;
struct drm_minor *minor;
- int retcode = 0;
-
- minor = idr_find(&drm_minors_idr, minor_id);
- if (!minor)
- return -ENODEV;
-
- if (!(dev = minor->dev))
- return -ENODEV;
-
- retcode = drm_open_helper(inode, filp, dev);
- if (!retcode) {
- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
- spin_lock(&dev->count_lock);
- if (!dev->open_count++) {
- spin_unlock(&dev->count_lock);
- retcode = drm_setup(dev);
- goto out;
- }
- spin_unlock(&dev->count_lock);
- }
-out:
- mutex_lock(&dev->struct_mutex);
- if (minor->type == DRM_MINOR_LEGACY) {
- BUG_ON((dev->dev_mapping != NULL) &&
- (dev->dev_mapping != inode->i_mapping));
- if (dev->dev_mapping == NULL)
- dev->dev_mapping = inode->i_mapping;
+ int retcode;
+ int need_setup = 0;
+
+ minor = drm_minor_acquire(iminor(inode));
+ if (IS_ERR(minor))
+ return PTR_ERR(minor);
+
+ dev = minor->dev;
+ if (!dev->open_count++)
+ need_setup = 1;
+
+ /* share address_space across all char-devs of a single device */
+ filp->f_mapping = dev->anon_inode->i_mapping;
+
+ retcode = drm_open_helper(filp, minor);
+ if (retcode)
+ goto err_undo;
+ if (need_setup) {
+ retcode = drm_setup(dev);
+ if (retcode)
+ goto err_undo;
}
- mutex_unlock(&dev->struct_mutex);
+ return 0;
+err_undo:
+ dev->open_count--;
+ drm_minor_release(minor);
return retcode;
}
EXPORT_SYMBOL(drm_open);
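drm_open() and drm_release() are not called by drivers directly; they are plugged into the driver's file_operations table, along these (sketched) lines:

static const struct file_operations mydrv_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.mmap		= drm_gem_mmap,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
};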
@@ -164,37 +122,31 @@ EXPORT_SYMBOL(drm_open);
*/
int drm_stub_open(struct inode *inode, struct file *filp)
{
- struct drm_device *dev = NULL;
+ struct drm_device *dev;
struct drm_minor *minor;
- int minor_id = iminor(inode);
int err = -ENODEV;
- const struct file_operations *old_fops;
+ const struct file_operations *new_fops;
DRM_DEBUG("\n");
- /* BKL pushdown: note that nothing else serializes idr_find() */
- lock_kernel();
- minor = idr_find(&drm_minors_idr, minor_id);
- if (!minor)
- goto out;
+ mutex_lock(&drm_global_mutex);
+ minor = drm_minor_acquire(iminor(inode));
+ if (IS_ERR(minor))
+ goto out_unlock;
- if (!(dev = minor->dev))
- goto out;
+ dev = minor->dev;
+ new_fops = fops_get(dev->driver->fops);
+ if (!new_fops)
+ goto out_release;
- old_fops = filp->f_op;
- filp->f_op = fops_get(&dev->driver->fops);
- if (filp->f_op == NULL) {
- filp->f_op = old_fops;
- goto out;
- }
- if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
- fops_put(filp->f_op);
- filp->f_op = fops_get(old_fops);
- }
- fops_put(old_fops);
+ replace_fops(filp, new_fops);
+ if (filp->f_op->open)
+ err = filp->f_op->open(inode, filp);
-out:
- unlock_kernel();
+out_release:
+ drm_minor_release(minor);
+out_unlock:
+ mutex_unlock(&drm_global_mutex);
return err;
}
@@ -218,18 +170,16 @@ static int drm_cpu_valid(void)
/**
* Called whenever a process opens /dev/drm.
*
- * \param inode device inode.
* \param filp file pointer.
- * \param dev device.
+ * \param minor acquired minor-object.
* \return zero on success or a negative number on failure.
*
* Creates and initializes a drm_file structure for the file private data in \p
* filp and adds it into the doubly-linked list in \p dev.
*/
-static int drm_open_helper(struct inode *inode, struct file *filp,
- struct drm_device * dev)
+static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
- int minor_id = iminor(inode);
+ struct drm_device *dev = minor->dev;
struct drm_file *priv;
int ret;
@@ -237,26 +187,29 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
return -EBUSY; /* No exclusive opens */
if (!drm_cpu_valid())
return -EINVAL;
+ if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
+ return -EINVAL;
- DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
+ DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);
- priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- memset(priv, 0, sizeof(*priv));
filp->private_data = priv;
priv->filp = filp;
priv->uid = current_euid();
- priv->pid = task_pid_nr(current);
- priv->minor = idr_find(&drm_minors_idr, minor_id);
- priv->ioctl_count = 0;
+ priv->pid = get_pid(task_pid(current));
+ priv->minor = minor;
+
/* for compatibility root is always authenticated */
- priv->authenticated = capable(CAP_SYS_ADMIN);
+ priv->always_authenticated = capable(CAP_SYS_ADMIN);
+ priv->authenticated = priv->always_authenticated;
priv->lock_count = 0;
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->fbs);
+ mutex_init(&priv->fbs_lock);
INIT_LIST_HEAD(&priv->event_list);
init_waitqueue_head(&priv->event_wait);
priv->event_space = 4096; /* set aside 4k for event buffer */
@@ -264,59 +217,54 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_open(dev, priv);
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_init_file_private(&priv->prime);
+
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
- goto out_free;
+ goto out_prime_destroy;
}
-
- /* if there is no current master make this fd it */
- mutex_lock(&dev->struct_mutex);
- if (!priv->minor->master) {
+ /* if there is no current master make this fd it, but do not create
+ * any master object for render clients */
+ mutex_lock(&dev->master_mutex);
+ if (drm_is_primary_client(priv) && !priv->minor->master) {
/* create a new master */
priv->minor->master = drm_master_create(priv->minor);
if (!priv->minor->master) {
- mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
- goto out_free;
+ goto out_close;
}
priv->is_master = 1;
/* take another reference for the copy in the local file priv */
priv->master = drm_master_get(priv->minor->master);
-
priv->authenticated = 1;
- mutex_unlock(&dev->struct_mutex);
if (dev->driver->master_create) {
ret = dev->driver->master_create(dev, priv->master);
if (ret) {
- mutex_lock(&dev->struct_mutex);
/* drop both references if this fails */
drm_master_put(&priv->minor->master);
drm_master_put(&priv->master);
- mutex_unlock(&dev->struct_mutex);
- goto out_free;
+ goto out_close;
}
}
- mutex_lock(&dev->struct_mutex);
if (dev->driver->master_set) {
ret = dev->driver->master_set(dev, priv, true);
if (ret) {
/* drop both references if this fails */
drm_master_put(&priv->minor->master);
drm_master_put(&priv->master);
- mutex_unlock(&dev->struct_mutex);
- goto out_free;
+ goto out_close;
}
}
- mutex_unlock(&dev->struct_mutex);
- } else {
+ } else if (drm_is_primary_client(priv)) {
/* get a reference to the master */
priv->master = drm_master_get(priv->minor->master);
- mutex_unlock(&dev->struct_mutex);
}
+ mutex_unlock(&dev->master_mutex);
mutex_lock(&dev->struct_mutex);
list_add(&priv->lhead, &dev->filelist);
@@ -334,7 +282,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
pci_dev_put(pci_dev);
}
if (!dev->hose) {
- struct pci_bus *b = pci_bus_b(pci_root_buses.next);
+ struct pci_bus *b = list_entry(pci_root_buses.next,
+ struct pci_bus, node);
if (b)
dev->hose = b->sysdata;
}
@@ -342,90 +291,32 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
#endif
return 0;
- out_free:
+
+out_close:
+ mutex_unlock(&dev->master_mutex);
+ if (dev->driver->postclose)
+ dev->driver->postclose(dev, priv);
+out_prime_destroy:
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_destroy_file_private(&priv->prime);
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_release(dev, priv);
+ put_pid(priv->pid);
kfree(priv);
filp->private_data = NULL;
return ret;
}
-/** No-op. */
-int drm_fasync(int fd, struct file *filp, int on)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
-
- DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
- (long)old_encode_dev(priv->minor->device));
- return fasync_helper(fd, filp, on, &dev->buf_async);
-}
-EXPORT_SYMBOL(drm_fasync);
-
-/*
- * Reclaim locked buffers; note that this may be a bad idea if the current
- * context doesn't have the hw lock...
- */
-static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
-{
- struct drm_file *file_priv = f->private_data;
-
- if (drm_i_have_hw_lock(dev, file_priv)) {
- dev->driver->reclaim_buffers_locked(dev, file_priv);
- } else {
- unsigned long _end = jiffies + 3 * DRM_HZ;
- int locked = 0;
-
- drm_idlelock_take(&file_priv->master->lock);
-
- /*
- * Wait for a while.
- */
- do {
- spin_lock_bh(&file_priv->master->lock.spinlock);
- locked = file_priv->master->lock.idle_has_lock;
- spin_unlock_bh(&file_priv->master->lock.spinlock);
- if (locked)
- break;
- schedule();
- } while (!time_after_eq(jiffies, _end));
-
- if (!locked) {
- DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
- "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
- "\tI will go on reclaiming the buffers anyway.\n");
- }
-
- dev->driver->reclaim_buffers_locked(dev, file_priv);
- drm_idlelock_release(&file_priv->master->lock);
- }
-}
-
static void drm_master_release(struct drm_device *dev, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
- if (dev->driver->reclaim_buffers_locked &&
- file_priv->master->lock.hw_lock)
- drm_reclaim_locked_buffers(dev, filp);
-
- if (dev->driver->reclaim_buffers_idlelocked &&
- file_priv->master->lock.hw_lock) {
- drm_idlelock_take(&file_priv->master->lock);
- dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
- drm_idlelock_release(&file_priv->master->lock);
- }
-
-
if (drm_i_have_hw_lock(dev, file_priv)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
drm_lock_free(&file_priv->master->lock,
_DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
}
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !dev->driver->reclaim_buffers_locked) {
- dev->driver->reclaim_buffers(dev, file_priv);
- }
}
static void drm_events_release(struct drm_file *file_priv)
@@ -446,13 +337,76 @@ static void drm_events_release(struct drm_file *file_priv)
}
/* Remove unconsumed events */
- list_for_each_entry_safe(e, et, &file_priv->event_list, link)
+ list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
+ list_del(&e->link);
e->destroy(e);
+ }
spin_unlock_irqrestore(&dev->event_lock, flags);
}
/**
+ * drm_legacy_dev_reinit
+ *
+ * Reinitializes a legacy/ums drm device in its lastclose function.
+ */
+static void drm_legacy_dev_reinit(struct drm_device *dev)
+{
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ dev->sigdata.lock = NULL;
+
+ dev->context_flag = 0;
+ dev->last_context = 0;
+ dev->if_version = 0;
+}
+
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
+{
+ struct drm_vma_entry *vma, *vma_temp;
+
+ DRM_DEBUG("\n");
+
+ if (dev->driver->lastclose)
+ dev->driver->lastclose(dev);
+ DRM_DEBUG("driver lastclose completed\n");
+
+ if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_irq_uninstall(dev);
+
+ mutex_lock(&dev->struct_mutex);
+
+ drm_agp_clear(dev);
+
+ drm_legacy_sg_cleanup(dev);
+
+ /* Clear vma list (only built for debugging) */
+ list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+ list_del(&vma->head);
+ kfree(vma);
+ }
+
+ drm_legacy_dma_takedown(dev);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ drm_legacy_dev_reinit(dev);
+
+ DRM_DEBUG("lastclose completed\n");
+ return 0;
+}
+
+/**
* Release file.
*
* \param inode device inode
@@ -467,10 +421,11 @@ static void drm_events_release(struct drm_file *file_priv)
int drm_release(struct inode *inode, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
+ struct drm_minor *minor = file_priv->minor;
+ struct drm_device *dev = minor->dev;
int retcode = 0;
- lock_kernel();
+ mutex_lock(&drm_global_mutex);
DRM_DEBUG("open_count = %d\n", dev->open_count);
@@ -483,21 +438,29 @@ int drm_release(struct inode *inode, struct file *filp)
DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->device),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
dev->open_count);
+ /* Release any auth tokens that might point to this file_priv,
+ (do that under the drm_global_mutex) */
+ if (file_priv->magic)
+ (void) drm_remove_magic(file_priv->master, file_priv->magic);
+
/* if the master has gone away we can't do anything with the lock */
if (file_priv->minor->master)
drm_master_release(dev, filp);
- drm_events_release(file_priv);
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ drm_core_reclaim_buffers(dev, file_priv);
- if (dev->driver->driver_features & DRIVER_GEM)
- drm_gem_release(dev, file_priv);
+ drm_events_release(file_priv);
if (dev->driver->driver_features & DRIVER_MODESET)
drm_fb_release(file_priv);
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_release(dev, file_priv);
+
mutex_lock(&dev->ctxlist_mutex);
if (!list_empty(&dev->ctxlist)) {
struct drm_ctx_list *pos, *n;
@@ -513,21 +476,22 @@ int drm_release(struct inode *inode, struct file *filp)
list_del(&pos->head);
kfree(pos);
- --dev->ctx_count;
}
}
}
mutex_unlock(&dev->ctxlist_mutex);
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev->master_mutex);
if (file_priv->is_master) {
struct drm_master *master = file_priv->master;
struct drm_file *temp;
+
+ mutex_lock(&dev->struct_mutex);
list_for_each_entry(temp, &dev->filelist, lhead) {
if ((temp->master == file_priv->master) &&
(temp != file_priv))
- temp->authenticated = 0;
+ temp->authenticated = temp->always_authenticated;
}
/**
@@ -542,6 +506,7 @@ int drm_release(struct inode *inode, struct file *filp)
master->lock.file_priv = NULL;
wake_up_interruptible_all(&master->lock.lock_queue);
}
+ mutex_unlock(&dev->struct_mutex);
if (file_priv->minor->master == file_priv->master) {
/* drop the reference held by the minor */
@@ -551,37 +516,38 @@ int drm_release(struct inode *inode, struct file *filp)
}
}
- /* drop the reference held my the file priv */
- drm_master_put(&file_priv->master);
+ /* drop the master reference held by the file priv */
+ if (file_priv->master)
+ drm_master_put(&file_priv->master);
file_priv->is_master = 0;
+ mutex_unlock(&dev->master_mutex);
+
+ mutex_lock(&dev->struct_mutex);
list_del(&file_priv->lhead);
mutex_unlock(&dev->struct_mutex);
if (dev->driver->postclose)
dev->driver->postclose(dev, file_priv);
+
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_destroy_file_private(&file_priv->prime);
+
+ put_pid(file_priv->pid);
kfree(file_priv);
/* ========================================================
* End inline drm_release
*/
- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
- spin_lock(&dev->count_lock);
if (!--dev->open_count) {
- if (atomic_read(&dev->ioctl_count)) {
- DRM_ERROR("Device busy: %d\n",
- atomic_read(&dev->ioctl_count));
- spin_unlock(&dev->count_lock);
- unlock_kernel();
- return -EBUSY;
- }
- spin_unlock(&dev->count_lock);
- unlock_kernel();
- return drm_lastclose(dev);
+ retcode = drm_lastclose(dev);
+ if (drm_device_is_unplugged(dev))
+ drm_put_dev(dev);
}
- spin_unlock(&dev->count_lock);
+ mutex_unlock(&drm_global_mutex);
- unlock_kernel();
+ drm_minor_release(minor);
return retcode;
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index e9dbb481c46..f7d71190aad 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -34,7 +34,10 @@
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
-#include "drmP.h"
+#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
+#include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
/** @file drm_gem.c
*
@@ -68,46 +71,41 @@
* We make up offsets for buffer objects so we can recognize them at
* mmap time.
*/
+
+/* pgoff in mmap is an unsigned long, so we need to make sure that
+ * the faked up offset will fit
+ */
+
+#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+#else
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
+#endif
/**
- * Initialize the GEM device fields
+ * drm_gem_init - Initialize the GEM device fields
+ * @dev: drm_device structure to initialize
*/
-
int
drm_gem_init(struct drm_device *dev)
{
- struct drm_gem_mm *mm;
+ struct drm_vma_offset_manager *vma_offset_manager;
- spin_lock_init(&dev->object_name_lock);
+ mutex_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
- atomic_set(&dev->object_count, 0);
- atomic_set(&dev->object_memory, 0);
- atomic_set(&dev->pin_count, 0);
- atomic_set(&dev->pin_memory, 0);
- atomic_set(&dev->gtt_count, 0);
- atomic_set(&dev->gtt_memory, 0);
-
- mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
- if (!mm) {
+
+ vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
+ if (!vma_offset_manager) {
DRM_ERROR("out of memory\n");
return -ENOMEM;
}
- dev->mm_private = mm;
-
- if (drm_ht_create(&mm->offset_hash, 19)) {
- kfree(mm);
- return -ENOMEM;
- }
-
- if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE)) {
- drm_ht_remove(&mm->offset_hash);
- kfree(mm);
- return -ENOMEM;
- }
+ dev->vma_offset_manager = vma_offset_manager;
+ drm_vma_offset_manager_init(vma_offset_manager,
+ DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE);
return 0;
}
@@ -115,68 +113,139 @@ drm_gem_init(struct drm_device *dev)
void
drm_gem_destroy(struct drm_device *dev)
{
- struct drm_gem_mm *mm = dev->mm_private;
- drm_mm_takedown(&mm->offset_manager);
- drm_ht_remove(&mm->offset_hash);
- kfree(mm);
- dev->mm_private = NULL;
+ drm_vma_offset_manager_destroy(dev->vma_offset_manager);
+ kfree(dev->vma_offset_manager);
+ dev->vma_offset_manager = NULL;
}
/**
- * Allocate a GEM object of the specified size with shmfs backing store
+ * drm_gem_object_init - initialize an allocated shmem-backed GEM object
+ * @dev: drm_device the object should be initialized for
+ * @obj: drm_gem_object to initialize
+ * @size: object size
+ *
+ * Initialize an already allocated GEM object of the specified size with
+ * shmfs backing store.
*/
-struct drm_gem_object *
-drm_gem_object_alloc(struct drm_device *dev, size_t size)
+int drm_gem_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
{
- struct drm_gem_object *obj;
+ struct file *filp;
- BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+ drm_gem_private_object_init(dev, obj, size);
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (!obj)
- goto free;
+ filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+ if (IS_ERR(filp))
+ return PTR_ERR(filp);
- obj->dev = dev;
- obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
- if (IS_ERR(obj->filp))
- goto free;
+ obj->filp = filp;
- /* Basically we want to disable the OOM killer and handle ENOMEM
- * ourselves by sacrificing pages from cached buffers.
- * XXX shmem_file_[gs]et_gfp_mask()
- */
- mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_FS |
- __GFP_RECLAIMABLE |
- __GFP_NORETRY |
- __GFP_NOWARN |
- __GFP_NOMEMALLOC);
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_object_init);
+
+/**
+ * drm_gem_private_object_init - initialize an allocated private GEM object
+ * @dev: drm_device the object should be initialized for
+ * @obj: drm_gem_object to initialize
+ * @size: object size
+ *
+ * Initialize an already allocated GEM object of the specified size with
+ * no GEM-provided backing store. Instead the caller is responsible for
+ * backing the object and handling it.
+ */
+void drm_gem_private_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
+{
+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+ obj->dev = dev;
+ obj->filp = NULL;
kref_init(&obj->refcount);
- kref_init(&obj->handlecount);
+ obj->handle_count = 0;
obj->size = size;
- if (dev->driver->gem_init_object != NULL &&
- dev->driver->gem_init_object(obj) != 0) {
- goto fput;
+ drm_vma_node_reset(&obj->vma_node);
+}
+EXPORT_SYMBOL(drm_gem_private_object_init);
+
+static void
+drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
+{
+ /*
+ * Note: obj->dma_buf can't disappear as long as we still hold a
+ * handle reference in obj->handle_count.
+ */
+ mutex_lock(&filp->prime.lock);
+ if (obj->dma_buf) {
+ drm_prime_remove_buf_handle_locked(&filp->prime,
+ obj->dma_buf);
}
- atomic_inc(&dev->object_count);
- atomic_add(obj->size, &dev->object_memory);
- return obj;
-fput:
- fput(obj->filp);
-free:
- kfree(obj);
- return NULL;
+ mutex_unlock(&filp->prime.lock);
}
-EXPORT_SYMBOL(drm_gem_object_alloc);
/**
- * Removes the mapping from handle to filp for this object.
+ * drm_gem_object_handle_free - release resources bound to userspace handles
+ * @obj: GEM object to clean up.
+ *
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
*/
-static int
+static void drm_gem_object_handle_free(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ /* Remove any name for this object */
+ if (obj->name) {
+ idr_remove(&dev->object_name_idr, obj->name);
+ obj->name = 0;
+ }
+}
+
+static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+{
+ /* Unbreak the reference cycle if we have an exported dma_buf. */
+ if (obj->dma_buf) {
+ dma_buf_put(obj->dma_buf);
+ obj->dma_buf = NULL;
+ }
+}
+
+static void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+ if (WARN_ON(obj->handle_count == 0))
+ return;
+
+ /*
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before we
+ * checked for a name
+ */
+
+ mutex_lock(&obj->dev->object_name_lock);
+ if (--obj->handle_count == 0) {
+ drm_gem_object_handle_free(obj);
+ drm_gem_object_exported_dma_buf_free(obj);
+ }
+ mutex_unlock(&obj->dev->object_name_lock);
+
+ drm_gem_object_unreference_unlocked(obj);
+}
+
+/**
+ * drm_gem_handle_delete - deletes the given file-private handle
+ * @filp: drm file-private structure to use for the handle look up
+ * @handle: userspace handle to delete
+ *
+ * Removes the GEM handle from the @filp lookup table and if this is the last
+ * handle also cleans up linked resources like GEM names.
+ */
+int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
struct drm_device *dev;
@@ -205,14 +274,97 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_gem_remove_prime_handles(obj, filp);
+ drm_vma_node_revoke(&obj->vma_node, filp->filp);
+
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, filp);
+ drm_gem_object_handle_unreference_unlocked(obj);
return 0;
}
+EXPORT_SYMBOL(drm_gem_handle_delete);
/**
+ * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
+ * @file: drm file-private structure to remove the dumb handle from
+ * @dev: corresponding drm_device
+ * @handle: the dumb handle to remove
+ *
+ * This implements the ->dumb_destroy kms driver callback for drivers which use
+ * gem to manage their backing storage.
+ */
+int drm_gem_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+EXPORT_SYMBOL(drm_gem_dumb_destroy);
+
+/**
+ * drm_gem_handle_create_tail - internal function to create a handle
+ * @file_priv: drm file-private structure to register the handle for
+ * @obj: object to register
+ * @handlep: pointer to return the created handle to the caller
+ *
+ * This expects the dev->object_name_lock to be held already and will drop it
+ * before returning. Used to avoid races in establishing new handles when
+ * importing an object from either a flink name or a dma-buf.
+ */
+int
+drm_gem_handle_create_tail(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep)
+{
+ struct drm_device *dev = obj->dev;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+
+ /*
+ * Get the user-visible handle using idr. Preload and perform
+ * allocation under our spinlock.
+ */
+ idr_preload(GFP_KERNEL);
+ spin_lock(&file_priv->table_lock);
+
+ ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
+ drm_gem_object_reference(obj);
+ obj->handle_count++;
+ spin_unlock(&file_priv->table_lock);
+ idr_preload_end();
+ mutex_unlock(&dev->object_name_lock);
+ if (ret < 0) {
+ drm_gem_object_handle_unreference_unlocked(obj);
+ return ret;
+ }
+ *handlep = ret;
+
+ ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
+ if (ret) {
+ drm_gem_handle_delete(file_priv, *handlep);
+ return ret;
+ }
+
+ if (dev->driver->gem_open_object) {
+ ret = dev->driver->gem_open_object(obj, file_priv);
+ if (ret) {
+ drm_gem_handle_delete(file_priv, *handlep);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * gem_handle_create - create a gem handle for an object
+ * @file_priv: drm file-private structure to register the handle for
+ * @obj: object to register
+ * @handlep: pointer to return the created handle to the caller
+ *
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
@@ -222,30 +374,160 @@ drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep)
{
- int ret;
+ mutex_lock(&obj->dev->object_name_lock);
- /*
- * Get the user-visible handle using idr.
+ return drm_gem_handle_create_tail(file_priv, obj, handlep);
+}
+EXPORT_SYMBOL(drm_gem_handle_create);
+
+
+/**
+ * drm_gem_free_mmap_offset - release a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
+ */
+void
+drm_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
+}
+EXPORT_SYMBOL(drm_gem_free_mmap_offset);
+
+/**
+ * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
+ * @obj: obj in question
+ * @size: the virtual size
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call. The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj, in cases where
+ * the virtual size differs from the physical size (i.e. obj->size). Otherwise
+ * just use drm_gem_create_mmap_offset().
+ */
+int
+drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
+{
+ struct drm_device *dev = obj->dev;
+
+ return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
+ size / PAGE_SIZE);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
+
+/**
+ * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call. The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+ return drm_gem_create_mmap_offset_size(obj, obj->size);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset);
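
A typical consumer is a driver's dumb_map_offset implementation, which
allocates the fake offset lazily and hands it back via the vma manager.
A sketch under the assumption that re-adding an existing vma node is a
no-op (foo_dumb_map_offset is a hypothetical name):

	static int foo_dumb_map_offset(struct drm_file *file_priv,
				       struct drm_device *dev, u32 handle,
				       u64 *offset)
	{
		struct drm_gem_object *obj;
		int ret;

		obj = drm_gem_object_lookup(dev, file_priv, handle);
		if (!obj)
			return -ENOENT;

		ret = drm_gem_create_mmap_offset(obj);
		if (!ret)
			*offset = drm_vma_node_offset_addr(&obj->vma_node);

		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}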
+
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * from shmem
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+ struct inode *inode;
+ struct address_space *mapping;
+ struct page *p, **pages;
+ int i, npages;
+
+ /* This is the shared memory object that backs the GEM resource */
+ inode = file_inode(obj->filp);
+ mapping = inode->i_mapping;
+
+ /* We already BUG_ON() for non-page-aligned sizes in
+ * drm_gem_object_init(), so we should never hit this unless
+ * driver author is doing something really wrong:
*/
-again:
- /* ensure there is space available to allocate a handle */
- if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
- return -ENOMEM;
+ WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
- /* do the allocation under our spinlock */
- spin_lock(&file_priv->table_lock);
- ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
- spin_unlock(&file_priv->table_lock);
- if (ret == -EAGAIN)
- goto again;
+ npages = obj->size >> PAGE_SHIFT;
- if (ret != 0)
- return ret;
+ pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (pages == NULL)
+ return ERR_PTR(-ENOMEM);
- drm_gem_object_handle_reference(obj);
- return 0;
+ gfpmask |= mapping_gfp_mask(mapping);
+
+ for (i = 0; i < npages; i++) {
+ p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ if (IS_ERR(p))
+ goto fail;
+ pages[i] = p;
+
+ /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
+ * correct region during swapin. Note that this requires
+ * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
+ * so shmem can relocate pages during swapin if required.
+ */
+ BUG_ON((gfpmask & __GFP_DMA32) &&
+ (page_to_pfn(p) >= 0x00100000UL));
+ }
+
+ return pages;
+
+fail:
+ while (i--)
+ page_cache_release(pages[i]);
+
+ drm_free_large(pages);
+ return ERR_CAST(p);
}
-EXPORT_SYMBOL(drm_gem_handle_create);
+EXPORT_SYMBOL(drm_gem_get_pages);
+
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: if true, pages will be marked as dirty
+ * @accessed: if true, the pages will be marked as accessed
+ */
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed)
+{
+ int i, npages;
+
+ /* We already BUG_ON() for non-page-aligned sizes in
+ * drm_gem_object_init(), so we should never hit this unless
+ * driver author is doing something really wrong:
+ */
+ WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ for (i = 0; i < npages; i++) {
+ if (dirty)
+ set_page_dirty(pages[i]);
+
+ if (accessed)
+ mark_page_accessed(pages[i]);
+
+ /* Undo the reference we took when populating the table */
+ page_cache_release(pages[i]);
+ }
+
+ drm_free_large(pages);
+}
+EXPORT_SYMBOL(drm_gem_put_pages);
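
The get/put pair above gives shmem-backed drivers a ready-made pin path.
A hedged sketch, where struct foo_gem_object (embedding a drm_gem_object
as "base" and caching a pages array) is an assumed driver-side wrapper:

	static int foo_pin_pages(struct foo_gem_object *foo)
	{
		struct page **pages;

		pages = drm_gem_get_pages(&foo->base, GFP_KERNEL);
		if (IS_ERR(pages))
			return PTR_ERR(pages);

		foo->pages = pages;
		return 0;
	}

	static void foo_unpin_pages(struct foo_gem_object *foo)
	{
		/* mark dirty so shmem writes the data back, drop refs */
		drm_gem_put_pages(&foo->base, foo->pages, true, false);
		foo->pages = NULL;
	}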
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
@@ -272,6 +554,11 @@ drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
EXPORT_SYMBOL(drm_gem_object_lookup);
/**
+ * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
+ * @dev: drm_device
+ * @data: ioctl data
+ * @file_priv: drm file-private structure
+ *
* Releases the handle to an mm object.
*/
int
@@ -290,6 +577,11 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data,
}
/**
+ * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
+ * @dev: drm_device
+ * @data: ioctl data
+ * @file_priv: drm file-private structure
+ *
* Create a global name for an object, returning the name.
*
* Note that the name does not hold a reference; when the object
@@ -308,43 +600,40 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EBADF;
+ return -ENOENT;
-again:
- if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
- ret = -ENOMEM;
+ mutex_lock(&dev->object_name_lock);
+ idr_preload(GFP_KERNEL);
+ /* prevent races with concurrent gem_close. */
+ if (obj->handle_count == 0) {
+ ret = -ENOENT;
goto err;
}
- spin_lock(&dev->object_name_lock);
if (!obj->name) {
- ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
- &obj->name);
- args->name = (uint64_t) obj->name;
- spin_unlock(&dev->object_name_lock);
-
- if (ret == -EAGAIN)
- goto again;
-
- if (ret != 0)
+ ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
+ if (ret < 0)
goto err;
- /* Allocate a reference for the name table. */
- drm_gem_object_reference(obj);
- } else {
- args->name = (uint64_t) obj->name;
- spin_unlock(&dev->object_name_lock);
- ret = 0;
+ obj->name = ret;
}
+ args->name = (uint64_t) obj->name;
+ ret = 0;
+
err:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ idr_preload_end();
+ mutex_unlock(&dev->object_name_lock);
+ drm_gem_object_unreference_unlocked(obj);
return ret;
}
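
Seen from userspace, the flink/open pair exchanges a per-file handle for
a global name and back. An illustrative snippet (error handling trimmed;
fd, handle and the helper names are hypothetical):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	int export_name(int fd, uint32_t handle, uint32_t *name)
	{
		struct drm_gem_flink flink = { .handle = handle };

		if (ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink))
			return -1;
		*name = flink.name;	/* global and guessable */
		return 0;
	}

	int import_name(int fd, uint32_t name, uint32_t *handle)
	{
		struct drm_gem_open op = { .name = name };

		if (ioctl(fd, DRM_IOCTL_GEM_OPEN, &op))
			return -1;
		*handle = op.handle;	/* this handle holds a reference */
		return 0;
	}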
/**
+ * drm_gem_open - implementation of the GEM_OPEN ioctl
+ * @dev: drm_device
+ * @data: ioctl data
+ * @file_priv: drm file-private structure
+ *
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
@@ -362,18 +651,18 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- spin_lock(&dev->object_name_lock);
+ mutex_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
- if (obj)
+ if (obj) {
drm_gem_object_reference(obj);
- spin_unlock(&dev->object_name_lock);
- if (!obj)
+ } else {
+ mutex_unlock(&dev->object_name_lock);
return -ENOENT;
+ }
- ret = drm_gem_handle_create(file_priv, obj, &handle);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+ ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
+ drm_gem_object_unreference_unlocked(obj);
if (ret)
return ret;
@@ -384,6 +673,10 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
}
/**
+ * drm_gem_open - initializes GEM file-private structures at devnode open time
+ * @dev: drm_device which is being opened by userspace
+ * @file_private: drm file-private structure to set up
+ *
* Called at device open time, sets up the structure for handling refcounting
* of mm objects.
*/
@@ -394,21 +687,34 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
spin_lock_init(&file_private->table_lock);
}
-/**
+/*
* Called at device close to release the file's
* handle references on objects.
*/
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
+ struct drm_file *file_priv = data;
struct drm_gem_object *obj = ptr;
+ struct drm_device *dev = obj->dev;
+
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_gem_remove_prime_handles(obj, file_priv);
+ drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, file_priv);
- drm_gem_object_handle_unreference(obj);
+ drm_gem_object_handle_unreference_unlocked(obj);
return 0;
}
/**
+ * drm_gem_release - release file-private GEM resources
+ * @dev: drm_device which is being closed by userspace
+ * @file_private: drm file-private structure to clean up
+ *
* Called at close time when the filp is going away.
*
* Releases any remaining references on objects by this filp.
@@ -416,16 +722,29 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
- mutex_lock(&dev->struct_mutex);
idr_for_each(&file_private->object_idr,
- &drm_gem_object_release_handle, NULL);
-
+ &drm_gem_object_release_handle, file_private);
idr_destroy(&file_private->object_idr);
- mutex_unlock(&dev->struct_mutex);
}
+void
+drm_gem_object_release(struct drm_gem_object *obj)
+{
+ WARN_ON(obj->dma_buf);
+
+ if (obj->filp)
+ fput(obj->filp);
+
+ drm_gem_free_mmap_offset(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_release);
+
/**
+ * drm_gem_object_free - free a GEM object
+ * @kref: kref of the object to free
+ *
* Called after the last reference to the object has been lost.
+ * Must be called holding struct_mutex
*
* Frees the object
*/
@@ -439,51 +758,18 @@ drm_gem_object_free(struct kref *kref)
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
-
- fput(obj->filp);
- atomic_dec(&dev->object_count);
- atomic_sub(obj->size, &dev->object_memory);
- kfree(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
-/**
- * Called after the last handle to the object has been closed
- *
- * Removes any name for the object. Note that this must be
- * called before drm_gem_object_free or we'll be touching
- * freed memory
- */
-void
-drm_gem_object_handle_free(struct kref *kref)
-{
- struct drm_gem_object *obj = container_of(kref,
- struct drm_gem_object,
- handlecount);
- struct drm_device *dev = obj->dev;
-
- /* Remove any name for this object */
- spin_lock(&dev->object_name_lock);
- if (obj->name) {
- idr_remove(&dev->object_name_idr, obj->name);
- obj->name = 0;
- spin_unlock(&dev->object_name_lock);
- /*
- * The object name held a reference to this object, drop
- * that now.
- */
- drm_gem_object_unreference(obj);
- } else
- spin_unlock(&dev->object_name_lock);
-
-}
-EXPORT_SYMBOL(drm_gem_object_handle_free);
-
void drm_gem_vm_open(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
drm_gem_object_reference(obj);
+
+ mutex_lock(&obj->dev->struct_mutex);
+ drm_vm_open_locked(obj->dev, vma);
+ mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);
@@ -493,11 +779,69 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
struct drm_device *dev = obj->dev;
mutex_lock(&dev->struct_mutex);
+ drm_vm_close_locked(obj->dev, vma);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
+/**
+ * drm_gem_mmap_obj - memory map a GEM object
+ * @obj: the GEM object to map
+ * @obj_size: the object size to be mapped, in bytes
+ * @vma: VMA for the area to be mapped
+ *
+ * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
+ * provided by the driver. Depending on their requirements, drivers can either
+ * provide a fault handler in their gem_vm_ops (in which case any accesses to
+ * the object will be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring), or mmap the buffer memory
+ * synchronously after calling drm_gem_mmap_obj.
+ *
+ * This function is mainly intended to implement the DMABUF mmap operation, when
+ * the GEM object is not looked up based on its fake offset. To implement the
+ * DRM mmap operation, drivers should use the drm_gem_mmap() function.
+ *
+ * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
+ * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
+ * callers must verify access restrictions before calling this helper.
+ *
+ * NOTE: This function has to be protected with dev->struct_mutex
+ *
+ * Return 0 on success or -EINVAL if the object size is smaller than the VMA
+ * size, or if no gem_vm_ops are provided.
+ */
+int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+ struct vm_area_struct *vma)
+{
+ struct drm_device *dev = obj->dev;
+
+ lockdep_assert_held(&dev->struct_mutex);
+
+ /* Check for valid size. */
+ if (obj_size < vma->vm_end - vma->vm_start)
+ return -EINVAL;
+
+ if (!dev->driver->gem_vm_ops)
+ return -EINVAL;
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = dev->driver->gem_vm_ops;
+ vma->vm_private_data = obj;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+ /* Take a ref for this mapping of the object, so that the fault
+ * handler can dereference the mmap offset's pointer to the object.
+ * This reference is cleaned up by the corresponding vm_close
+ * (which should happen whether the vma was created by this call, or
+ * by a vm_open due to mremap or partial unmap or whatever).
+ */
+ drm_gem_object_reference(obj);
+
+ drm_vm_open_locked(dev, vma);
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_mmap_obj);
/**
* drm_gem_mmap - memory map routine for GEM objects
@@ -507,65 +851,40 @@ EXPORT_SYMBOL(drm_gem_vm_close);
* If a driver supports GEM object mapping, mmap calls on the DRM file
* descriptor will end up here.
*
- * If we find the object based on the offset passed in (vma->vm_pgoff will
+ * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
* contain the fake offset we created when the GTT map ioctl was called on
- * the object), we set up the driver fault handler so that any accesses
- * to the object can be trapped, to perform migration, GTT binding, surface
- * register allocation, or performance monitoring.
+ * the object) and map it with a call to drm_gem_mmap_obj().
+ *
+ * If the caller is not granted access to the buffer object, the mmap will fail
+ * with EACCES. Please see the vma manager for more information.
*/
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
- struct drm_gem_mm *mm = dev->mm_private;
- struct drm_local_map *map = NULL;
struct drm_gem_object *obj;
- struct drm_hash_item *hash;
- int ret = 0;
+ struct drm_vma_offset_node *node;
+ int ret;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
mutex_lock(&dev->struct_mutex);
- if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+ node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+ if (!node) {
mutex_unlock(&dev->struct_mutex);
return drm_mmap(filp, vma);
+ } else if (!drm_vma_node_is_allowed(node, filp)) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EACCES;
}
- map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
- if (!map ||
- ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
- ret = -EPERM;
- goto out_unlock;
- }
-
- /* Check for valid size. */
- if (map->size < vma->vm_end - vma->vm_start) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- obj = map->handle;
- if (!obj->dev->driver->gem_vm_ops) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
- vma->vm_ops = obj->dev->driver->gem_vm_ops;
- vma->vm_private_data = map->handle;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-
- /* Take a ref for this mapping of the object, so that the fault
- * handler can dereference the mmap offset's pointer to the object.
- * This reference is cleaned up by the corresponding vm_close
- * (which should happen whether the vma was created by this call, or
- * by a vm_open due to mremap or partial unmap or whatever).
- */
- drm_gem_object_reference(obj);
-
- vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open_locked(vma);
+ obj = container_of(node, struct drm_gem_object, vma_node);
+ ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
-out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
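
From the userspace side, the reworked lookup is invisible: a client
still fetches the fake offset and feeds it to mmap(2) on the DRM fd.
A short illustrative fragment (fd, handle and size are assumed to come
from an earlier DRM_IOCTL_MODE_CREATE_DUMB call):

	struct drm_mode_map_dumb map = { .handle = handle };

	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, map.offset);	/* routed into drm_gem_mmap() */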
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
new file mode 100644
index 00000000000..05c97c5350a
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -0,0 +1,371 @@
+/*
+ * drm gem CMA (contiguous memory allocator) helper functions
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ *
+ * Based on Samsung Exynos code
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_vma_manager.h>
+
+/*
+ * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
+ * @drm: The drm device
+ * @size: The GEM object size
+ *
+ * This function creates and initializes a GEM CMA object of the given size, but
+ * doesn't allocate any memory to back the object.
+ *
+ * Return a struct drm_gem_cma_object* on success or ERR_PTR values on failure.
+ */
+static struct drm_gem_cma_object *
+__drm_gem_cma_create(struct drm_device *drm, unsigned int size)
+{
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *gem_obj;
+ int ret;
+
+ cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+ if (!cma_obj)
+ return ERR_PTR(-ENOMEM);
+
+ gem_obj = &cma_obj->base;
+
+ ret = drm_gem_object_init(drm, gem_obj, size);
+ if (ret)
+ goto error;
+
+ ret = drm_gem_create_mmap_offset(gem_obj);
+ if (ret) {
+ drm_gem_object_release(gem_obj);
+ goto error;
+ }
+
+ return cma_obj;
+
+error:
+ kfree(cma_obj);
+ return ERR_PTR(ret);
+}
+
+/*
+ * drm_gem_cma_create - allocate an object with the given size
+ *
+ * returns a struct drm_gem_cma_object* on success or ERR_PTR values
+ * on failure.
+ */
+struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+ unsigned int size)
+{
+ struct drm_gem_cma_object *cma_obj;
+ int ret;
+
+ size = round_up(size, PAGE_SIZE);
+
+ cma_obj = __drm_gem_cma_create(drm, size);
+ if (IS_ERR(cma_obj))
+ return cma_obj;
+
+ cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
+ &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
+ if (!cma_obj->vaddr) {
+ dev_err(drm->dev, "failed to allocate buffer with size %d\n",
+ size);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ return cma_obj;
+
+error:
+ drm_gem_cma_free_object(&cma_obj->base);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_create);
+
+/*
+ * drm_gem_cma_create_with_handle - allocate an object with the given
+ * size and create a gem handle on it
+ *
+ * returns a struct drm_gem_cma_object* on success or ERR_PTR values
+ * on failure.
+ */
+static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
+ struct drm_file *file_priv,
+ struct drm_device *drm, unsigned int size,
+ unsigned int *handle)
+{
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *gem_obj;
+ int ret;
+
+ cma_obj = drm_gem_cma_create(drm, size);
+ if (IS_ERR(cma_obj))
+ return cma_obj;
+
+ gem_obj = &cma_obj->base;
+
+ /*
+	 * allocate an id in the idr table where the obj is registered;
+	 * the handle carries the id that userspace sees.
+ */
+ ret = drm_gem_handle_create(file_priv, gem_obj, handle);
+ if (ret)
+ goto err_handle_create;
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_unreference_unlocked(gem_obj);
+
+ return cma_obj;
+
+err_handle_create:
+ drm_gem_cma_free_object(gem_obj);
+
+ return ERR_PTR(ret);
+}
+
+/*
+ * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
+ * function
+ */
+void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+{
+ struct drm_gem_cma_object *cma_obj;
+
+ drm_gem_free_mmap_offset(gem_obj);
+
+ cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+ if (cma_obj->vaddr) {
+ dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
+ cma_obj->vaddr, cma_obj->paddr);
+ } else if (gem_obj->import_attach) {
+ drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
+ }
+
+ drm_gem_object_release(gem_obj);
+
+ kfree(cma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
+
+/*
+ * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
+ * function
+ *
+ * This aligns the pitch and size arguments to the minimum required. Wrap
+ * this into your own function if you need bigger alignment.
+ */
+int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev, struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_cma_object *cma_obj;
+ int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+ if (args->pitch < min_pitch)
+ args->pitch = min_pitch;
+
+ if (args->size < args->pitch * args->height)
+ args->size = args->pitch * args->height;
+
+ cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
+ args->size, &args->handle);
+ return PTR_ERR_OR_ZERO(cma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
+
+/*
+ * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback
+ * function
+ */
+int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *drm, uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *gem_obj;
+
+ mutex_lock(&drm->struct_mutex);
+
+ gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
+ if (!gem_obj) {
+ dev_err(drm->dev, "failed to lookup gem object\n");
+ mutex_unlock(&drm->struct_mutex);
+ return -EINVAL;
+ }
+
+ *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+
+ drm_gem_object_unreference(gem_obj);
+
+ mutex_unlock(&drm->struct_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
+
+const struct vm_operations_struct drm_gem_cma_vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
+
+static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
+ struct vm_area_struct *vma)
+{
+ int ret;
+
+ /*
+ * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
+ * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
+ * the whole buffer.
+ */
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_pgoff = 0;
+
+ ret = dma_mmap_writecombine(cma_obj->base.dev->dev, vma,
+ cma_obj->vaddr, cma_obj->paddr,
+ vma->vm_end - vma->vm_start);
+ if (ret)
+ drm_gem_vm_close(vma);
+
+ return ret;
+}
+
+/*
+ * drm_gem_cma_mmap - (struct file_operations)->mmap callback function
+ */
+int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *gem_obj;
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
+ gem_obj = vma->vm_private_data;
+ cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+ return drm_gem_cma_mmap_obj(cma_obj, vma);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
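
Wiring the CMA helpers into a driver is mostly declarative. A sketch of
the typical glue, with all "foo" names hypothetical:

	static const struct file_operations foo_fops = {
		.owner		= THIS_MODULE,
		.open		= drm_open,
		.release	= drm_release,
		.unlocked_ioctl	= drm_ioctl,
		.mmap		= drm_gem_cma_mmap,
	};

	static struct drm_driver foo_driver = {
		.driver_features	= DRIVER_GEM | DRIVER_MODESET,
		.gem_free_object	= drm_gem_cma_free_object,
		.gem_vm_ops		= &drm_gem_cma_vm_ops,
		.dumb_create		= drm_gem_cma_dumb_create,
		.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
		.dumb_destroy		= drm_gem_dumb_destroy,
		.fops			= &foo_fops,
	};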
+
+#ifdef CONFIG_DEBUG_FS
+void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
+{
+ struct drm_gem_object *obj = &cma_obj->base;
+ struct drm_device *dev = obj->dev;
+ uint64_t off;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ off = drm_vma_node_start(&obj->vma_node);
+
+	seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
+ obj->name, obj->refcount.refcount.counter,
+ off, &cma_obj->paddr, cma_obj->vaddr, obj->size);
+
+ seq_printf(m, "\n");
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
+#endif
+
+/* low-level interface prime helpers */
+struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+ struct sg_table *sgt;
+ int ret;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return NULL;
+
+ ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
+ cma_obj->paddr, obj->size);
+ if (ret < 0)
+ goto out;
+
+ return sgt;
+
+out:
+ kfree(sgt);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
+
+struct drm_gem_object *
+drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
+ struct sg_table *sgt)
+{
+ struct drm_gem_cma_object *cma_obj;
+
+ if (sgt->nents != 1)
+ return ERR_PTR(-EINVAL);
+
+ /* Create a CMA GEM buffer. */
+ cma_obj = __drm_gem_cma_create(dev, size);
+ if (IS_ERR(cma_obj))
+ return ERR_PTR(PTR_ERR(cma_obj));
+
+ cma_obj->paddr = sg_dma_address(sgt->sgl);
+ cma_obj->sgt = sgt;
+
+ DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, size);
+
+ return &cma_obj->base;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
+
+int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_device *dev = obj->dev;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_gem_mmap_obj(obj, obj->size, vma);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret < 0)
+ return ret;
+
+ cma_obj = to_drm_gem_cma_obj(obj);
+ return drm_gem_cma_mmap_obj(cma_obj, vma);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
+
+void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
+{
+ struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+ return cma_obj->vaddr;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
+
+void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ /* Nothing to do */
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/drm_global.c
index b17007178a3..3d2e91c4d78 100644
--- a/drivers/gpu/drm/ttm/ttm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -28,46 +28,45 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include "ttm/ttm_module.h"
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <drm/drm_global.h>
-struct ttm_global_item {
+struct drm_global_item {
struct mutex mutex;
void *object;
int refcount;
};
-static struct ttm_global_item glob[TTM_GLOBAL_NUM];
+static struct drm_global_item glob[DRM_GLOBAL_NUM];
-void ttm_global_init(void)
+void drm_global_init(void)
{
int i;
- for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
- struct ttm_global_item *item = &glob[i];
+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+ struct drm_global_item *item = &glob[i];
mutex_init(&item->mutex);
item->object = NULL;
item->refcount = 0;
}
}
-void ttm_global_release(void)
+void drm_global_release(void)
{
int i;
- for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
- struct ttm_global_item *item = &glob[i];
+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+ struct drm_global_item *item = &glob[i];
BUG_ON(item->object != NULL);
BUG_ON(item->refcount != 0);
}
}
-int ttm_global_item_ref(struct ttm_global_reference *ref)
+int drm_global_item_ref(struct drm_global_reference *ref)
{
int ret;
- struct ttm_global_item *item = &glob[ref->global_type];
- void *object;
+ struct drm_global_item *item = &glob[ref->global_type];
mutex_lock(&item->mutex);
if (item->refcount == 0) {
@@ -85,7 +84,6 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
}
++item->refcount;
ref->object = item->object;
- object = item->object;
mutex_unlock(&item->mutex);
return 0;
out_err:
@@ -93,11 +91,11 @@ out_err:
item->object = NULL;
return ret;
}
-EXPORT_SYMBOL(ttm_global_item_ref);
+EXPORT_SYMBOL(drm_global_item_ref);
-void ttm_global_item_unref(struct ttm_global_reference *ref)
+void drm_global_item_unref(struct drm_global_reference *ref)
{
- struct ttm_global_item *item = &glob[ref->global_type];
+ struct drm_global_item *item = &glob[ref->global_type];
mutex_lock(&item->mutex);
BUG_ON(item->refcount == 0);
@@ -108,5 +106,5 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
}
mutex_unlock(&item->mutex);
}
-EXPORT_SYMBOL(ttm_global_item_unref);
+EXPORT_SYMBOL(drm_global_item_unref);
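
Usage is unchanged by the rename, only the prefix moves from ttm_ to
drm_. A sketch of the classic TTM consumer, where the foo wrappers and
the mem_global_ref field are assumed driver-side code:

	static int foo_global_init(struct foo_device *foo)
	{
		struct drm_global_reference *ref = &foo->mem_global_ref;
		int ret;

		ref->global_type = DRM_GLOBAL_TTM_MEM;
		ref->size = sizeof(struct ttm_mem_global);
		ref->init = &foo_ttm_mem_global_init;	/* wraps ttm_mem_global_init() */
		ref->release = &foo_ttm_mem_global_release;

		ret = drm_global_item_ref(ref);
		if (ret)
			DRM_ERROR("failed to init TTM memory global\n");
		return ret;	/* balanced by drm_global_item_unref() */
	}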
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index f36b21c5b2e..7e4bae760e2 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -32,33 +32,26 @@
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
-#include "drmP.h"
-#include "drm_hashtab.h"
+#include <drm/drmP.h>
+#include <drm/drm_hashtab.h>
#include <linux/hash.h>
+#include <linux/slab.h>
+#include <linux/export.h>
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
- unsigned int i;
+ unsigned int size = 1 << order;
- ht->size = 1 << order;
ht->order = order;
- ht->fill = 0;
ht->table = NULL;
- ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
- if (!ht->use_vmalloc) {
- ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
- }
- if (!ht->table) {
- ht->use_vmalloc = 1;
- ht->table = vmalloc(ht->size*sizeof(*ht->table));
- }
+ if (size <= PAGE_SIZE / sizeof(*ht->table))
+ ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
+ else
+ ht->table = vzalloc(size*sizeof(*ht->table));
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
}
- for (i=0; i< ht->size; ++i) {
- INIT_HLIST_HEAD(&ht->table[i]);
- }
return 0;
}
EXPORT_SYMBOL(drm_ht_create);
@@ -67,17 +60,14 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
- struct hlist_node *list;
unsigned int hashed_key;
int count = 0;
hashed_key = hash_long(key, ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, h_list, head)
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
- }
}
static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
@@ -85,45 +75,59 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
- struct hlist_node *list;
unsigned int hashed_key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, h_list, head) {
if (entry->key == key)
- return list;
+ return &entry->head;
if (entry->key > key)
break;
}
return NULL;
}
+static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
+ unsigned long key)
+{
+ struct drm_hash_item *entry;
+ struct hlist_head *h_list;
+ unsigned int hashed_key;
+
+ hashed_key = hash_long(key, ht->order);
+ h_list = &ht->table[hashed_key];
+ hlist_for_each_entry_rcu(entry, h_list, head) {
+ if (entry->key == key)
+ return &entry->head;
+ if (entry->key > key)
+ break;
+ }
+ return NULL;
+}
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
- struct hlist_node *list, *parent;
+ struct hlist_node *parent;
unsigned int hashed_key;
unsigned long key = item->key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
parent = NULL;
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, h_list, head) {
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
break;
- parent = list;
+ parent = &entry->head;
}
if (parent) {
- hlist_add_after(parent, &item->head);
+ hlist_add_after_rcu(parent, &item->head);
} else {
- hlist_add_head(&item->head, h_list);
+ hlist_add_head_rcu(&item->head, h_list);
}
return 0;
}
@@ -163,7 +167,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
{
struct hlist_node *list;
- list = drm_ht_find_key(ht, key);
+ list = drm_ht_find_key_rcu(ht, key);
if (!list)
return -EINVAL;
@@ -178,8 +182,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
list = drm_ht_find_key(ht, key);
if (list) {
- hlist_del_init(list);
- ht->fill--;
+ hlist_del_init_rcu(list);
return 0;
}
return -EINVAL;
@@ -187,8 +190,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
- hlist_del_init(&item->head);
- ht->fill--;
+ hlist_del_init_rcu(&item->head);
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
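
After this conversion lookups are safe under RCU while insert/remove
still need external serialization. A minimal usage sketch; the
embedding struct and helper are hypothetical:

	struct foo_entry {
		struct drm_hash_item hash;	/* hash.key is the lookup key */
		void *payload;
	};

	static struct foo_entry *foo_lookup(struct drm_open_hash *ht,
					    unsigned long key)
	{
		struct drm_hash_item *item;

		if (drm_ht_find_item(ht, key, &item))
			return NULL;
		return container_of(item, struct foo_entry, hash);
	}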
@@ -196,10 +198,10 @@ EXPORT_SYMBOL(drm_ht_remove_item);
void drm_ht_remove(struct drm_open_hash *ht)
{
if (ht->table) {
- if (ht->use_vmalloc)
- vfree(ht->table);
- else
+ if ((PAGE_SIZE / sizeof(*ht->table)) >> ht->order)
kfree(ht->table);
+ else
+ vfree(ht->table);
ht->table = NULL;
}
}
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index f0f6c6b93f3..86feedd5e6f 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -34,7 +34,7 @@
*/
#include <linux/seq_file.h>
-#include "drmP.h"
+#include <drm/drmP.h>
/**
* Called when "/proc/dri/.../name" is read.
@@ -47,19 +47,17 @@ int drm_name_info(struct seq_file *m, void *data)
struct drm_minor *minor = node->minor;
struct drm_device *dev = minor->dev;
struct drm_master *master = minor->master;
-
if (!master)
return 0;
if (master->unique) {
seq_printf(m, "%s %s %s\n",
- dev->driver->pci_driver.name,
- pci_name(dev->pdev), master->unique);
+ dev->driver->name,
+ dev_name(dev->dev), master->unique);
} else {
- seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
- pci_name(dev->pdev));
+ seq_printf(m, "%s %s\n",
+ dev->driver->name, dev_name(dev->dev));
}
-
return 0;
}
@@ -110,42 +108,6 @@ int drm_vm_info(struct seq_file *m, void *data)
}
/**
- * Called when "/proc/dri/.../queues" is read.
- */
-int drm_queues_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- int i;
- struct drm_queue *q;
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, " ctx/flags use fin"
- " blk/rw/rwf wait flushed queued"
- " locks\n\n");
- for (i = 0; i < dev->queue_count; i++) {
- q = dev->queuelist[i];
- atomic_inc(&q->use_count);
- seq_printf(m, "%5d/0x%03x %5d %5d"
- " %5d/%c%c/%c%c%c %5Zd\n",
- i,
- q->flags,
- atomic_read(&q->use_count),
- atomic_read(&q->finalization),
- atomic_read(&q->block_count),
- atomic_read(&q->block_read) ? 'r' : '-',
- atomic_read(&q->block_write) ? 'w' : '-',
- waitqueue_active(&q->read_queue) ? 'r' : '-',
- waitqueue_active(&q->write_queue) ? 'w' : '-',
- waitqueue_active(&q->flush_queue) ? 'f' : '-',
- DRM_BUFCOUNT(&q->waitlist));
- atomic_dec(&q->use_count);
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
* Called when "/proc/dri/.../bufs" is read.
*/
int drm_bufs_info(struct seq_file *m, void *data)
@@ -199,13 +161,13 @@ int drm_vblank_info(struct seq_file *m, void *data)
mutex_lock(&dev->struct_mutex);
for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
seq_printf(m, "CRTC %d enable: %d\n",
- crtc, atomic_read(&dev->vblank_refcount[crtc]));
+ crtc, atomic_read(&dev->vblank[crtc].refcount));
seq_printf(m, "CRTC %d counter: %d\n",
crtc, drm_vblank_count(dev, crtc));
seq_printf(m, "CRTC %d last wait: %d\n",
- crtc, dev->last_vblank_wait[crtc]);
+ crtc, dev->vblank[crtc].last_wait);
seq_printf(m, "CRTC %d in modeset: %d\n",
- crtc, dev->vblank_inmodeset[crtc]);
+ crtc, dev->vblank[crtc].inmodeset);
}
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -222,29 +184,28 @@ int drm_clients_info(struct seq_file *m, void *data)
struct drm_file *priv;
mutex_lock(&dev->struct_mutex);
- seq_printf(m, "a dev pid uid magic ioctls\n\n");
+ seq_printf(m, "a dev pid uid magic\n\n");
list_for_each_entry(priv, &dev->filelist, lhead) {
- seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
+ seq_printf(m, "%c %3d %5d %5d %10u\n",
priv->authenticated ? 'y' : 'n',
priv->minor->index,
- priv->pid,
- priv->uid, priv->magic, priv->ioctl_count);
+ pid_vnr(priv->pid),
+ from_kuid_munged(seq_user_ns(m), priv->uid),
+ priv->magic);
}
mutex_unlock(&dev->struct_mutex);
return 0;
}
-int drm_gem_one_name_info(int id, void *ptr, void *data)
+static int drm_gem_one_name_info(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
struct seq_file *m = data;
- seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
-
seq_printf(m, "%6d %8zd %7d %8d\n",
obj->name, obj->size,
- atomic_read(&obj->handlecount.refcount),
+ obj->handle_count,
atomic_read(&obj->refcount.refcount));
return 0;
}
@@ -255,21 +216,11 @@ int drm_gem_name_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
seq_printf(m, " name size handles refcount\n");
- idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
- return 0;
-}
-int drm_gem_object_info(struct seq_file *m, void* data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
+ mutex_lock(&dev->object_name_lock);
+ idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+ mutex_unlock(&dev->object_name_lock);
- seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
- seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
- seq_printf(m, "%d gtt total\n", dev->gtt_total);
return 0;
}
@@ -281,22 +232,27 @@ int drm_vma_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_vma_entry *pt;
struct vm_area_struct *vma;
+ unsigned long vma_count = 0;
#if defined(__i386__)
unsigned int pgprot;
#endif
mutex_lock(&dev->struct_mutex);
- seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
- atomic_read(&dev->vma_count),
- high_memory, (u64)virt_to_phys(high_memory));
+ list_for_each_entry(pt, &dev->vmalist, head)
+ vma_count++;
+
+ seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
+ vma_count, high_memory,
+ (void *)(unsigned long)virt_to_phys(high_memory));
list_for_each_entry(pt, &dev->vmalist, head) {
vma = pt->vma;
if (!vma)
continue;
seq_printf(m,
- "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
- pt->pid, vma->vm_start, vma->vm_end,
+ "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
+ pt->pid,
+ (void *)vma->vm_start, (void *)vma->vm_end,
vma->vm_flags & VM_READ ? 'r' : '-',
vma->vm_flags & VM_WRITE ? 'w' : '-',
vma->vm_flags & VM_EXEC ? 'x' : '-',
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index d61d185cf04..2f4c4343dfa 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -28,9 +28,11 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
+#include <linux/ratelimit.h>
+#include <linux/export.h>
-#include "drmP.h"
-#include "drm_core.h"
+#include <drm/drmP.h>
+#include <drm/drm_core.h>
#define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t)
#define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t)
@@ -253,10 +255,10 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
return -EFAULT;
m32.handle = (unsigned long)handle;
- if (m32.handle != (unsigned long)handle && printk_ratelimit())
- printk(KERN_ERR "compat_drm_addmap truncated handle"
- " %p for type %d offset %x\n",
- handle, m32.type, m32.offset);
+ if (m32.handle != (unsigned long)handle)
+ printk_ratelimited(KERN_ERR "compat_drm_addmap truncated handle"
+ " %p for type %d offset %x\n",
+ handle, m32.type, m32.offset);
if (copy_to_user(argp, &m32, sizeof(m32)))
return -EFAULT;
@@ -313,7 +315,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
if (err)
return err;
- if (__get_user(c32.auth, &client->auth)
+ if (__get_user(c32.idx, &client->idx)
+ || __get_user(c32.auth, &client->auth)
|| __get_user(c32.pid, &client->pid)
|| __get_user(c32.uid, &client->uid)
|| __get_user(c32.magic, &client->magic)
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 9b9ff46c237..69c61f392e6 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -33,10 +33,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "drmP.h"
-#include "drm_core.h"
+#include <drm/drmP.h>
+#include <drm/drm_core.h>
-#include "linux/pci.h"
+#include <linux/pci.h>
+#include <linux/export.h>
+#ifdef CONFIG_X86
+#include <asm/mtrr.h>
+#endif
/**
* Get the bus id.
@@ -64,6 +68,16 @@ int drm_getunique(struct drm_device *dev, void *data,
return 0;
}
+static void
+drm_unset_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ kfree(master->unique);
+ master->unique = NULL;
+ master->unique_len = 0;
+ master->unique_size = 0;
+}
+
/**
* Set the bus id.
*
@@ -76,14 +90,15 @@ int drm_getunique(struct drm_device *dev, void *data,
* Copies the bus id from userspace into drm_device::unique, and verifies that
* it matches the device this DRM is attached to (EINVAL otherwise). Deprecated
* in interface version 1.1 and will return EBUSY when setversion has requested
- * version 1.1 or greater.
+ * version 1.1 or greater. Also note that KMS is all version 1.1 and later and
+ * UMS was only ever supported on pci devices.
*/
int drm_setunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
struct drm_master *master = file_priv->master;
- int domain, bus, slot, func, ret;
+ int ret;
if (master->unique_len || master->unique)
return -EBUSY;
@@ -91,73 +106,48 @@ int drm_setunique(struct drm_device *dev, void *data,
if (!u->unique_len || u->unique_len > 1024)
return -EINVAL;
- master->unique_len = u->unique_len;
- master->unique_size = u->unique_len + 1;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (!master->unique)
- return -ENOMEM;
- if (copy_from_user(master->unique, u->unique, master->unique_len))
- return -EFAULT;
-
- master->unique[master->unique_len] = '\0';
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
- dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
- strlen(master->unique) + 2, GFP_KERNEL);
- if (!dev->devname)
- return -ENOMEM;
-
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
-
- /* Return error if the busid submitted doesn't match the device's actual
- * busid.
- */
- ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
- if (ret != 3)
+ if (WARN_ON(!dev->pdev))
return -EINVAL;
- domain = bus >> 8;
- bus &= 0xff;
- if ((domain != drm_get_pci_domain(dev)) ||
- (bus != dev->pdev->bus->number) ||
- (slot != PCI_SLOT(dev->pdev->devfn)) ||
- (func != PCI_FUNC(dev->pdev->devfn)))
- return -EINVAL;
+ ret = drm_pci_set_unique(dev, master, u);
+ if (ret)
+ goto err;
return 0;
+
+err:
+ drm_unset_busid(dev, master);
+ return ret;
}
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
- int len;
+ int ret;
if (master->unique != NULL)
- return -EBUSY;
+ drm_unset_busid(dev, master);
- master->unique_len = 40;
- master->unique_size = master->unique_len;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (master->unique == NULL)
- return -ENOMEM;
-
- len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%d",
- drm_get_pci_domain(dev),
- dev->pdev->bus->number,
- PCI_SLOT(dev->pdev->devfn),
- PCI_FUNC(dev->pdev->devfn));
- if (len >= master->unique_len)
- DRM_ERROR("buffer overflow");
- else
- master->unique_len = len;
-
- dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
- master->unique_len + 2, GFP_KERNEL);
- if (dev->devname == NULL)
- return -ENOMEM;
-
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
+ if (dev->driver->bus && dev->driver->bus->set_busid) {
+ ret = dev->driver->bus->set_busid(dev, master);
+ if (ret) {
+ drm_unset_busid(dev, master);
+ return ret;
+ }
+ } else {
+ if (WARN(dev->unique == NULL,
+ "No drm_bus.set_busid() implementation provided by "
+ "%ps. Use drm_dev_set_unique() to set the unique "
+ "name explicitly.", dev->driver))
+ return -EINVAL;
+
+ master->unique = kstrdup(dev->unique, GFP_KERNEL);
+ if (master->unique)
+ master->unique_len = strlen(dev->unique);
+ }
return 0;
}
@@ -185,14 +175,11 @@ int drm_getmap(struct drm_device *dev, void *data,
int i;
idx = map->offset;
-
- mutex_lock(&dev->struct_mutex);
- if (idx < 0) {
- mutex_unlock(&dev->struct_mutex);
+ if (idx < 0)
return -EINVAL;
- }
i = 0;
+ mutex_lock(&dev->struct_mutex);
list_for_each(list, &dev->maplist) {
if (i == idx) {
r_list = list_entry(list, struct drm_map_list, head);
@@ -210,7 +197,17 @@ int drm_getmap(struct drm_device *dev, void *data,
map->type = r_list->map->type;
map->flags = r_list->map->flags;
map->handle = (void *)(unsigned long) r_list->user_token;
- map->mtrr = r_list->map->mtrr;
+
+#ifdef CONFIG_X86
+ /*
+ * There appears to be exactly one user of the mtrr index: dritest.
+ * It's easy enough to keep it working on non-PAT systems.
+ */
+ map->mtrr = phys_wc_to_mtrr_index(r_list->map->mtrr);
+#else
+ map->mtrr = -1;
+#endif
+
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -233,29 +230,30 @@ int drm_getclient(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_client *client = data;
- struct drm_file *pt;
- int idx;
- int i;
- idx = client->idx;
- mutex_lock(&dev->struct_mutex);
-
- i = 0;
- list_for_each_entry(pt, &dev->filelist, lhead) {
- if (i++ >= idx) {
- client->auth = pt->authenticated;
- client->pid = pt->pid;
- client->uid = pt->uid;
- client->magic = pt->magic;
- client->iocs = pt->ioctl_count;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
- }
+ /*
+	 * Hollowed-out getclient ioctl to keep some dead old drm tests/tools
+	 * from breaking completely. Userspace tools stop enumerating once they
+	 * get -EINVAL, hence this is the return value we need to hand back for
+	 * no clients tracked.
+	 *
+	 * Unfortunately some clients (*cough* libva *cough*) use this in a fun
+	 * attempt to figure out whether they're authenticated or not. Since
+	 * that's the only thing they care about, give it to them directly
+	 * instead of walking one giant list.
+ */
+ if (client->idx == 0) {
+ client->auth = file_priv->authenticated;
+ client->pid = pid_vnr(file_priv->pid);
+ client->uid = from_kuid_munged(current_user_ns(),
+ file_priv->uid);
+ client->magic = 0;
+ client->iocs = 0;
+
+ return 0;
+ } else {
+ return -EINVAL;
}
- mutex_unlock(&dev->struct_mutex);
-
- return -EINVAL;
}
/**
@@ -272,24 +270,87 @@ int drm_getstats(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_stats *stats = data;
- int i;
+ /* Clear stats to prevent userspace from eating its stack garbage. */
memset(stats, 0, sizeof(*stats));
- mutex_lock(&dev->struct_mutex);
+ return 0;
+}
- for (i = 0; i < dev->counters; i++) {
- if (dev->types[i] == _DRM_STAT_LOCK)
- stats->data[i].value =
- (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
+/**
+ * Get device/driver capabilities
+ */
+int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_get_cap *req = data;
+
+ req->value = 0;
+ switch (req->capability) {
+ case DRM_CAP_DUMB_BUFFER:
+ if (dev->driver->dumb_create)
+ req->value = 1;
+ break;
+ case DRM_CAP_VBLANK_HIGH_CRTC:
+ req->value = 1;
+ break;
+ case DRM_CAP_DUMB_PREFERRED_DEPTH:
+ req->value = dev->mode_config.preferred_depth;
+ break;
+ case DRM_CAP_DUMB_PREFER_SHADOW:
+ req->value = dev->mode_config.prefer_shadow;
+ break;
+ case DRM_CAP_PRIME:
+ req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
+ req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
+ break;
+ case DRM_CAP_TIMESTAMP_MONOTONIC:
+ req->value = drm_timestamp_monotonic;
+ break;
+ case DRM_CAP_ASYNC_PAGE_FLIP:
+ req->value = dev->mode_config.async_page_flip;
+ break;
+ case DRM_CAP_CURSOR_WIDTH:
+ if (dev->mode_config.cursor_width)
+ req->value = dev->mode_config.cursor_width;
else
- stats->data[i].value = atomic_read(&dev->counts[i]);
- stats->data[i].type = dev->types[i];
+ req->value = 64;
+ break;
+ case DRM_CAP_CURSOR_HEIGHT:
+ if (dev->mode_config.cursor_height)
+ req->value = dev->mode_config.cursor_height;
+ else
+ req->value = 64;
+ break;
+ default:
+ return -EINVAL;
}
+ return 0;
+}
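
Userspace probes these capabilities with DRM_IOCTL_GET_CAP; a short
illustrative fragment (fd is an open DRM device, error handling
omitted):

	struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER };

	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0 && cap.value)
		/* driver implements ->dumb_create */;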
- stats->count = dev->counters;
-
- mutex_unlock(&dev->struct_mutex);
+/**
+ * Set device/driver capabilities
+ */
+int
+drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_set_client_cap *req = data;
+
+ switch (req->capability) {
+ case DRM_CLIENT_CAP_STEREO_3D:
+ if (req->value > 1)
+ return -EINVAL;
+ file_priv->stereo_allowed = req->value;
+ break;
+ case DRM_CLIENT_CAP_UNIVERSAL_PLANES:
+ if (!drm_universal_planes)
+ return -EINVAL;
+ if (req->value > 1)
+ return -EINVAL;
+ file_priv->universal_planes = req->value;
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
@@ -322,8 +383,11 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri
if (sv->drm_di_minor >= 1) {
/*
* Version 1.1 includes tying of DRM to specific device
+ * Version 1.4 has proper PCI domain support
*/
- drm_set_busid(dev, file_priv);
+ retcode = drm_set_busid(dev, file_priv);
+ if (retcode)
+ goto done;
}
}
@@ -334,9 +398,6 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri
retcode = -EINVAL;
goto done;
}
-
- if (dev->driver->set_version)
- dev->driver->set_version(dev, sv);
}
done:
@@ -355,3 +416,4 @@ int drm_noop(struct drm_device *dev, void *data,
DRM_DEBUG("\n");
return 0;
}
+EXPORT_SYMBOL(drm_noop);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 7998ee66b31..0de123afdb3 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1,6 +1,5 @@
-/**
- * \file drm_irq.c
- * IRQ support
+/*
+ * drm_irq.c IRQ and vblank support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
@@ -33,136 +32,194 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "drmP.h"
+#include <drm/drmP.h>
+#include "drm_trace.h"
#include <linux/interrupt.h> /* For task queue support */
+#include <linux/slab.h>
#include <linux/vgaarb.h>
-/**
- * Get interrupt from bus id.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_irq_busid structure.
- * \return zero on success or a negative number on failure.
- *
- * Finds the PCI device with the specified bus id and gets its IRQ number.
- * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
- * to that of the device that this DRM instance attached to.
+#include <linux/export.h>
+
+/* Access macro for slots in vblank timestamp ringbuffer. */
+#define vblanktimestamp(dev, crtc, count) \
+ ((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
+
+/* Retry timestamp calculation up to 3 times to satisfy
+ * drm_timestamp_precision before giving up.
*/
-int drm_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+#define DRM_TIMESTAMP_MAXRETRIES 3
+
+/* Threshold in nanoseconds for detection of redundant
+ * vblank irq in drm_handle_vblank(). 1 msec should be ok.
+ */
+#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
+
+/*
+ * Clear vblank timestamp buffer for a crtc.
+ */
+static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
{
- struct drm_irq_busid *p = data;
+ memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time));
+}
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EINVAL;
+/*
+ * Disable vblank irq's on crtc, make sure that last vblank count
+ * of hardware and corresponding consistent software vblank counter
+ * are preserved, even if there are any spurious vblank irq's after
+ * disable.
+ */
+static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+ u32 vblcount;
+ s64 diff_ns;
+ int vblrc;
+ struct timeval tvblank;
+ int count = DRM_TIMESTAMP_MAXRETRIES;
+
+ /* Prevent vblank irq processing while disabling vblank irqs,
+ * so no updates of timestamps or count can happen after we've
+ * disabled. Needed to prevent races in case of delayed irq's.
+ */
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+ dev->driver->disable_vblank(dev, crtc);
+ dev->vblank[crtc].enabled = false;
+
+ /* No further vblank irq's will be processed after
+ * this point. Get current hardware vblank count and
+ * vblank timestamp, repeat until they are consistent.
+ *
+ * FIXME: There is still a race condition here and in
+ * drm_update_vblank_count() which can cause off-by-one
+ * reinitialization of software vblank counter. If gpu
+ * vblank counter doesn't increment exactly at the leading
+ * edge of a vblank interval, then we can lose 1 count if
+ * we happen to execute between start of vblank and the
+ * delayed gpu counter increment.
+ */
+ do {
+ dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc);
+ vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
+ } while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
- if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
- (p->busnum & 0xff) != dev->pdev->bus->number ||
- p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
- return -EINVAL;
+ if (!count)
+ vblrc = 0;
- p->irq = dev->pdev->irq;
+ /* Compute time difference to stored timestamp of last vblank
+ * as updated by last invocation of drm_handle_vblank() in vblank irq.
+ */
+ vblcount = atomic_read(&dev->vblank[crtc].count);
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+ /* If there is at least 1 msec difference between the last stored
+ * timestamp and tvblank, then we are currently executing our
+ * disable inside a new vblank interval, the tvblank timestamp
+ * corresponds to this new vblank interval and the irq handler
+ * for this vblank didn't run yet and won't run due to our disable.
+ * Therefore we need to do the job of drm_handle_vblank() and
+ * increment the vblank counter by one to account for this vblank.
+ *
+ * Skip this step if there isn't any high precision timestamp
+ * available. In that case we can't account for this and just
+ * hope for the best.
+ */
+ if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
+ atomic_inc(&dev->vblank[crtc].count);
+ smp_mb__after_atomic();
+ }
- DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
- p->irq);
+ /* Invalidate all timestamps while vblank irq's are off. */
+ clear_vblank_timestamps(dev, crtc);
- return 0;
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
static void vblank_disable_fn(unsigned long arg)
{
- struct drm_device *dev = (struct drm_device *)arg;
+ struct drm_vblank_crtc *vblank = (void *)arg;
+ struct drm_device *dev = vblank->dev;
unsigned long irqflags;
- int i;
+ int crtc = vblank->crtc;
if (!dev->vblank_disable_allowed)
return;
- for (i = 0; i < dev->num_crtcs; i++) {
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
- dev->vblank_enabled[i]) {
- DRM_DEBUG("disabling vblank on crtc %d\n", i);
- dev->last_vblank[i] =
- dev->driver->get_vblank_counter(dev, i);
- dev->driver->disable_vblank(dev, i);
- dev->vblank_enabled[i] = 0;
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
+ DRM_DEBUG("disabling vblank on crtc %d\n", crtc);
+ vblank_disable_and_save(dev, crtc);
}
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
+/**
+ * drm_vblank_cleanup - cleanup vblank support
+ * @dev: DRM device
+ *
+ * This function cleans up any resources allocated in drm_vblank_init.
+ */
void drm_vblank_cleanup(struct drm_device *dev)
{
+ int crtc;
+
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
- del_timer(&dev->vblank_disable_timer);
-
- vblank_disable_fn((unsigned long)dev);
+ for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
+ del_timer_sync(&dev->vblank[crtc].disable_timer);
+ vblank_disable_fn((unsigned long)&dev->vblank[crtc]);
+ }
- kfree(dev->vbl_queue);
- kfree(dev->_vblank_count);
- kfree(dev->vblank_refcount);
- kfree(dev->vblank_enabled);
- kfree(dev->last_vblank);
- kfree(dev->last_vblank_wait);
- kfree(dev->vblank_inmodeset);
+ kfree(dev->vblank);
dev->num_crtcs = 0;
}
+EXPORT_SYMBOL(drm_vblank_cleanup);
+/**
+ * drm_vblank_init - initialize vblank support
+ * @dev: drm_device
+ * @num_crtcs: number of crtcs supported by @dev
+ *
+ * This function initializes vblank support for @num_crtcs display pipelines.
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
+ */
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
int i, ret = -ENOMEM;
- setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
- (unsigned long)dev);
spin_lock_init(&dev->vbl_lock);
- dev->num_crtcs = num_crtcs;
-
- dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
- GFP_KERNEL);
- if (!dev->vbl_queue)
- goto err;
-
- dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
- if (!dev->_vblank_count)
- goto err;
-
- dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
- GFP_KERNEL);
- if (!dev->vblank_refcount)
- goto err;
-
- dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
- if (!dev->vblank_enabled)
- goto err;
+ spin_lock_init(&dev->vblank_time_lock);
- dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
- if (!dev->last_vblank)
- goto err;
-
- dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
- if (!dev->last_vblank_wait)
- goto err;
+ dev->num_crtcs = num_crtcs;
- dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
- if (!dev->vblank_inmodeset)
+ dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
+ if (!dev->vblank)
goto err;
- /* Zero per-crtc vblank stuff */
for (i = 0; i < num_crtcs; i++) {
- init_waitqueue_head(&dev->vbl_queue[i]);
- atomic_set(&dev->_vblank_count[i], 0);
- atomic_set(&dev->vblank_refcount[i], 0);
+ dev->vblank[i].dev = dev;
+ dev->vblank[i].crtc = i;
+ init_waitqueue_head(&dev->vblank[i].queue);
+ setup_timer(&dev->vblank[i].disable_timer, vblank_disable_fn,
+ (unsigned long)&dev->vblank[i]);
}
- dev->vblank_disable_allowed = 0;
+ DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
+
+ /* Driver specific high-precision vblank timestamping supported? */
+ if (dev->driver->get_vblank_timestamp)
+ DRM_INFO("Driver supports precise vblank timestamp query.\n");
+ else
+ DRM_INFO("No driver support for vblank timestamp query.\n");
+
+ dev->vblank_disable_allowed = false;
return 0;
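
For orientation, a minimal sketch of how a driver pairs drm_vblank_init() and drm_vblank_cleanup() in its load/unload paths; the foo_ names and pipe count are assumptions for illustration, not part of this patch:

	#define FOO_NUM_PIPES 2

	static int foo_driver_load(struct drm_device *dev, unsigned long flags)
	{
		int ret;

		/* one software vblank counter per hardware pipe */
		ret = drm_vblank_init(dev, FOO_NUM_PIPES);
		if (ret)
			return ret;

		/* ... modeset setup, interrupt setup, etc. ... */
		return 0;
	}

	static int foo_driver_unload(struct drm_device *dev)
	{
		drm_vblank_cleanup(dev);
		return 0;
	}
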
@@ -184,71 +241,68 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
if (!dev->irq_enabled)
return;
- if (state)
- dev->driver->irq_uninstall(dev);
- else {
- dev->driver->irq_preinstall(dev);
- dev->driver->irq_postinstall(dev);
+ if (state) {
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
+ } else {
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
+ if (dev->driver->irq_postinstall)
+ dev->driver->irq_postinstall(dev);
}
}
/**
- * Install IRQ handler.
- *
- * \param dev DRM device.
+ * drm_irq_install - install IRQ handler
+ * @dev: DRM device
+ * @irq: IRQ number to install the handler for
*
* Initializes the IRQ related data. Installs the handler, calling the driver
- * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
- * before and after the installation.
+ * irq_preinstall() and irq_postinstall() functions before and after the
+ * installation.
+ *
+ * This is the simplified helper interface provided for drivers with no special
+ * needs. Drivers which need to install interrupt handlers for multiple
+ * interrupts must instead set drm_device->irq_enabled to signal the DRM core
+ * that vblank interrupts are available.
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
*/
-int drm_irq_install(struct drm_device *dev)
+int drm_irq_install(struct drm_device *dev, int irq)
{
- int ret = 0;
+ int ret;
unsigned long sh_flags = 0;
- char *irqname;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
- if (dev->pdev->irq == 0)
+ if (irq == 0)
return -EINVAL;
- mutex_lock(&dev->struct_mutex);
-
/* Driver must have been initialized */
- if (!dev->dev_private) {
- mutex_unlock(&dev->struct_mutex);
+ if (!dev->dev_private)
return -EINVAL;
- }
- if (dev->irq_enabled) {
- mutex_unlock(&dev->struct_mutex);
+ if (dev->irq_enabled)
return -EBUSY;
- }
- dev->irq_enabled = 1;
- mutex_unlock(&dev->struct_mutex);
+ dev->irq_enabled = true;
- DRM_DEBUG("irq=%d\n", dev->pdev->irq);
+ DRM_DEBUG("irq=%d\n", irq);
/* Before installing handler */
- dev->driver->irq_preinstall(dev);
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
/* Install handler */
if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
sh_flags = IRQF_SHARED;
- if (dev->devname)
- irqname = dev->devname;
- else
- irqname = dev->driver->name;
-
- ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
- sh_flags, irqname, dev);
+ ret = request_irq(irq, dev->driver->irq_handler,
+ sh_flags, dev->driver->name, dev);
if (ret < 0) {
- mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
- mutex_unlock(&dev->struct_mutex);
+ dev->irq_enabled = false;
return ret;
}
@@ -256,11 +310,16 @@ int drm_irq_install(struct drm_device *dev)
vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
/* After installing handler */
- ret = dev->driver->irq_postinstall(dev);
+ if (dev->driver->irq_postinstall)
+ ret = dev->driver->irq_postinstall(dev);
+
if (ret < 0) {
- mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
- mutex_unlock(&dev->struct_mutex);
+ dev->irq_enabled = false;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+ free_irq(irq, dev);
+ } else {
+ dev->irq = irq;
}
return ret;
@@ -268,53 +327,65 @@ int drm_irq_install(struct drm_device *dev)
EXPORT_SYMBOL(drm_irq_install);
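
A minimal sketch of the intended call from a PCI driver's load path, per the new signature taking an explicit IRQ number (foo_ name assumed for illustration):

	static int foo_enable_interrupts(struct drm_device *dev)
	{
		int ret;

		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret) {
			DRM_ERROR("failed to install IRQ handler\n");
			return ret;
		}

		return 0;
	}
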
/**
- * Uninstall the IRQ handler.
+ * drm_irq_uninstall - uninstall the IRQ handler
+ * @dev: DRM device
+ *
+ * Calls the driver's irq_uninstall() function and unregisters the IRQ handler.
+ * This should only be called by drivers which used drm_irq_install() to set up
+ * their interrupt handler. Other drivers must only reset
+ * drm_device->irq_enabled to false.
*
- * \param dev DRM device.
+ * Note that for kernel modesetting drivers it is a bug if this function fails.
+ * The sanity checks are only to catch buggy user modesetting drivers which call
+ * the same function through an ioctl.
*
- * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
+ * Returns:
+ * Zero on success or a negative error code on failure.
*/
-int drm_irq_uninstall(struct drm_device * dev)
+int drm_irq_uninstall(struct drm_device *dev)
{
unsigned long irqflags;
- int irq_enabled, i;
+ bool irq_enabled;
+ int i;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
- mutex_lock(&dev->struct_mutex);
irq_enabled = dev->irq_enabled;
- dev->irq_enabled = 0;
- mutex_unlock(&dev->struct_mutex);
+ dev->irq_enabled = false;
/*
* Wake up any waiters so they don't hang.
*/
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- for (i = 0; i < dev->num_crtcs; i++) {
- DRM_WAKEUP(&dev->vbl_queue[i]);
- dev->vblank_enabled[i] = 0;
- dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i);
+ if (dev->num_crtcs) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ for (i = 0; i < dev->num_crtcs; i++) {
+ wake_up(&dev->vblank[i].queue);
+ dev->vblank[i].enabled = false;
+ dev->vblank[i].last =
+ dev->driver->get_vblank_counter(dev, i);
+ }
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
if (!irq_enabled)
return -EINVAL;
- DRM_DEBUG("irq=%d\n", dev->pdev->irq);
+ DRM_DEBUG("irq=%d\n", dev->irq);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
vga_client_register(dev->pdev, NULL, NULL, NULL);
- dev->driver->irq_uninstall(dev);
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
- free_irq(dev->pdev->irq, dev);
+ free_irq(dev->irq, dev);
return 0;
}
EXPORT_SYMBOL(drm_irq_uninstall);
-/**
+/*
* IRQ control ioctl.
*
* \param inode device inode.
@@ -329,32 +400,323 @@ int drm_control(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_control *ctl = data;
+ int ret = 0, irq;
- /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
+	/* if we don't have an irq, fall back for compatibility reasons -
+	 * this used to be a separate function in drm_dma.h
+	 */
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+	/* UMS was only ever supported on pci devices. */
+ if (WARN_ON(!dev->pdev))
+ return -EINVAL;
switch (ctl->func) {
case DRM_INST_HANDLER:
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return 0;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
+ irq = dev->pdev->irq;
+
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
- ctl->irq != dev->pdev->irq)
+ ctl->irq != irq)
return -EINVAL;
- return drm_irq_install(dev);
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_irq_install(dev, irq);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
case DRM_UNINST_HANDLER:
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return 0;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
- return drm_irq_uninstall(dev);
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_irq_uninstall(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
default:
return -EINVAL;
}
}
/**
+ * drm_calc_timestamping_constants - calculate vblank timestamp constants
+ * @crtc: drm_crtc whose timestamp constants should be updated.
+ * @mode: display mode containing the scanout timings
+ *
+ * Calculate and store various constants which are later
+ * needed by vblank and swap-completion timestamping, e.g,
+ * by drm_calc_vbltimestamp_from_scanoutpos(). They are
+ * derived from CRTC's true scanout timing, so they take
+ * things like panel scaling or other adjustments into account.
+ */
+void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ int linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+ int dotclock = mode->crtc_clock;
+
+ /* Valid dotclock? */
+ if (dotclock > 0) {
+ int frame_size = mode->crtc_htotal * mode->crtc_vtotal;
+
+ /*
+ * Convert scanline length in pixels and video
+ * dot clock to line duration, frame duration
+ * and pixel duration in nanoseconds:
+ */
+ pixeldur_ns = 1000000 / dotclock;
+ linedur_ns = div_u64((u64) mode->crtc_htotal * 1000000, dotclock);
+ framedur_ns = div_u64((u64) frame_size * 1000000, dotclock);
+
+ /*
+ * Fields of interlaced scanout modes are only half a frame duration.
+ */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ framedur_ns /= 2;
+ } else
+ DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+ crtc->base.id);
+
+ crtc->pixeldur_ns = pixeldur_ns;
+ crtc->linedur_ns = linedur_ns;
+ crtc->framedur_ns = framedur_ns;
+
+ DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+ crtc->base.id, mode->crtc_htotal,
+ mode->crtc_vtotal, mode->crtc_vdisplay);
+ DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+ crtc->base.id, dotclock, framedur_ns,
+ linedur_ns, pixeldur_ns);
+}
+EXPORT_SYMBOL(drm_calc_timestamping_constants);
+
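As a worked example of the arithmetic above (mode numbers assumed for illustration): a 1920x1080@60 mode with crtc_clock = 148500 (kHz), crtc_htotal = 2200 and crtc_vtotal = 1125 yields pixeldur_ns = 1000000 / 148500 = 6, linedur_ns = 2200 * 1000000 / 148500 = 14814, and framedur_ns = 2200 * 1125 * 1000000 / 148500 = 16666666, i.e. the expected ~16.67 ms period of a 60 Hz frame.
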
+/**
+ * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
+ * @dev: DRM device
+ * @crtc: Which CRTC's vblank timestamp to retrieve
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs)
+ * On return contains true maximum error of timestamp
+ * @vblank_time: Pointer to struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+ * 0 = Default,
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
+ * @refcrtc: CRTC which defines scanout timing
+ * @mode: mode which defines the scanout timings
+ *
+ * Implements calculation of exact vblank timestamps from given drm_display_mode
+ * timings and current video scanout position of a CRTC. This can be called from
+ * within get_vblank_timestamp() implementation of a kms driver to implement the
+ * actual timestamping.
+ *
+ * Should return timestamps conforming to the OML_sync_control OpenML
+ * extension specification. The timestamp corresponds to the end of
+ * the vblank interval, aka start of scanout of topmost-leftmost display
+ * pixel in the following video frame.
+ *
+ * Requires support for optional dev->driver->get_scanout_position()
+ * in kms driver, plus a bit of setup code to provide a drm_display_mode
+ * that corresponds to the true scanout timing.
+ *
+ * The current implementation only handles standard video modes. It
+ * returns as a no-op if a doublescan or interlaced video mode is
+ * active. Higher level code is expected to handle this.
+ *
+ * Returns:
+ * Negative value on error, failure or if not supported in current
+ * video mode:
+ *
+ * -EINVAL - Invalid CRTC.
+ * -EAGAIN - Temporary unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP - Function not supported in current display mode.
+ * -EIO - Failed, e.g., due to failed scanout position query.
+ *
+ * Returns or'ed positive status flags on success:
+ *
+ * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
+ * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
+ *
+ */
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ const struct drm_crtc *refcrtc,
+ const struct drm_display_mode *mode)
+{
+ ktime_t stime, etime, mono_time_offset;
+ struct timeval tv_etime;
+ int vbl_status;
+ int vpos, hpos, i;
+ int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+ bool invbl;
+
+ if (crtc < 0 || crtc >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ /* Scanout position query not supported? Should not happen. */
+ if (!dev->driver->get_scanout_position) {
+ DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+ return -EIO;
+ }
+
+ /* Durations of frames, lines, pixels in nanoseconds. */
+ framedur_ns = refcrtc->framedur_ns;
+ linedur_ns = refcrtc->linedur_ns;
+ pixeldur_ns = refcrtc->pixeldur_ns;
+
+ /* If mode timing undefined, just return as no-op:
+ * Happens during initial modesetting of a crtc.
+ */
+ if (framedur_ns == 0) {
+ DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+ return -EAGAIN;
+ }
+
+ /* Get current scanout position with system timestamp.
+	 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
+	 * if a single query takes longer than max_error nanoseconds.
+ *
+ * This guarantees a tight bound on maximum error if
+ * code gets preempted or delayed for some reason.
+ */
+ for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
+ /*
+ * Get vertical and horizontal scanout position vpos, hpos,
+ * and bounding timestamps stime, etime, pre/post query.
+ */
+ vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos,
+ &hpos, &stime, &etime);
+
+ /*
+ * Get correction for CLOCK_MONOTONIC -> CLOCK_REALTIME if
+ * CLOCK_REALTIME is requested.
+ */
+ if (!drm_timestamp_monotonic)
+ mono_time_offset = ktime_get_monotonic_offset();
+
+ /* Return as no-op if scanout query unsupported or failed. */
+ if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
+ DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
+ crtc, vbl_status);
+ return -EIO;
+ }
+
+ /* Compute uncertainty in timestamp of scanout position query. */
+ duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
+
+ /* Accept result with < max_error nsecs timing uncertainty. */
+ if (duration_ns <= *max_error)
+ break;
+ }
+
+ /* Noisy system timing? */
+ if (i == DRM_TIMESTAMP_MAXRETRIES) {
+ DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
+ crtc, duration_ns/1000, *max_error/1000, i);
+ }
+
+ /* Return upper bound of timestamp precision error. */
+ *max_error = duration_ns;
+
+ /* Check if in vblank area:
+ * vpos is >=0 in video scanout area, but negative
+ * within vblank area, counting down the number of lines until
+ * start of scanout.
+ */
+ invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
+
+ /* Convert scanout position into elapsed time at raw_time query
+ * since start of scanout at first display scanline. delta_ns
+ * can be negative if start of scanout hasn't happened yet.
+ */
+ delta_ns = vpos * linedur_ns + hpos * pixeldur_ns;
+
+ if (!drm_timestamp_monotonic)
+ etime = ktime_sub(etime, mono_time_offset);
+
+ /* save this only for debugging purposes */
+ tv_etime = ktime_to_timeval(etime);
+ /* Subtract time delta from raw timestamp to get final
+ * vblank_time timestamp for end of vblank.
+ */
+ if (delta_ns < 0)
+ etime = ktime_add_ns(etime, -delta_ns);
+ else
+ etime = ktime_sub_ns(etime, delta_ns);
+ *vblank_time = ktime_to_timeval(etime);
+
+ DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
+ crtc, (int)vbl_status, hpos, vpos,
+ (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
+ (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
+ duration_ns/1000, i);
+
+ vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
+ if (invbl)
+ vbl_status |= DRM_VBLANKTIME_INVBL;
+
+ return vbl_status;
+}
+EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
+
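
As the comment block above suggests, a kms driver typically wires this helper into its get_vblank_timestamp() hook. A sketch, where foo_crtc_from_pipe() is an assumed driver-internal lookup and the crtc's hwmode carries the true scanout timing:

	static int foo_get_vblank_timestamp(struct drm_device *dev, int crtc,
					    int *max_error,
					    struct timeval *vblank_time,
					    unsigned flags)
	{
		struct drm_crtc *refcrtc = foo_crtc_from_pipe(dev, crtc);

		if (!refcrtc)
			return -EINVAL;

		return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc,
							     max_error,
							     vblank_time, flags,
							     refcrtc,
							     &refcrtc->hwmode);
	}
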
+static struct timeval get_drm_timestamp(void)
+{
+ ktime_t now;
+
+ now = ktime_get();
+ if (!drm_timestamp_monotonic)
+ now = ktime_sub(now, ktime_get_monotonic_offset());
+
+ return ktime_to_timeval(now);
+}
+
+/**
+ * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
+ * vblank interval
+ * @dev: DRM device
+ * @crtc: which CRTC's vblank timestamp to retrieve
+ * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+ * 0 = Default,
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
+ *
+ * Fetches the system timestamp corresponding to the time of the most recent
+ * vblank interval on specified CRTC. May call into kms-driver to
+ * compute the timestamp with a high-precision GPU specific method.
+ *
+ * Returns zero if timestamp originates from uncorrected do_gettimeofday()
+ * call, i.e., it isn't very precisely locked to the true vblank.
+ *
+ * Returns:
+ * Non-zero if timestamp is considered to be very precise, zero otherwise.
+ */
+u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+ struct timeval *tvblank, unsigned flags)
+{
+ int ret;
+
+ /* Define requested maximum error on timestamps (nanoseconds). */
+ int max_error = (int) drm_timestamp_precision * 1000;
+
+ /* Query driver if possible and precision timestamping enabled. */
+ if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
+ ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+ tvblank, flags);
+ if (ret > 0)
+ return (u32) ret;
+ }
+
+ /* GPU high precision timestamp query unsupported or failed.
+ * Return current monotonic/gettimeofday timestamp as best estimate.
+ */
+ *tvblank = get_drm_timestamp();
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_get_last_vbltimestamp);
+
+/**
* drm_vblank_count - retrieve "cooked" vblank counter value
* @dev: DRM device
* @crtc: which counter to retrieve
@@ -362,14 +724,92 @@ int drm_control(struct drm_device *dev, void *data,
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity.
+ *
+ * Returns:
+ * The software vblank counter.
*/
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
- return atomic_read(&dev->_vblank_count[crtc]);
+ return atomic_read(&dev->vblank[crtc].count);
}
EXPORT_SYMBOL(drm_vblank_count);
/**
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
+ * and the system timestamp corresponding to that vblank counter value.
+ *
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Returns corresponding system timestamp of the time
+ * of the vblank interval that corresponds to the current vblank counter value.
+ */
+u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+ struct timeval *vblanktime)
+{
+ u32 cur_vblank;
+
+ /* Read timestamp from slot of _vblank_time ringbuffer
+ * that corresponds to current vblank count. Retry if
+ * count has incremented during readout. This works like
+ * a seqlock.
+ */
+ do {
+ cur_vblank = atomic_read(&dev->vblank[crtc].count);
+ *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+ smp_rmb();
+ } while (cur_vblank != atomic_read(&dev->vblank[crtc].count));
+
+ return cur_vblank;
+}
+EXPORT_SYMBOL(drm_vblank_count_and_time);
+
+static void send_vblank_event(struct drm_device *dev,
+ struct drm_pending_vblank_event *e,
+ unsigned long seq, struct timeval *now)
+{
+ WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+ e->event.sequence = seq;
+ e->event.tv_sec = now->tv_sec;
+ e->event.tv_usec = now->tv_usec;
+
+ list_add_tail(&e->base.link,
+ &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+ e->event.sequence);
+}
+
+/**
+ * drm_send_vblank_event - helper to send vblank event after pageflip
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ */
+void drm_send_vblank_event(struct drm_device *dev, int crtc,
+ struct drm_pending_vblank_event *e)
+{
+ struct timeval now;
+	unsigned int seq;
+
+	if (crtc >= 0) {
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+ } else {
+ seq = 0;
+
+ now = get_drm_timestamp();
+ }
+ e->pipe = crtc;
+ send_vblank_event(dev, e, seq, &now);
+}
+EXPORT_SYMBOL(drm_send_vblank_event);
+
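
A sketch of the intended use from a page-flip completion path; struct foo_crtc and its pending_event field are assumptions for illustration:

	struct foo_crtc {
		int pipe;
		struct drm_pending_vblank_event *pending_event;
	};

	static void foo_finish_pageflip(struct drm_device *dev,
					struct foo_crtc *fcrtc)
	{
		unsigned long flags;

		/* the helper requires the event lock to be held */
		spin_lock_irqsave(&dev->event_lock, flags);
		if (fcrtc->pending_event) {
			drm_send_vblank_event(dev, fcrtc->pipe,
					      fcrtc->pending_event);
			fcrtc->pending_event = NULL;
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
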
+/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
* @crtc: counter to update
@@ -387,7 +827,8 @@ EXPORT_SYMBOL(drm_vblank_count);
*/
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
- u32 cur_vblank, diff;
+ u32 cur_vblank, diff, tslot, rc;
+ struct timeval t_vblank;
/*
* Interrupts were disabled prior to this call, so deal with counter
@@ -395,20 +836,77 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
* NOTE! It's possible we lost a full dev->max_vblank_count events
* here if the register is small or we had vblank interrupts off for
* a long time.
+ *
+	 * We repeat the hardware vblank counter & timestamp query until
+	 * we get consistent results. This prevents races between the gpu
+	 * updating its hardware counter and us retrieving the
+	 * corresponding vblank timestamp.
*/
- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
- diff = cur_vblank - dev->last_vblank[crtc];
- if (cur_vblank < dev->last_vblank[crtc]) {
+ do {
+ cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
+ } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+
+ /* Deal with counter wrap */
+ diff = cur_vblank - dev->vblank[crtc].last;
+ if (cur_vblank < dev->vblank[crtc].last) {
diff += dev->max_vblank_count;
DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
- crtc, dev->last_vblank[crtc], cur_vblank, diff);
+ crtc, dev->vblank[crtc].last, cur_vblank, diff);
}
DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
crtc, diff);
- atomic_add(diff, &dev->_vblank_count[crtc]);
+ /* Reinitialize corresponding vblank timestamp if high-precision query
+ * available. Skip this step if query unsupported or failed. Will
+ * reinitialize delayed at next vblank interrupt in that case.
+ */
+ if (rc) {
+ tslot = atomic_read(&dev->vblank[crtc].count) + diff;
+ vblanktimestamp(dev, crtc, tslot) = t_vblank;
+ }
+
+ smp_mb__before_atomic();
+ atomic_add(diff, &dev->vblank[crtc].count);
+ smp_mb__after_atomic();
+}
+
+/**
+ * drm_vblank_enable - enable the vblank interrupt on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ */
+static int drm_vblank_enable(struct drm_device *dev, int crtc)
+{
+ int ret = 0;
+
+ assert_spin_locked(&dev->vbl_lock);
+
+ spin_lock(&dev->vblank_time_lock);
+
+ if (!dev->vblank[crtc].enabled) {
+ /*
+ * Enable vblank irqs under vblank_time_lock protection.
+ * All vblank count & timestamp updates are held off
+ * until we are done reinitializing master counter and
+		 * timestamps. Filter code in drm_handle_vblank() will
+ * prevent double-accounting of same vblank interval.
+ */
+ ret = dev->driver->enable_vblank(dev, crtc);
+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+ if (ret)
+ atomic_dec(&dev->vblank[crtc].refcount);
+ else {
+ dev->vblank[crtc].enabled = true;
+ drm_update_vblank_count(dev, crtc);
+ }
+ }
+
+ spin_unlock(&dev->vblank_time_lock);
+
+ return ret;
}
/**
@@ -419,7 +917,9 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
*
- * RETURNS
+ * This is the legacy version of drm_crtc_vblank_get().
+ *
+ * Returns:
* Zero on success, nonzero on failure.
*/
int drm_vblank_get(struct drm_device *dev, int crtc)
@@ -429,20 +929,11 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
- if (!dev->vblank_enabled[crtc]) {
- ret = dev->driver->enable_vblank(dev, crtc);
- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
- if (ret)
- atomic_dec(&dev->vblank_refcount[crtc]);
- else {
- dev->vblank_enabled[crtc] = 1;
- drm_update_vblank_count(dev, crtc);
- }
- }
+ if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
+ ret = drm_vblank_enable(dev, crtc);
} else {
- if (!dev->vblank_enabled[crtc]) {
- atomic_dec(&dev->vblank_refcount[crtc]);
+ if (!dev->vblank[crtc].enabled) {
+ atomic_dec(&dev->vblank[crtc].refcount);
ret = -EINVAL;
}
}
@@ -453,46 +944,193 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
EXPORT_SYMBOL(drm_vblank_get);
/**
+ * drm_crtc_vblank_get - get a reference count on vblank events
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.
+ *
+ * This is the native kms version of drm_vblank_get().
+ *
+ * Returns:
+ * Zero on success, nonzero on failure.
+ */
+int drm_crtc_vblank_get(struct drm_crtc *crtc)
+{
+ return drm_vblank_get(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_get);
+
+/**
* drm_vblank_put - give up ownership of vblank events
* @dev: DRM device
* @crtc: which counter to give up
*
* Release ownership of a given vblank counter, turning off interrupts
- * if possible.
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
+ *
+ * This is the legacy version of drm_crtc_vblank_put().
*/
void drm_vblank_put(struct drm_device *dev, int crtc)
{
- BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0);
+ BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0);
/* Last user schedules interrupt disable */
- if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
- mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
+ if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
+ (drm_vblank_offdelay > 0))
+ mod_timer(&dev->vblank[crtc].disable_timer,
+ jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
EXPORT_SYMBOL(drm_vblank_put);
+/**
+ * drm_crtc_vblank_put - give up ownership of vblank events
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
+ *
+ * This is the native kms version of drm_vblank_put().
+ */
+void drm_crtc_vblank_put(struct drm_crtc *crtc)
+{
+ drm_vblank_put(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_put);
+
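
Usage note: every successful get must be balanced by a put. A minimal sketch, with pipe assumed valid:

	static int foo_use_vblank(struct drm_device *dev, int pipe)
	{
		int ret = drm_vblank_get(dev, pipe);

		if (ret)
			return ret;

		/* vblank irq is guaranteed to stay enabled while the
		 * reference is held */

		drm_vblank_put(dev, pipe);
		return 0;
	}
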
+/**
+ * drm_vblank_off - disable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Drivers can use this function to shut down the vblank interrupt handling when
+ * disabling a crtc. This function ensures that the latest vblank frame count is
+ * stored so that drm_vblank_on() can restore it again.
+ *
+ * Drivers must use this function when the hardware vblank counter can get
+ * reset, e.g. when suspending.
+ *
+ * This is the legacy version of drm_crtc_vblank_off().
+ */
void drm_vblank_off(struct drm_device *dev, int crtc)
{
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
unsigned long irqflags;
+ unsigned int seq;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- DRM_WAKEUP(&dev->vbl_queue[crtc]);
- dev->vblank_enabled[crtc] = 0;
- dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+ vblank_disable_and_save(dev, crtc);
+ wake_up(&dev->vblank[crtc].queue);
+
+ /* Send any queued vblank events, lest the natives grow disquiet */
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+ spin_lock(&dev->event_lock);
+ list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+ if (e->pipe != crtc)
+ continue;
+ DRM_DEBUG("Sending premature vblank event on disable: \
+ wanted %d, current %d\n",
+ e->event.sequence, seq);
+ list_del(&e->base.link);
+ drm_vblank_put(dev, e->pipe);
+ send_vblank_event(dev, e, seq, &now);
+ }
+ spin_unlock(&dev->event_lock);
+
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_off);
/**
+ * drm_crtc_vblank_off - disable vblank events on a CRTC
+ * @crtc: CRTC in question
+ *
+ * Drivers can use this function to shut down the vblank interrupt handling when
+ * disabling a crtc. This function ensures that the latest vblank frame count is
+ * stored so that drm_vblank_on can restore it again.
+ *
+ * Drivers must use this function when the hardware vblank counter can get
+ * reset, e.g. when suspending.
+ *
+ * This is the native kms version of drm_vblank_off().
+ */
+void drm_crtc_vblank_off(struct drm_crtc *crtc)
+{
+ drm_vblank_off(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_off);
+
+/**
+ * drm_vblank_on - enable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * This function restores the vblank interrupt state captured with
+ * drm_vblank_off() again. Note that calls to drm_vblank_on() and
+ * drm_vblank_off() can be unbalanced and so can also be unconditionally called
+ * in driver load code to reflect the current hardware state of the crtc.
+ *
+ * This is the legacy version of drm_crtc_vblank_on().
+ */
+void drm_vblank_on(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+	/* re-enable interrupts if there are users left */
+ if (atomic_read(&dev->vblank[crtc].refcount) != 0)
+ WARN_ON(drm_vblank_enable(dev, crtc));
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_vblank_on);
+
+/**
+ * drm_crtc_vblank_on - enable vblank events on a CRTC
+ * @crtc: CRTC in question
+ *
+ * This function restores the vblank interrupt state captured with
+ * drm_vblank_off() again. Note that calls to drm_vblank_on() and
+ * drm_vblank_off() can be unbalanced and so can also be unconditionally called
+ * in driver load code to reflect the current hardware state of the crtc.
+ *
+ * This is the native kms version of drm_vblank_on().
+ */
+void drm_crtc_vblank_on(struct drm_crtc *crtc)
+{
+ drm_vblank_on(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_on);
+
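
A sketch of how a kms driver brackets the window in which the hardware counter may reset; the foo_ crtc hooks are assumptions:

	static void foo_crtc_disable(struct drm_crtc *crtc)
	{
		drm_crtc_vblank_off(crtc);
		/* ... power down the pipe; hardware counter may reset ... */
	}

	static void foo_crtc_enable(struct drm_crtc *crtc)
	{
		/* ... power up the pipe and restore timings ... */
		drm_crtc_vblank_on(crtc);
	}
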
+/**
* drm_vblank_pre_modeset - account for vblanks across mode sets
* @dev: DRM device
* @crtc: CRTC in question
- * @post: post or pre mode set?
*
* Account for vblank events across mode setting events, which will likely
* reset the hardware frame counter.
+ *
+ * This is done by grabbing a temporary vblank reference to ensure that the
+ * vblank interrupt keeps running across the modeset sequence. With this the
+ * software-side vblank frame counting will ensure that there are no jumps or
+ * discontinuities.
+ *
+ * Unfortunately this approach is racy and also doesn't work when the vblank
+ * interrupt stops running, e.g. across system suspend resume. It is therefore
+ * highly recommended that drivers use the newer drm_vblank_off() and
+ * drm_vblank_on() instead. drm_vblank_pre_modeset() only works correctly when
+ * using "cooked" software vblank frame counters and not relying on any hardware
+ * counters.
+ *
+ * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
+ * again.
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
+	/* vblank is not initialized (IRQ not installed?), or has been freed */
+ if (!dev->num_crtcs)
+ return;
/*
* To avoid all the problems that might happen if interrupts
* were enabled/disabled around or between these calls, we just
@@ -500,32 +1138,44 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
* to avoid corrupting the count if multiple, mismatch calls occur),
* so that interrupts remain enabled in the interim.
*/
- if (!dev->vblank_inmodeset[crtc]) {
- dev->vblank_inmodeset[crtc] = 0x1;
+ if (!dev->vblank[crtc].inmodeset) {
+ dev->vblank[crtc].inmodeset = 0x1;
if (drm_vblank_get(dev, crtc) == 0)
- dev->vblank_inmodeset[crtc] |= 0x2;
+ dev->vblank[crtc].inmodeset |= 0x2;
}
}
EXPORT_SYMBOL(drm_vblank_pre_modeset);
+/**
+ * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * This function again drops the temporary vblank reference acquired in
+ * drm_vblank_pre_modeset.
+ */
void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{
unsigned long irqflags;
- if (dev->vblank_inmodeset[crtc]) {
+	/* vblank is not initialized (IRQ not installed?), or has been freed */
+ if (!dev->num_crtcs)
+ return;
+
+ if (dev->vblank[crtc].inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- dev->vblank_disable_allowed = 1;
+ dev->vblank_disable_allowed = true;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- if (dev->vblank_inmodeset[crtc] & 0x2)
+ if (dev->vblank[crtc].inmodeset & 0x2)
drm_vblank_put(dev, crtc);
- dev->vblank_inmodeset[crtc] = 0;
+ dev->vblank[crtc].inmodeset = 0;
}
}
EXPORT_SYMBOL(drm_vblank_post_modeset);
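
For a legacy (UMS) driver the pairing looks like this sketch; the foo_ name is assumed and pipe is assumed valid:

	static void foo_set_mode(struct drm_device *dev, int pipe)
	{
		drm_vblank_pre_modeset(dev, pipe);
		/* ... program the new mode; the hardware counter may
		 * reset here ... */
		drm_vblank_post_modeset(dev, pipe);
	}
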
-/**
+/*
* drm_modeset_ctl - handle vblank event counter changes across mode switch
* @DRM_IOCTL_ARGS: standard ioctl arguments
*
@@ -540,17 +1190,19 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
- int crtc, ret = 0;
+ unsigned int crtc;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
- goto out;
+ return 0;
+
+ /* KMS drivers handle this internally */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
crtc = modeset->crtc;
- if (crtc >= dev->num_crtcs) {
- ret = -EINVAL;
- goto out;
- }
+ if (crtc >= dev->num_crtcs)
+ return -EINVAL;
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
@@ -560,12 +1212,10 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
drm_vblank_post_modeset(dev, crtc);
break;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-out:
- return ret;
+ return 0;
}
static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
@@ -576,12 +1226,16 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
struct timeval now;
unsigned long flags;
unsigned int seq;
+ int ret;
e = kzalloc(sizeof *e, GFP_KERNEL);
- if (e == NULL)
- return -ENOMEM;
+ if (e == NULL) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
e->pipe = pipe;
+ e->base.pid = current->pid;
e->event.base.type = DRM_EVENT_VBLANK;
e->event.base.length = sizeof e->event;
e->event.user_data = vblwait->request.signal;
@@ -589,17 +1243,16 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
e->base.file_priv = file_priv;
e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
- do_gettimeofday(&now);
spin_lock_irqsave(&dev->event_lock, flags);
if (file_priv->event_space < sizeof e->event) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- kfree(e);
- return -ENOMEM;
+ ret = -EBUSY;
+ goto err_unlock;
}
file_priv->event_space -= sizeof e->event;
- seq = drm_vblank_count(dev, pipe);
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
+
if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
(seq - vblwait->request.sequence) <= (1 << 23)) {
vblwait->request.sequence = seq + 1;
@@ -609,23 +1262,33 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
vblwait->request.sequence, seq, pipe);
+ trace_drm_vblank_event_queued(current->pid, pipe,
+ vblwait->request.sequence);
+
e->event.sequence = vblwait->request.sequence;
if ((seq - vblwait->request.sequence) <= (1 << 23)) {
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
- drm_vblank_put(dev, e->pipe);
- list_add_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
+ drm_vblank_put(dev, pipe);
+ send_vblank_event(dev, e, seq, &now);
+ vblwait->reply.sequence = seq;
} else {
+ /* drm_handle_vblank_events will call drm_vblank_put */
list_add_tail(&e->base.link, &dev->vblank_event_list);
+ vblwait->reply.sequence = vblwait->request.sequence;
}
spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
+
+err_unlock:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e);
+err_put:
+ drm_vblank_put(dev, pipe);
+ return ret;
}
-/**
+/*
* Wait for VBLANK.
*
* \param inode device inode.
@@ -636,33 +1299,38 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
*
* This function enables the vblank interrupt on the pipe requested, then
* sleeps waiting for the requested sequence number to occur, and drops
- * the vblank interrupt refcount afterwards. (vblank irq disable follows that
+ * the vblank interrupt refcount afterwards. (vblank IRQ disable follows that
* after a timeout with no further vblank waits scheduled).
*/
int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
union drm_wait_vblank *vblwait = data;
- int ret = 0;
- unsigned int flags, seq, crtc;
+ int ret;
+ unsigned int flags, seq, crtc, high_crtc;
- if ((!dev->pdev->irq) || (!dev->irq_enabled))
+ if (!dev->irq_enabled)
return -EINVAL;
if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
return -EINVAL;
if (vblwait->request.type &
- ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
+ ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK)) {
DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
vblwait->request.type,
- (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
+ (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK));
return -EINVAL;
}
flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
- crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
-
+ high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
+ if (high_crtc)
+ crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
+ else
+ crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
if (crtc >= dev->num_crtcs)
return -EINVAL;
@@ -684,8 +1352,12 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
goto done;
}
- if (flags & _DRM_VBLANK_EVENT)
+ if (flags & _DRM_VBLANK_EVENT) {
+		/* must hold on to the vblank ref until the event fires;
+		 * drm_vblank_put will be called asynchronously
+ */
return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+ }
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
(seq - vblwait->request.sequence) <= (1<<23)) {
@@ -694,20 +1366,20 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
vblwait->request.sequence, crtc);
- dev->last_vblank_wait[crtc] = vblwait->request.sequence;
- DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+ dev->vblank[crtc].last_wait = vblwait->request.sequence;
+ DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ,
(((drm_vblank_count(dev, crtc) -
vblwait->request.sequence) <= (1 << 23)) ||
+ !dev->vblank[crtc].enabled ||
!dev->irq_enabled));
if (ret != -EINTR) {
struct timeval now;
- do_gettimeofday(&now);
-
+ vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
- vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+
DRM_DEBUG("returning %d to client\n",
vblwait->reply.sequence);
} else {
@@ -719,15 +1391,14 @@ done:
return ret;
}
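
From userspace this ioctl is normally reached through libdrm's drmWaitVBlank(). A minimal sketch, assuming fd is an open DRM device node and error handling is elided:

	#include <string.h>
	#include <xf86drm.h>

	static void wait_one_vblank(int fd)
	{
		drmVBlank vbl;

		memset(&vbl, 0, sizeof(vbl));
		vbl.request.type = DRM_VBLANK_RELATIVE;
		vbl.request.sequence = 1;	/* one vblank from now */

		drmWaitVBlank(fd, &vbl);
	}
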
-void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
{
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
unsigned int seq;
- do_gettimeofday(&now);
- seq = drm_vblank_count(dev, crtc);
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
spin_lock_irqsave(&dev->event_lock, flags);
@@ -740,15 +1411,14 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
DRM_DEBUG("vblank event on %d, current %d\n",
e->event.sequence, seq);
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ list_del(&e->base.link);
drm_vblank_put(dev, e->pipe);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
+ send_vblank_event(dev, e, seq, &now);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ trace_drm_vblank_event(crtc, seq);
}
/**
@@ -759,13 +1429,68 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
* Drivers should call this routine in their vblank interrupt handlers to
* update the vblank counter and send any signals that may be pending.
*/
-void drm_handle_vblank(struct drm_device *dev, int crtc)
+bool drm_handle_vblank(struct drm_device *dev, int crtc)
{
+ u32 vblcount;
+ s64 diff_ns;
+ struct timeval tvblank;
+ unsigned long irqflags;
+
if (!dev->num_crtcs)
- return;
+ return false;
- atomic_inc(&dev->_vblank_count[crtc]);
- DRM_WAKEUP(&dev->vbl_queue[crtc]);
+ /* Need timestamp lock to prevent concurrent execution with
+ * vblank enable/disable, as this would cause inconsistent
+ * or corrupted timestamps and vblank counts.
+ */
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+ /* Vblank irq handling disabled. Nothing to do. */
+ if (!dev->vblank[crtc].enabled) {
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ return false;
+ }
+
+ /* Fetch corresponding timestamp for this vblank interval from
+ * driver and store it in proper slot of timestamp ringbuffer.
+ */
+
+ /* Get current timestamp and count. */
+ vblcount = atomic_read(&dev->vblank[crtc].count);
+ drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+
+ /* Compute time difference to timestamp of last vblank */
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+ /* Update vblank timestamp and count if at least
+ * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
+ * difference between last stored timestamp and current
+ * timestamp. A smaller difference means basically
+ * identical timestamps. Happens if this vblank has
+ * been already processed and this is a redundant call,
+ * e.g., due to spurious vblank interrupts. We need to
+ * ignore those for accounting.
+ */
+ if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+ /* Store new timestamp in ringbuffer. */
+ vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
+
+ /* Increment cooked vblank count. This also atomically commits
+ * the timestamp computed above.
+ */
+ smp_mb__before_atomic();
+ atomic_inc(&dev->vblank[crtc].count);
+ smp_mb__after_atomic();
+ } else {
+ DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
+ crtc, (int) diff_ns);
+ }
+
+ wake_up(&dev->vblank[crtc].queue);
drm_handle_vblank_events(dev, crtc);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ return true;
}
EXPORT_SYMBOL(drm_handle_vblank);
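
A sketch of the expected call site in a driver's top-level interrupt handler; foo_read_irq_status() and FOO_IRQ_VBLANK() are assumed driver internals, not part of this patch:

	static irqreturn_t foo_irq_handler(int irq, void *arg)
	{
		struct drm_device *dev = arg;
		u32 status = foo_read_irq_status(dev);
		int pipe;

		for (pipe = 0; pipe < dev->num_crtcs; pipe++)
			if (status & FOO_IRQ_VBLANK(pipe))
				drm_handle_vblank(dev, pipe);

		return status ? IRQ_HANDLED : IRQ_NONE;
	}
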
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index e2f70a516c3..f6452682141 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -33,10 +33,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "drmP.h"
+#include <linux/export.h>
+#include <drm/drmP.h>
static int drm_notifier(void *priv);
+static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
+
/**
* Lock ioctl.
*
@@ -67,10 +70,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
lock->context, task_pid_nr(current),
master->lock.hw_lock->lock, lock->flags);
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
- if (lock->context < 0)
- return -EINVAL;
-
add_wait_queue(&master->lock.lock_queue, &entry);
spin_lock_bh(&master->lock.spinlock);
master->lock.user_waiters++;
@@ -87,12 +86,13 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
if (drm_lock_take(&master->lock, lock->context)) {
master->lock.file_priv = file_priv;
master->lock.lock_time = jiffies;
- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
}
/* Contention */
+ mutex_unlock(&drm_global_mutex);
schedule();
+ mutex_lock(&drm_global_mutex);
if (signal_pending(current)) {
ret = -EINTR;
break;
@@ -122,9 +122,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
}
- if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
- dev->driver->dma_ready(dev);
-
if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
{
if (dev->driver->dma_quiescent(dev)) {
@@ -134,12 +131,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
}
- if (dev->driver->kernel_context_switch &&
- dev->last_context != lock->context) {
- dev->driver->kernel_context_switch(dev, dev->last_context,
- lock->context);
- }
-
return 0;
}
@@ -165,17 +156,8 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
}
- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
-
- /* kernel_context_switch isn't used by any of the x86 drm
- * modules but is required by the Sparc driver.
- */
- if (dev->driver->kernel_context_switch_unlock)
- dev->driver->kernel_context_switch_unlock(dev);
- else {
- if (drm_lock_free(&master->lock, lock->context)) {
- /* FIXME: Should really bail out here. */
- }
+ if (drm_lock_free(&master->lock, lock->context)) {
+ /* FIXME: Should really bail out here. */
}
unblock_all_signals();
@@ -191,6 +173,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
*
* Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
*/
+static
int drm_lock_take(struct drm_lock_data *lock_data,
unsigned int context)
{
@@ -227,7 +210,6 @@ int drm_lock_take(struct drm_lock_data *lock_data,
}
return 0;
}
-EXPORT_SYMBOL(drm_lock_take);
/**
* This takes a lock forcibly and hands it to context. Should ONLY be used
@@ -295,7 +277,6 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
wake_up_interruptible(&lock_data->lock_queue);
return 0;
}
-EXPORT_SYMBOL(drm_lock_free);
/**
* If we get here, it means that the process has called DRM_IOCTL_LOCK
@@ -343,7 +324,7 @@ static int drm_notifier(void *priv)
void drm_idlelock_take(struct drm_lock_data *lock_data)
{
- int ret = 0;
+ int ret;
spin_lock_bh(&lock_data->spinlock);
lock_data->kernel_waiters++;
@@ -380,7 +361,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
EXPORT_SYMBOL(drm_idlelock_release);
-
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
@@ -388,5 +368,3 @@ int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
master->lock.file_priv == file_priv);
}
-
-EXPORT_SYMBOL(drm_i_have_hw_lock);
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index e4865f99989..00c67c0f238 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -34,26 +34,8 @@
*/
#include <linux/highmem.h>
-#include "drmP.h"
-
-/**
- * Called when "/proc/dri/%dev%/mem" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param len requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- *
- * No-op.
- */
-int drm_mem_info(char *buf, char **start, off_t offset,
- int len, int *eof, void *data)
-{
- return 0;
-}
+#include <linux/export.h>
+#include <drm/drmP.h>
#if __OS_HAS_AGP
static void *agp_remap(unsigned long offset, unsigned long size,
@@ -77,7 +59,7 @@ static void *agp_remap(unsigned long offset, unsigned long size,
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
(offset + size))
break;
- if (!agpmem)
+ if (&agpmem->head == &dev->agp->memory)
return NULL;
/*
@@ -99,31 +81,23 @@ static void *agp_remap(unsigned long offset, unsigned long size,
return addr;
}
-/** Wrapper around agp_allocate_memory() */
-DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type)
-{
- return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
-}
-
/** Wrapper around agp_free_memory() */
-int drm_free_agp(DRM_AGP_MEM * handle, int pages)
+void drm_free_agp(struct agp_memory *handle, int pages)
{
- return drm_agp_free_memory(handle) ? 0 : -EINVAL;
+ agp_free_memory(handle);
}
-EXPORT_SYMBOL(drm_free_agp);
/** Wrapper around agp_bind_memory() */
-int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+int drm_bind_agp(struct agp_memory *handle, unsigned int start)
{
- return drm_agp_bind_memory(handle, start);
+ return agp_bind_memory(handle, start);
}
/** Wrapper around agp_unbind_memory() */
-int drm_unbind_agp(DRM_AGP_MEM * handle)
+int drm_unbind_agp(struct agp_memory *handle)
{
- return drm_agp_unbind_memory(handle);
+ return agp_unbind_memory(handle);
}
-EXPORT_SYMBOL(drm_unbind_agp);
#else /* __OS_HAS_AGP */
static inline void *agp_remap(unsigned long offset, unsigned long size,
@@ -136,8 +110,7 @@ static inline void *agp_remap(unsigned long offset, unsigned long size,
void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+ if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap(map->offset, map->size);
@@ -146,8 +119,7 @@ EXPORT_SYMBOL(drm_core_ioremap);
void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
{
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+ if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap_wc(map->offset, map->size);
@@ -159,8 +131,7 @@ void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
if (!map->handle || !map->size)
return;
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+ if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
vunmap(map->handle);
else
iounmap(map->handle);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
new file mode 100644
index 00000000000..e633df2f68d
--- /dev/null
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -0,0 +1,329 @@
+/*
+ * MIPI DSI Bus
+ *
+ * Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_mipi_dsi.h>
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <video/mipi_display.h>
+
+static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
+{
+ return of_driver_match_device(dev, drv);
+}
+
+static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
+ .runtime_suspend = pm_generic_runtime_suspend,
+ .runtime_resume = pm_generic_runtime_resume,
+ .suspend = pm_generic_suspend,
+ .resume = pm_generic_resume,
+ .freeze = pm_generic_freeze,
+ .thaw = pm_generic_thaw,
+ .poweroff = pm_generic_poweroff,
+ .restore = pm_generic_restore,
+};
+
+static struct bus_type mipi_dsi_bus_type = {
+ .name = "mipi-dsi",
+ .match = mipi_dsi_device_match,
+ .pm = &mipi_dsi_device_pm_ops,
+};
+
+static void mipi_dsi_dev_release(struct device *dev)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+ of_node_put(dev->of_node);
+ kfree(dsi);
+}
+
+static const struct device_type mipi_dsi_device_type = {
+ .release = mipi_dsi_dev_release,
+};
+
+static struct mipi_dsi_device *mipi_dsi_device_alloc(struct mipi_dsi_host *host)
+{
+ struct mipi_dsi_device *dsi;
+
+ dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return ERR_PTR(-ENOMEM);
+
+ dsi->host = host;
+ dsi->dev.bus = &mipi_dsi_bus_type;
+ dsi->dev.parent = host->dev;
+ dsi->dev.type = &mipi_dsi_device_type;
+
+ device_initialize(&dsi->dev);
+
+ return dsi;
+}
+
+static int mipi_dsi_device_add(struct mipi_dsi_device *dsi)
+{
+ struct mipi_dsi_host *host = dsi->host;
+
+ dev_set_name(&dsi->dev, "%s.%d", dev_name(host->dev), dsi->channel);
+
+ return device_add(&dsi->dev);
+}
+
+static struct mipi_dsi_device *
+of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
+{
+ struct mipi_dsi_device *dsi;
+ struct device *dev = host->dev;
+ int ret;
+ u32 reg;
+
+ ret = of_property_read_u32(node, "reg", &reg);
+ if (ret) {
+ dev_err(dev, "device node %s has no valid reg property: %d\n",
+ node->full_name, ret);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (reg > 3) {
+ dev_err(dev, "device node %s has invalid reg property: %u\n",
+ node->full_name, reg);
+ return ERR_PTR(-EINVAL);
+ }
+
+ dsi = mipi_dsi_device_alloc(host);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to allocate DSI device %s: %ld\n",
+ node->full_name, PTR_ERR(dsi));
+ return dsi;
+ }
+
+ dsi->dev.of_node = of_node_get(node);
+ dsi->channel = reg;
+
+ ret = mipi_dsi_device_add(dsi);
+ if (ret) {
+ dev_err(dev, "failed to add DSI device %s: %d\n",
+ node->full_name, ret);
+ kfree(dsi);
+ return ERR_PTR(ret);
+ }
+
+ return dsi;
+}
+
+int mipi_dsi_host_register(struct mipi_dsi_host *host)
+{
+ struct device_node *node;
+
+ for_each_available_child_of_node(host->dev->of_node, node) {
+ /* skip nodes without reg property */
+ if (!of_find_property(node, "reg", NULL))
+ continue;
+ of_mipi_dsi_device_add(host, node);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_host_register);
+
+static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+ device_unregister(&dsi->dev);
+
+ return 0;
+}
+
+void mipi_dsi_host_unregister(struct mipi_dsi_host *host)
+{
+ device_for_each_child(host->dev, NULL, mipi_dsi_remove_device_fn);
+}
+EXPORT_SYMBOL(mipi_dsi_host_unregister);
+
+/**
+ * mipi_dsi_attach - attach a DSI device to its DSI host
+ * @dsi: DSI peripheral
+ */
+int mipi_dsi_attach(struct mipi_dsi_device *dsi)
+{
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+
+ if (!ops || !ops->attach)
+ return -ENOSYS;
+
+ return ops->attach(dsi->host, dsi);
+}
+EXPORT_SYMBOL(mipi_dsi_attach);
+
+/**
+ * mipi_dsi_detach - detach a DSI device from its DSI host
+ * @dsi: DSI peripheral
+ */
+int mipi_dsi_detach(struct mipi_dsi_device *dsi)
+{
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+
+ if (!ops || !ops->detach)
+ return -ENOSYS;
+
+ return ops->detach(dsi->host, dsi);
+}
+EXPORT_SYMBOL(mipi_dsi_detach);
+
+/**
+ * mipi_dsi_dcs_write - send DCS write command
+ * @dsi: DSI device
+ * @channel: virtual channel
+ * @data: pointer to the command followed by parameters
+ * @len: length of @data
+ */
+int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel,
+ const void *data, size_t len)
+{
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ struct mipi_dsi_msg msg = {
+ .channel = channel,
+ .tx_buf = data,
+ .tx_len = len
+ };
+
+ if (!ops || !ops->transfer)
+ return -ENOSYS;
+
+ switch (len) {
+ case 0:
+ return -EINVAL;
+ case 1:
+ msg.type = MIPI_DSI_DCS_SHORT_WRITE;
+ break;
+ case 2:
+ msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
+ break;
+ default:
+ msg.type = MIPI_DSI_DCS_LONG_WRITE;
+ break;
+ }
+
+ return ops->transfer(dsi->host, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_write);
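+
+/*
+ * Usage sketch (illustrative, not part of this patch): a one-byte DCS
+ * command, which becomes a short write, followed by a command with one
+ * parameter, which becomes a short write with parameter. The payload bytes
+ * of the second command are made up.
+ *
+ *   u8 on = MIPI_DCS_SET_DISPLAY_ON;
+ *   static const u8 seq[] = { 0xb0, 0x04 };
+ *   int ret;
+ *
+ *   ret = mipi_dsi_dcs_write(dsi, dsi->channel, &on, sizeof(on));
+ *   if (ret < 0)
+ *           return ret;
+ *   ret = mipi_dsi_dcs_write(dsi, dsi->channel, seq, sizeof(seq));
+ */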
+
+/**
+ * mipi_dsi_dcs_read - send DCS read request command
+ * @dsi: DSI device
+ * @channel: virtual channel
+ * @cmd: DCS read command
+ * @data: pointer to read buffer
+ * @len: length of @data
+ *
+ * Returns the number of bytes read on success or a negative error code on
+ * failure.
+ */
+ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel,
+ u8 cmd, void *data, size_t len)
+{
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ struct mipi_dsi_msg msg = {
+ .channel = channel,
+ .type = MIPI_DSI_DCS_READ,
+ .tx_buf = &cmd,
+ .tx_len = 1,
+ .rx_buf = data,
+ .rx_len = len
+ };
+
+ if (!ops || !ops->transfer)
+ return -ENOSYS;
+
+ return ops->transfer(dsi->host, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_read);
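+
+/*
+ * Usage sketch (illustrative, not part of this patch): reading the one-byte
+ * power mode register defined by the DCS specification.
+ *
+ *   u8 power_mode;
+ *   ssize_t ret;
+ *
+ *   ret = mipi_dsi_dcs_read(dsi, dsi->channel, MIPI_DCS_GET_POWER_MODE,
+ *                           &power_mode, sizeof(power_mode));
+ *   if (ret < 0)
+ *           return ret;
+ */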
+
+static int mipi_dsi_drv_probe(struct device *dev)
+{
+ struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+ return drv->probe(dsi);
+}
+
+static int mipi_dsi_drv_remove(struct device *dev)
+{
+ struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+ return drv->remove(dsi);
+}
+
+static void mipi_dsi_drv_shutdown(struct device *dev)
+{
+ struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+ drv->shutdown(dsi);
+}
+
+/**
+ * mipi_dsi_driver_register - register a driver for DSI devices
+ * @drv: DSI driver structure
+ */
+int mipi_dsi_driver_register(struct mipi_dsi_driver *drv)
+{
+ drv->driver.bus = &mipi_dsi_bus_type;
+ if (drv->probe)
+ drv->driver.probe = mipi_dsi_drv_probe;
+ if (drv->remove)
+ drv->driver.remove = mipi_dsi_drv_remove;
+ if (drv->shutdown)
+ drv->driver.shutdown = mipi_dsi_drv_shutdown;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(mipi_dsi_driver_register);
+
+/**
+ * mipi_dsi_driver_unregister - unregister a driver for DSI devices
+ * @drv: DSI driver structure
+ */
+void mipi_dsi_driver_unregister(struct mipi_dsi_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(mipi_dsi_driver_unregister);
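+
+/*
+ * Registration sketch for a peripheral driver (illustrative, not part of
+ * this patch; the my_panel_* names and the OF match table are made up):
+ *
+ *   static struct mipi_dsi_driver my_panel_driver = {
+ *           .probe = my_panel_probe,
+ *           .remove = my_panel_remove,
+ *           .driver = {
+ *                   .name = "my-panel",
+ *                   .of_match_table = my_panel_of_match,
+ *           },
+ *   };
+ *
+ *   ret = mipi_dsi_driver_register(&my_panel_driver);
+ */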
+
+static int __init mipi_dsi_bus_init(void)
+{
+ return bus_register(&mipi_dsi_bus_type);
+}
+postcore_initcall(mipi_dsi_bus_init);
+
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_DESCRIPTION("MIPI DSI Bus");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index cdec3297712..04a209e2b66 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -41,469 +41,814 @@
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
-#include "drmP.h"
-#include "drm_mm.h"
+#include <drm/drmP.h>
+#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
-#define MM_UNUSED_TARGET 4
+/**
+ * DOC: Overview
+ *
+ * drm_mm provides a simple range allocator. Drivers are free to use the
+ * resource allocator from the Linux core if it suits them; the upside of
+ * drm_mm is that it lives in the DRM core, which makes it easier to extend
+ * for some of the crazier special-purpose needs of GPUs.
+ *
+ * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
+ * Drivers are free to embed either of them into their own suitable
+ * data structures. drm_mm itself will not do any allocations of its own, so
+ * if drivers choose not to embed nodes they still need to allocate them
+ * themselves.
+ *
+ * The range allocator also supports reservation of preallocated blocks. This is
+ * useful for taking over initial mode setting configurations from the firmware,
+ * where an object needs to be created which exactly matches the firmware's
+ * scanout target. As long as the range is still free it can be inserted anytime
+ * after the allocator is initialized, which helps with avoiding looped
+ * dependencies in the driver load sequence.
+ *
+ * drm_mm maintains a stack of most recently freed holes, which of all
+ * simplistic data structures seems to be a fairly decent approach to
+ * clustering allocations and avoiding too much fragmentation. This means free
+ * space searches are O(num_holes). Given all the fancy features drm_mm
+ * supports, something better would be fairly complex, and since graphics
+ * thrashing is a fairly steep performance cliff anyway, this is not a real
+ * concern. Removing a node again is O(1).
+ *
+ * drm_mm supports a few features: alignment and range restrictions can be
+ * supplied. Furthermore every &drm_mm_node has a color value (which is just an
+ * opaque unsigned long) which in conjunction with a driver callback can be
+ * used to implement sophisticated placement restrictions. The i915 DRM driver
+ * uses this to implement guard pages between incompatible caching domains in
+ * the graphics TT.
+ *
+ * Two behaviors are supported for searching and allocating: bottom-up and top-down.
+ * The default is bottom-up. Top-down allocation can be used if the memory area
+ * has different restrictions, or just to reduce fragmentation.
+ *
+ * Finally, iteration helpers to walk all nodes and all holes are provided, as
+ * are some basic allocator dumpers for debugging.
+ */
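+
+/*
+ * A minimal life-cycle sketch (illustrative only, not part of this patch).
+ * The node, which would usually be embedded in a driver object, is zeroed
+ * before insertion as required; the flags pick the default bottom-up
+ * behavior, and the managed range is 1 MiB starting at 0:
+ *
+ *   struct drm_mm mm;
+ *   struct drm_mm_node node;
+ *   int err;
+ *
+ *   drm_mm_init(&mm, 0, 1024 * 1024);
+ *
+ *   memset(&node, 0, sizeof(node));
+ *   err = drm_mm_insert_node_generic(&mm, &node, 4096, 0, 0,
+ *                                    DRM_MM_SEARCH_DEFAULT,
+ *                                    DRM_MM_CREATE_DEFAULT);
+ *   if (!err)
+ *           drm_mm_remove_node(&node);
+ *
+ *   drm_mm_takedown(&mm);
+ */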
-unsigned long drm_mm_tail_space(struct drm_mm *mm)
-{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ enum drm_mm_search_flags flags);
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ enum drm_mm_search_flags flags);
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free)
- return 0;
+static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long color,
+ enum drm_mm_allocator_flags flags)
+{
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+ unsigned long adj_start = hole_start;
+ unsigned long adj_end = hole_end;
+
+ BUG_ON(node->allocated);
+
+ if (mm->color_adjust)
+ mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+
+ if (flags & DRM_MM_CREATE_TOP)
+ adj_start = adj_end - size;
+
+ if (alignment) {
+ unsigned tmp = adj_start % alignment;
+ if (tmp) {
+ if (flags & DRM_MM_CREATE_TOP)
+ adj_start -= tmp;
+ else
+ adj_start += alignment - tmp;
+ }
+ }
- return entry->size;
-}
+ BUG_ON(adj_start < hole_start);
+ BUG_ON(adj_end > hole_end);
-int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
-{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
+ if (adj_start == hole_start) {
+ hole_node->hole_follows = 0;
+ list_del(&hole_node->hole_stack);
+ }
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free)
- return -ENOMEM;
+ node->start = adj_start;
+ node->size = size;
+ node->mm = mm;
+ node->color = color;
+ node->allocated = 1;
- if (entry->size <= size)
- return -ENOMEM;
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list);
- entry->size -= size;
- return 0;
-}
+ BUG_ON(node->start + node->size > adj_end);
-static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
-{
- struct drm_mm_node *child;
-
- if (atomic)
- child = kmalloc(sizeof(*child), GFP_ATOMIC);
- else
- child = kmalloc(sizeof(*child), GFP_KERNEL);
-
- if (unlikely(child == NULL)) {
- spin_lock(&mm->unused_lock);
- if (list_empty(&mm->unused_nodes))
- child = NULL;
- else {
- child =
- list_entry(mm->unused_nodes.next,
- struct drm_mm_node, fl_entry);
- list_del(&child->fl_entry);
- --mm->num_unused;
- }
- spin_unlock(&mm->unused_lock);
+ node->hole_follows = 0;
+ if (__drm_mm_hole_node_start(node) < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
}
- return child;
}
-/* drm_mm_pre_get() - pre allocate drm_mm_node structure
- * drm_mm: memory manager struct we are pre-allocating for
+/**
+ * drm_mm_reserve_node - insert a pre-initialized node
+ * @mm: drm_mm allocator to insert @node into
+ * @node: drm_mm_node to insert
+ *
+ * This function inserts an already set-up drm_mm_node into the allocator,
+ * meaning that start, size and color must be set by the caller. This is useful
+ * to initialize the allocator with preallocated objects which must be set up
+ * before the range allocator itself can be set up, e.g. when taking over a firmware
+ * framebuffer.
*
- * Returns 0 on success or -ENOMEM if allocation fails.
+ * Returns:
+ * 0 on success, -ENOSPC if there's no hole where @node is.
*/
-int drm_mm_pre_get(struct drm_mm *mm)
+int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
- struct drm_mm_node *node;
-
- spin_lock(&mm->unused_lock);
- while (mm->num_unused < MM_UNUSED_TARGET) {
- spin_unlock(&mm->unused_lock);
- node = kmalloc(sizeof(*node), GFP_KERNEL);
- spin_lock(&mm->unused_lock);
-
- if (unlikely(node == NULL)) {
- int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
- spin_unlock(&mm->unused_lock);
- return ret;
- }
- ++mm->num_unused;
- list_add_tail(&node->fl_entry, &mm->unused_nodes);
- }
- spin_unlock(&mm->unused_lock);
- return 0;
-}
-EXPORT_SYMBOL(drm_mm_pre_get);
+ struct drm_mm_node *hole;
+ unsigned long end = node->start + node->size;
+ unsigned long hole_start;
+ unsigned long hole_end;
-static int drm_mm_create_tail_node(struct drm_mm *mm,
- unsigned long start,
- unsigned long size, int atomic)
-{
- struct drm_mm_node *child;
+ BUG_ON(node == NULL);
- child = drm_mm_kmalloc(mm, atomic);
- if (unlikely(child == NULL))
- return -ENOMEM;
+ /* Find the relevant hole to add our node to */
+ drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+ if (hole_start > node->start || hole_end < end)
+ continue;
- child->free = 1;
- child->size = size;
- child->start = start;
- child->mm = mm;
+ node->mm = mm;
+ node->allocated = 1;
- list_add_tail(&child->ml_entry, &mm->ml_entry);
- list_add_tail(&child->fl_entry, &mm->fl_entry);
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole->node_list);
- return 0;
-}
+ if (node->start == hole_start) {
+ hole->hole_follows = 0;
+ list_del_init(&hole->hole_stack);
+ }
-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
-{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
+ node->hole_follows = 0;
+ if (end != hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
+ }
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free) {
- return drm_mm_create_tail_node(mm, entry->start + entry->size,
- size, atomic);
+ return 0;
}
- entry->size += size;
- return 0;
-}
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
- unsigned long size,
- int atomic)
+ return -ENOSPC;
+}
+EXPORT_SYMBOL(drm_mm_reserve_node);
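+
+/*
+ * Usage sketch (illustrative, not part of this patch): taking over a
+ * firmware framebuffer at driver load. node is assumed to be a zeroed,
+ * driver-allocated drm_mm_node, fb_base and fb_size are assumed to come from
+ * the firmware, and -ENOSPC means the range is no longer free.
+ *
+ *   node->start = fb_base;
+ *   node->size = fb_size;
+ *   node->color = 0;
+ *   err = drm_mm_reserve_node(&mm, node);
+ */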
+
+/**
+ * drm_mm_insert_node_generic - search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocated node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for this node
+ * @sflags: flags to fine-tune the allocation search
+ * @aflags: flags to fine-tune the allocation behavior
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
+ */
+int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long color,
+ enum drm_mm_search_flags sflags,
+ enum drm_mm_allocator_flags aflags)
{
- struct drm_mm_node *child;
-
- child = drm_mm_kmalloc(parent->mm, atomic);
- if (unlikely(child == NULL))
- return NULL;
-
- INIT_LIST_HEAD(&child->fl_entry);
-
- child->free = 0;
- child->size = size;
- child->start = parent->start;
- child->mm = parent->mm;
+ struct drm_mm_node *hole_node;
- list_add_tail(&child->ml_entry, &parent->ml_entry);
- INIT_LIST_HEAD(&child->fl_entry);
+ hole_node = drm_mm_search_free_generic(mm, size, alignment,
+ color, sflags);
+ if (!hole_node)
+ return -ENOSPC;
- parent->size -= size;
- parent->start += size;
- return child;
+ drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
+ return 0;
}
+EXPORT_SYMBOL(drm_mm_insert_node_generic);
+
+static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long color,
+ unsigned long start, unsigned long end,
+ enum drm_mm_allocator_flags flags)
+{
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+ unsigned long adj_start = hole_start;
+ unsigned long adj_end = hole_end;
+
+ BUG_ON(!hole_node->hole_follows || node->allocated);
+
+ if (adj_start < start)
+ adj_start = start;
+ if (adj_end > end)
+ adj_end = end;
+
+ if (flags & DRM_MM_CREATE_TOP)
+ adj_start = adj_end - size;
+
+ if (mm->color_adjust)
+ mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+
+ if (alignment) {
+ unsigned tmp = adj_start % alignment;
+ if (tmp) {
+ if (flags & DRM_MM_CREATE_TOP)
+ adj_start -= tmp;
+ else
+ adj_start += alignment - tmp;
+ }
+ }
+ if (adj_start == hole_start) {
+ hole_node->hole_follows = 0;
+ list_del(&hole_node->hole_stack);
+ }
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- int atomic)
-{
+ node->start = adj_start;
+ node->size = size;
+ node->mm = mm;
+ node->color = color;
+ node->allocated = 1;
- struct drm_mm_node *align_splitoff = NULL;
- unsigned tmp = 0;
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list);
- if (alignment)
- tmp = node->start % alignment;
+ BUG_ON(node->start < start);
+ BUG_ON(node->start < adj_start);
+ BUG_ON(node->start + node->size > adj_end);
+ BUG_ON(node->start + node->size > end);
- if (tmp) {
- align_splitoff =
- drm_mm_split_at_start(node, alignment - tmp, atomic);
- if (unlikely(align_splitoff == NULL))
- return NULL;
+ node->hole_follows = 0;
+ if (__drm_mm_hole_node_start(node) < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
}
+}
- if (node->size == size) {
- list_del_init(&node->fl_entry);
- node->free = 0;
- } else {
- node = drm_mm_split_at_start(node, size, atomic);
- }
+/**
+ * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocated node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for this node
+ * @start: start of the allowed range for this node
+ * @end: end of the allowed range for this node
+ * @sflags: flags to fine-tune the allocation search
+ * @aflags: flags to fine-tune the allocation behavior
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
+ */
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long color,
+ unsigned long start, unsigned long end,
+ enum drm_mm_search_flags sflags,
+ enum drm_mm_allocator_flags aflags)
+{
+ struct drm_mm_node *hole_node;
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
+ hole_node = drm_mm_search_free_in_range_generic(mm,
+ size, alignment, color,
+ start, end, sflags);
+ if (!hole_node)
+ return -ENOSPC;
- return node;
+ drm_mm_insert_helper_range(hole_node, node,
+ size, alignment, color,
+ start, end, aflags);
+ return 0;
}
-EXPORT_SYMBOL(drm_mm_get_block_generic);
+EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
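+
+/*
+ * Usage sketch (illustrative, not part of this patch): a top-down allocation
+ * restricted to the first 256 MiB, e.g. to keep a buffer below a hardware
+ * addressing limit while reducing fragmentation at the bottom of the range.
+ * mm, node and size are assumed to exist as in the earlier sketch.
+ *
+ *   err = drm_mm_insert_node_in_range_generic(&mm, &node, size, 0, 0,
+ *                                             0, 256 * 1024 * 1024,
+ *                                             DRM_MM_SEARCH_BELOW,
+ *                                             DRM_MM_CREATE_TOP);
+ */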
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end,
- int atomic)
+/**
+ * drm_mm_remove_node - Remove a memory node from the allocator.
+ * @node: drm_mm_node to remove
+ *
+ * This just removes a node from its drm_mm allocator. The node does not need to
+ * be cleared again before it can be re-inserted into this or any other drm_mm
+ * allocator. It is a bug to call this function on an un-allocated node.
+ */
+void drm_mm_remove_node(struct drm_mm_node *node)
{
- struct drm_mm_node *align_splitoff = NULL;
- unsigned tmp = 0;
- unsigned wasted = 0;
-
- if (node->start < start)
- wasted += start - node->start;
- if (alignment)
- tmp = ((node->start + wasted) % alignment);
-
- if (tmp)
- wasted += alignment - tmp;
- if (wasted) {
- align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
- if (unlikely(align_splitoff == NULL))
- return NULL;
- }
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
- if (node->size == size) {
- list_del_init(&node->fl_entry);
- node->free = 0;
- } else {
- node = drm_mm_split_at_start(node, size, atomic);
- }
+ if (WARN_ON(!node->allocated))
+ return;
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
+ BUG_ON(node->scanned_block || node->scanned_prev_free
+ || node->scanned_next_free);
- return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_range_generic);
+ prev_node =
+ list_entry(node->node_list.prev, struct drm_mm_node, node_list);
-/*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
- */
+ if (node->hole_follows) {
+ BUG_ON(__drm_mm_hole_node_start(node) ==
+ __drm_mm_hole_node_end(node));
+ list_del(&node->hole_stack);
+ } else
+ BUG_ON(__drm_mm_hole_node_start(node) !=
+ __drm_mm_hole_node_end(node));
-void drm_mm_put_block(struct drm_mm_node *cur)
-{
- struct drm_mm *mm = cur->mm;
- struct list_head *cur_head = &cur->ml_entry;
- struct list_head *root_head = &mm->ml_entry;
- struct drm_mm_node *prev_node = NULL;
- struct drm_mm_node *next_node;
+ if (!prev_node->hole_follows) {
+ prev_node->hole_follows = 1;
+ list_add(&prev_node->hole_stack, &mm->hole_stack);
+ } else
+ list_move(&prev_node->hole_stack, &mm->hole_stack);
+
+ list_del(&node->node_list);
+ node->allocated = 0;
+}
+EXPORT_SYMBOL(drm_mm_remove_node);
- int merged = 0;
+static int check_free_hole(unsigned long start, unsigned long end,
+ unsigned long size, unsigned alignment)
+{
+ if (end - start < size)
+ return 0;
- if (cur_head->prev != root_head) {
- prev_node =
- list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
- if (prev_node->free) {
- prev_node->size += cur->size;
- merged = 1;
- }
- }
- if (cur_head->next != root_head) {
- next_node =
- list_entry(cur_head->next, struct drm_mm_node, ml_entry);
- if (next_node->free) {
- if (merged) {
- prev_node->size += next_node->size;
- list_del(&next_node->ml_entry);
- list_del(&next_node->fl_entry);
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&next_node->fl_entry,
- &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(next_node);
- spin_unlock(&mm->unused_lock);
- } else {
- next_node->size += cur->size;
- next_node->start = cur->start;
- merged = 1;
- }
- }
+ if (alignment) {
+ unsigned tmp = start % alignment;
+ if (tmp)
+ start += alignment - tmp;
}
- if (!merged) {
- cur->free = 1;
- list_add(&cur->fl_entry, &mm->fl_entry);
- } else {
- list_del(&cur->ml_entry);
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&cur->fl_entry, &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(cur);
- spin_unlock(&mm->unused_lock);
- }
-}
-EXPORT_SYMBOL(drm_mm_put_block);
+ return end >= start + size;
+}
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment, int best_match)
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ enum drm_mm_search_flags flags)
{
- struct list_head *list;
- const struct list_head *free_stack = &mm->fl_entry;
struct drm_mm_node *entry;
struct drm_mm_node *best;
+ unsigned long adj_start;
+ unsigned long adj_end;
unsigned long best_size;
- unsigned wasted;
+
+ BUG_ON(mm->scanned_blocks);
best = NULL;
best_size = ~0UL;
- list_for_each(list, free_stack) {
- entry = list_entry(list, struct drm_mm_node, fl_entry);
- wasted = 0;
+ __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
+ flags & DRM_MM_SEARCH_BELOW) {
+ unsigned long hole_size = adj_end - adj_start;
- if (entry->size < size)
+ if (mm->color_adjust) {
+ mm->color_adjust(entry, color, &adj_start, &adj_end);
+ if (adj_end <= adj_start)
+ continue;
+ }
+
+ if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
- if (alignment) {
- register unsigned tmp = entry->start % alignment;
- if (tmp)
- wasted += alignment - tmp;
- }
+ if (!(flags & DRM_MM_SEARCH_BEST))
+ return entry;
- if (entry->size >= size + wasted) {
- if (!best_match)
- return entry;
- if (entry->size < best_size) {
- best = entry;
- best_size = entry->size;
- }
+ if (hole_size < best_size) {
+ best = entry;
+ best_size = hole_size;
}
}
return best;
}
-EXPORT_SYMBOL(drm_mm_search_free);
-struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end,
- int best_match)
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ enum drm_mm_search_flags flags)
{
- struct list_head *list;
- const struct list_head *free_stack = &mm->fl_entry;
struct drm_mm_node *entry;
struct drm_mm_node *best;
+ unsigned long adj_start;
+ unsigned long adj_end;
unsigned long best_size;
- unsigned wasted;
+
+ BUG_ON(mm->scanned_blocks);
best = NULL;
best_size = ~0UL;
- list_for_each(list, free_stack) {
- entry = list_entry(list, struct drm_mm_node, fl_entry);
- wasted = 0;
+ __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
+ flags & DRM_MM_SEARCH_BELOW) {
+ unsigned long hole_size = adj_end - adj_start;
- if (entry->size < size)
- continue;
+ if (adj_start < start)
+ adj_start = start;
+ if (adj_end > end)
+ adj_end = end;
- if (entry->start > end || (entry->start+entry->size) < start)
- continue;
+ if (mm->color_adjust) {
+ mm->color_adjust(entry, color, &adj_start, &adj_end);
+ if (adj_end <= adj_start)
+ continue;
+ }
- if (entry->start < start)
- wasted += start - entry->start;
+ if (!check_free_hole(adj_start, adj_end, size, alignment))
+ continue;
- if (alignment) {
- register unsigned tmp = (entry->start + wasted) % alignment;
- if (tmp)
- wasted += alignment - tmp;
- }
+ if (!(flags & DRM_MM_SEARCH_BEST))
+ return entry;
- if (entry->size >= size + wasted) {
- if (!best_match)
- return entry;
- if (entry->size < best_size) {
- best = entry;
- best_size = entry->size;
- }
+ if (hole_size < best_size) {
+ best = entry;
+ best_size = hole_size;
}
}
return best;
}
-EXPORT_SYMBOL(drm_mm_search_free_in_range);
-int drm_mm_clean(struct drm_mm * mm)
+/**
+ * drm_mm_replace_node - move an allocation from @old to @new
+ * @old: drm_mm_node to remove from the allocator
+ * @new: drm_mm_node which should inherit @old's allocation
+ *
+ * This is useful when drivers embed the drm_mm_node structure and hence can't
+ * move allocations by reassigning pointers. It's a combination of remove
+ * and insert with the guarantee that the allocation start will match.
+ */
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
- struct list_head *head = &mm->ml_entry;
+ list_replace(&old->node_list, &new->node_list);
+ list_replace(&old->hole_stack, &new->hole_stack);
+ new->hole_follows = old->hole_follows;
+ new->mm = old->mm;
+ new->start = old->start;
+ new->size = old->size;
+ new->color = old->color;
+
+ old->allocated = 0;
+ new->allocated = 1;
+}
+EXPORT_SYMBOL(drm_mm_replace_node);
- return (head->next->next == head);
+/**
+ * DOC: lru scan roster
+ *
+ * Very often GPUs need contiguous allocations for a given object. When
+ * evicting objects to make space for a new one it is therefore not very
+ * efficient to simply select objects from the tail of an LRU until a suitable
+ * hole opens up: especially for big objects or nodes that otherwise have
+ * special allocation constraints there's a good chance we evict lots of
+ * (smaller) objects unnecessarily.
+ *
+ * The DRM range allocator supports this use-case through the scanning
+ * interfaces. First a scan operation needs to be initialized with
+ * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
+ * objects to the roster (probably by walking an LRU list, but this can be
+ * freely implemented) until a suitable hole is found or there's no further
+ * evictable object.
+ *
+ * Then the driver must walk through all objects again in exactly the reverse
+ * order to restore the allocator state. Note that while the allocator is used
+ * in scan mode no other operation is allowed.
+ *
+ * Finally the driver evicts all objects selected in the scan. Adding and
+ * removing an object is O(1), and since freeing a node is also O(1) the
+ * overall complexity is O(scanned_objects). So, like the free-hole stack
+ * which needs to be walked before a scan operation even begins, this is
+ * linear in the number of objects. It doesn't seem to hurt badly.
+ */
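+
+/*
+ * Eviction loop sketch (illustrative, not part of this patch; obj, next,
+ * lru, scan_list, the embedded node and evict() are made-up driver
+ * constructs, and the unwind on total failure is omitted):
+ *
+ *   drm_mm_init_scan(&mm, size, alignment, color);
+ *   list_for_each_entry(obj, &lru, lru_link) {
+ *           list_move(&obj->scan_link, &scan_list);
+ *           if (drm_mm_scan_add_block(&obj->node))
+ *                   break;
+ *   }
+ *
+ * scan_list now holds exactly the scanned objects, most recently added
+ * first, which is the reverse order required for restoring the allocator
+ * state; objects for which the remove returns true must be evicted:
+ *
+ *   list_for_each_entry_safe(obj, next, &scan_list, scan_link)
+ *           if (drm_mm_scan_remove_block(&obj->node))
+ *                   evict(obj);
+ */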
+
+/**
+ * drm_mm_init_scan - initialize lru scanning
+ * @mm: drm_mm to scan
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for the allocation
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole. Note that there's no need to specify allocation flags, since they only
+ * change the place a node is allocated from within a suitable hole.
+ *
+ * Warning:
+ * As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan(struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color)
+{
+ mm->scan_color = color;
+ mm->scan_alignment = alignment;
+ mm->scan_size = size;
+ mm->scanned_blocks = 0;
+ mm->scan_hit_start = 0;
+ mm->scan_hit_end = 0;
+ mm->scan_check_range = 0;
+ mm->prev_scanned_node = NULL;
}
-EXPORT_SYMBOL(drm_mm_clean);
+EXPORT_SYMBOL(drm_mm_init_scan);
+
+/**
+ * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
+ * @mm: drm_mm to scan
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for the allocation
+ * @start: start of the allowed range for the allocation
+ * @end: end of the allowed range for the allocation
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole. Note that there's no need to specify allocation flags, since they only
+ * change the place a node is allocated from within a suitable hole.
+ *
+ * Warning:
+ * As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan_with_range(struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end)
+{
+ mm->scan_color = color;
+ mm->scan_alignment = alignment;
+ mm->scan_size = size;
+ mm->scanned_blocks = 0;
+ mm->scan_hit_start = 0;
+ mm->scan_hit_end = 0;
+ mm->scan_start = start;
+ mm->scan_end = end;
+ mm->scan_check_range = 1;
+ mm->prev_scanned_node = NULL;
+}
+EXPORT_SYMBOL(drm_mm_init_scan_with_range);
-int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+/**
+ * drm_mm_scan_add_block - add a node to the scan list
+ * @node: drm_mm_node to add
+ *
+ * Add a node to the scan list that might be freed to make space for the desired
+ * hole.
+ *
+ * Returns:
+ * True if a hole has been found, false otherwise.
+ */
+bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
- INIT_LIST_HEAD(&mm->ml_entry);
- INIT_LIST_HEAD(&mm->fl_entry);
- INIT_LIST_HEAD(&mm->unused_nodes);
- mm->num_unused = 0;
- spin_lock_init(&mm->unused_lock);
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+ unsigned long hole_start, hole_end;
+ unsigned long adj_start, adj_end;
+
+ mm->scanned_blocks++;
+
+ BUG_ON(node->scanned_block);
+ node->scanned_block = 1;
+
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
+
+ node->scanned_preceeds_hole = prev_node->hole_follows;
+ prev_node->hole_follows = 1;
+ list_del(&node->node_list);
+ node->node_list.prev = &prev_node->node_list;
+ node->node_list.next = &mm->prev_scanned_node->node_list;
+ mm->prev_scanned_node = node;
+
+ adj_start = hole_start = drm_mm_hole_node_start(prev_node);
+ adj_end = hole_end = drm_mm_hole_node_end(prev_node);
+
+ if (mm->scan_check_range) {
+ if (adj_start < mm->scan_start)
+ adj_start = mm->scan_start;
+ if (adj_end > mm->scan_end)
+ adj_end = mm->scan_end;
+ }
+
+ if (mm->color_adjust)
+ mm->color_adjust(prev_node, mm->scan_color,
+ &adj_start, &adj_end);
+
+ if (check_free_hole(adj_start, adj_end,
+ mm->scan_size, mm->scan_alignment)) {
+ mm->scan_hit_start = hole_start;
+ mm->scan_hit_end = hole_end;
+ return true;
+ }
- return drm_mm_create_tail_node(mm, start, size, 0);
+ return false;
}
-EXPORT_SYMBOL(drm_mm_init);
+EXPORT_SYMBOL(drm_mm_scan_add_block);
-void drm_mm_takedown(struct drm_mm * mm)
+/**
+ * drm_mm_scan_remove_block - remove a node from the scan list
+ * @node: drm_mm_node to remove
+ *
+ * Nodes _must_ be removed in exactly the reverse order from which they have
+ * been added to the scan list, otherwise the internal state of the memory
+ * manager will be corrupted.
+ *
+ * When the scan list is empty, the selected memory nodes can be freed. An
+ * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
+ * return the just freed block (because it's at the top of the hole stack).
+ *
+ * Returns:
+ * True if this block should be evicted, false otherwise. Will always
+ * return false when no hole has been found.
+ */
+bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
- struct list_head *bnode = mm->fl_entry.next;
- struct drm_mm_node *entry;
- struct drm_mm_node *next;
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
- entry = list_entry(bnode, struct drm_mm_node, fl_entry);
+ mm->scanned_blocks--;
- if (entry->ml_entry.next != &mm->ml_entry ||
- entry->fl_entry.next != &mm->fl_entry) {
- DRM_ERROR("Memory manager not clean. Delaying takedown\n");
- return;
- }
+ BUG_ON(!node->scanned_block);
+ node->scanned_block = 0;
- list_del(&entry->fl_entry);
- list_del(&entry->ml_entry);
- kfree(entry);
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
- spin_lock(&mm->unused_lock);
- list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
- list_del(&entry->fl_entry);
- kfree(entry);
- --mm->num_unused;
- }
- spin_unlock(&mm->unused_lock);
+ prev_node->hole_follows = node->scanned_preceeds_hole;
+ list_add(&node->node_list, &prev_node->node_list);
+
+ return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
+ node->start < mm->scan_hit_end);
+}
+EXPORT_SYMBOL(drm_mm_scan_remove_block);
+
+/**
+ * drm_mm_clean - checks whether an allocator is clean
+ * @mm: drm_mm allocator to check
+ *
+ * Returns:
+ * True if the allocator is completely free, false if there's still a node
+ * allocated in it.
+ */
+bool drm_mm_clean(struct drm_mm * mm)
+{
+ struct list_head *head = &mm->head_node.node_list;
- BUG_ON(mm->num_unused != 0);
+ return (head->next->next == head);
+}
+EXPORT_SYMBOL(drm_mm_clean);
+
+/**
+ * drm_mm_init - initialize a drm-mm allocator
+ * @mm: the drm_mm structure to initialize
+ * @start: start of the range managed by @mm
+ * @size: size of the range managed by @mm
+ *
+ * Note that @mm must be cleared to 0 before calling this function.
+ */
+void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+{
+ INIT_LIST_HEAD(&mm->hole_stack);
+ mm->scanned_blocks = 0;
+
+ /* Clever trick to avoid a special case in the free hole tracking. */
+ INIT_LIST_HEAD(&mm->head_node.node_list);
+ INIT_LIST_HEAD(&mm->head_node.hole_stack);
+ mm->head_node.hole_follows = 1;
+ mm->head_node.scanned_block = 0;
+ mm->head_node.scanned_prev_free = 0;
+ mm->head_node.scanned_next_free = 0;
+ mm->head_node.mm = mm;
+ mm->head_node.start = start + size;
+ mm->head_node.size = start - mm->head_node.start;
+ list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+
+ mm->color_adjust = NULL;
+}
+EXPORT_SYMBOL(drm_mm_init);
+
+/**
+ * drm_mm_takedown - clean up a drm_mm allocator
+ * @mm: drm_mm allocator to clean up
+ *
+ * Note that it is a bug to call this function on an allocator which is not
+ * clean.
+ */
+void drm_mm_takedown(struct drm_mm * mm)
+{
+ WARN(!list_empty(&mm->head_node.node_list),
+ "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);
+static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
+ const char *prefix)
+{
+ unsigned long hole_start, hole_end, hole_size;
+
+ if (entry->hole_follows) {
+ hole_start = drm_mm_hole_node_start(entry);
+ hole_end = drm_mm_hole_node_end(entry);
+ hole_size = hole_end - hole_start;
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+ prefix, hole_start, hole_end,
+ hole_size);
+ return hole_size;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_mm_debug_table - dump allocator state to dmesg
+ * @mm: drm_mm allocator to dump
+ * @prefix: prefix to use for dumping to dmesg
+ */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
struct drm_mm_node *entry;
- int total_used = 0, total_free = 0, total = 0;
+ unsigned long total_used = 0, total_free = 0, total = 0;
+
+ total_free += drm_mm_debug_hole(&mm->head_node, prefix);
- list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
- printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+ drm_mm_for_each_node(entry, mm) {
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
prefix, entry->start, entry->start + entry->size,
- entry->size, entry->free ? "free" : "used");
- total += entry->size;
- if (entry->free)
- total_free += entry->size;
- else
- total_used += entry->size;
+ entry->size);
+ total_used += entry->size;
+ total_free += drm_mm_debug_hole(entry, prefix);
}
- printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+ total = total_free + total_used;
+
+ printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS)
+static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
+{
+ unsigned long hole_start, hole_end, hole_size;
+
+ if (entry->hole_follows) {
+ hole_start = drm_mm_hole_node_start(entry);
+ hole_end = drm_mm_hole_node_end(entry);
+ hole_size = hole_end - hole_start;
+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+ hole_start, hole_end, hole_size);
+ return hole_size;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_mm_dump_table - dump allocator state to a seq_file
+ * @m: seq_file to dump to
+ * @mm: drm_mm allocator to dump
+ */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
struct drm_mm_node *entry;
- int total_used = 0, total_free = 0, total = 0;
-
- list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
- seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
- total += entry->size;
- if (entry->free)
- total_free += entry->size;
- else
- total_used += entry->size;
+ unsigned long total_used = 0, total_free = 0, total = 0;
+
+ total_free += drm_mm_dump_hole(m, &mm->head_node);
+
+ drm_mm_for_each_node(entry, mm) {
+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
+ entry->start, entry->start + entry->size,
+ entry->size);
+ total_used += entry->size;
+ total_free += drm_mm_dump_hole(m, entry);
}
- seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
+ total = total_free + total_used;
+
+ seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 6d81a02463a..bedf1894e17 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1,9 +1,4 @@
/*
- * The list_sort function is (presumably) licensed under the GPL (see the
- * top level "COPYING" file for details).
- *
- * The remainder of this file is:
- *
* Copyright © 1997-2003 by The XFree86 Project, Inc.
* Copyright © 2007 Dave Airlie
* Copyright © 2007-2008 Intel Corporation
@@ -36,21 +31,23 @@
*/
#include <linux/list.h>
-#include "drmP.h"
-#include "drm.h"
-#include "drm_crtc.h"
+#include <linux/list_sort.h>
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+#include <drm/drm_modes.h>
+
+#include "drm_crtc_internal.h"
/**
- * drm_mode_debug_printmodeline - debug print a mode
- * @dev: DRM device
+ * drm_mode_debug_printmodeline - print a mode to dmesg
* @mode: mode to print
*
- * LOCKING:
- * None.
- *
* Describe @mode using DRM_DEBUG.
*/
-void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
+void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
{
DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
"0x%x 0x%x\n",
@@ -63,33 +60,97 @@ void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
EXPORT_SYMBOL(drm_mode_debug_printmodeline);
/**
- * drm_cvt_mode -create a modeline based on CVT algorithm
+ * drm_mode_create - create a new display mode
* @dev: DRM device
- * @hdisplay: hdisplay size
- * @vdisplay: vdisplay size
- * @vrefresh : vrefresh rate
- * @reduced : Whether the GTF calculation is simplified
- * @interlaced:Whether the interlace is supported
*
- * LOCKING:
- * none.
+ * Create a new, cleared drm_display_mode with kzalloc, allocate an ID for it
+ * and return it.
+ *
+ * Returns:
+ * Pointer to new mode on success, NULL on error.
+ */
+struct drm_display_mode *drm_mode_create(struct drm_device *dev)
+{
+ struct drm_display_mode *nmode;
+
+ nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
+ if (!nmode)
+ return NULL;
+
+ if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
+ kfree(nmode);
+ return NULL;
+ }
+
+ return nmode;
+}
+EXPORT_SYMBOL(drm_mode_create);
+
+/**
+ * drm_mode_destroy - remove a mode
+ * @dev: DRM device
+ * @mode: mode to remove
+ *
+ * Release @mode's unique ID, then free the @mode structure itself using kfree.
+ */
+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+{
+ if (!mode)
+ return;
+
+ drm_mode_object_put(dev, &mode->base);
+
+ kfree(mode);
+}
+EXPORT_SYMBOL(drm_mode_destroy);
+
+/**
+ * drm_mode_probed_add - add a mode to a connector's probed_modes list
+ * @connector: connector the new mode is attached to
+ * @mode: mode data
*
- * return the modeline based on CVT algorithm
+ * Add @mode to @connector's probed_modes list for later use. This list should
+ * then in a second step get filtered, and all the modes actually supported by
+ * the hardware moved to @connector's modes list.
+ */
+void drm_mode_probed_add(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
+
+ list_add_tail(&mode->head, &connector->probed_modes);
+}
+EXPORT_SYMBOL(drm_mode_probed_add);
+
+/**
+ * drm_cvt_mode - create a modeline based on the CVT algorithm
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @reduced: whether to use reduced blanking
+ * @interlaced: whether to compute an interlaced mode
+ * @margins: whether to add margins (borders)
*
 * This function is called to generate the modeline based on the CVT algorithm
 * according to the hdisplay, vdisplay and vrefresh values.
 * It is based on the VESA(TM) Coordinated Video Timing Generator by
 * Graham Loveridge, April 9, 2003, available at
- * http://www.vesa.org/public/CVT/CVTd6r1.xls
+ * http://www.elo.utfsm.cl/~elo212/docs/CVTd6r1.xls
 *
 * It is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c and
 * translated to use integer calculations.
+ *
+ * Returns:
+ * The modeline based on the CVT algorithm stored in a drm_display_mode object.
+ * The display mode object is allocated with drm_mode_create(). Returns NULL
+ * when no mode could be allocated.
*/
-#define HV_FACTOR 1000
struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
int vdisplay, int vrefresh,
bool reduced, bool interlaced, bool margins)
{
+#define HV_FACTOR 1000
/* 1) top/bottom margin size (% of height) - default: 1.8, */
#define CVT_MARGIN_PERCENTAGE 18
/* 2) character cell horizontal granularity (pixels) - default 8 */
@@ -255,15 +316,20 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
/* Fill in HSync values */
drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
- drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC;
+ drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
+ /* Fill in VSync values */
+ drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
+ drm_mode->vsync_end = drm_mode->vsync_start + vsync;
}
/* 15/13. Find pixel clock frequency (kHz for xf86) */
drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
/* 18/16. Find actual vertical frame frequency */
/* ignore - just set the mode flag for interlaced */
- if (interlaced)
+ if (interlaced) {
drm_mode->vtotal *= 2;
+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ }
/* Fill the mode line name */
drm_mode_set_name(drm_mode);
if (reduced)
@@ -272,43 +338,37 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
else
drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NHSYNC);
- if (interlaced)
- drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
- return drm_mode;
+ return drm_mode;
}
EXPORT_SYMBOL(drm_cvt_mode);
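+
+/*
+ * Usage sketch (illustrative, not part of this patch): a connector's
+ * .get_modes hook generating a single 1024x768@60 CVT mode; the my_* name
+ * is made up.
+ *
+ *   static int my_connector_get_modes(struct drm_connector *connector)
+ *   {
+ *           struct drm_display_mode *mode;
+ *
+ *           mode = drm_cvt_mode(connector->dev, 1024, 768, 60,
+ *                               false, false, false);
+ *           if (!mode)
+ *                   return 0;
+ *
+ *           drm_mode_probed_add(connector, mode);
+ *           return 1;
+ *   }
+ */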
/**
- * drm_gtf_mode - create the modeline based on GTF algorithm
- *
- * @dev :drm device
- * @hdisplay :hdisplay size
- * @vdisplay :vdisplay size
- * @vrefresh :vrefresh rate.
- * @interlaced :whether the interlace is supported
- * @margins :whether the margin is supported
- *
- * LOCKING.
- * none.
- *
- * return the modeline based on GTF algorithm
+ * drm_gtf_mode_complex - create the modeline based on the full GTF algorithm
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate.
+ * @interlaced: whether to compute an interlaced mode
+ * @margins: desired margin (borders) size
+ * @GTF_M: extended GTF formula parameters
+ * @GTF_2C: extended GTF formula parameters
+ * @GTF_K: extended GTF formula parameters
+ * @GTF_2J: extended GTF formula parameters
*
- * This function is to create the modeline based on the GTF algorithm.
- * Generalized Timing Formula is derived from:
- * GTF Spreadsheet by Andy Morrish (1/5/97)
- * available at http://www.vesa.org
+ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
+ * in here multiplied by two. For a C of 40, pass in 80.
*
- * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
- * What I have done is to translate it by using integer calculation.
- * I also refer to the function of fb_get_mode in the file of
- * drivers/video/fbmon.c
+ * Returns:
+ * The modeline based on the full GTF algorithm stored in a drm_display_mode object.
+ * The display mode object is allocated with drm_mode_create(). Returns NULL
+ * when no mode could be allocated.
*/
-struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
- int vdisplay, int vrefresh,
- bool interlaced, int margins)
-{
- /* 1) top/bottom margin size (% of height) - default: 1.8, */
+struct drm_display_mode *
+drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
+ int vrefresh, bool interlaced, int margins,
+ int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
+{
+ /* 1) top/bottom margin size (% of height) - default: 1.8, */
#define GTF_MARGIN_PERCENTAGE 18
/* 2) character cell horizontal granularity (pixels) - default 8 */
#define GTF_CELL_GRAN 8
@@ -320,17 +380,9 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
#define H_SYNC_PERCENT 8
/* min time of vsync + back porch (microsec) */
#define MIN_VSYNC_PLUS_BP 550
- /* blanking formula gradient */
-#define GTF_M 600
- /* blanking formula offset */
-#define GTF_C 40
- /* blanking formula scaling factor */
-#define GTF_K 128
- /* blanking formula scaling factor */
-#define GTF_J 20
/* C' and M' are part of the Blanking Duty Cycle computation */
-#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
-#define GTF_M_PRIME (GTF_K * GTF_M / 256)
+#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
+#define GTF_M_PRIME (GTF_K * GTF_M / 256)
struct drm_display_mode *drm_mode;
unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
int top_margin, bottom_margin;
@@ -464,104 +516,166 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
drm_mode->clock = pixel_freq;
- drm_mode_set_name(drm_mode);
- drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
-
if (interlaced) {
drm_mode->vtotal *= 2;
drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
+ drm_mode_set_name(drm_mode);
+ if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
+ drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
+ else
+ drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
+
return drm_mode;
}
-EXPORT_SYMBOL(drm_gtf_mode);
+EXPORT_SYMBOL(drm_gtf_mode_complex);
+
/**
- * drm_mode_set_name - set the name on a mode
- * @mode: name will be set in this mode
+ * drm_gtf_mode - create the modeline based on the GTF algorithm
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate.
+ * @interlaced: whether to compute an interlaced mode
+ * @margins: desired margin (borders) size
*
- * LOCKING:
- * None.
+ * This function creates a modeline based on the GTF algorithm.
+ * The Generalized Timing Formula is derived from:
+ * GTF Spreadsheet by Andy Morrish (1/5/97)
+ * available at http://www.vesa.org
 *
- * Set the name of @mode to a standard format.
+ * It is copied from xserver/hw/xfree86/modes/xf86gtf.c and translated to use
+ * integer calculations, with reference to fb_get_mode in
+ * drivers/video/fbmon.c.
+ *
+ * Standard GTF parameters:
+ * M = 600
+ * C = 40
+ * K = 128
+ * J = 20
+ *
+ * Returns:
+ * The modeline based on the GTF algorithm stored in a drm_display_mode object.
+ * The display mode object is allocated with drm_mode_create(). Returns NULL
+ * when no mode could be allocated.
*/
-void drm_mode_set_name(struct drm_display_mode *mode)
+struct drm_display_mode *
+drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
+ bool interlaced, int margins)
{
- snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
- mode->vdisplay);
+ return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh,
+ interlaced, margins,
+ 600, 40 * 2, 128, 20 * 2);
}
-EXPORT_SYMBOL(drm_mode_set_name);
+EXPORT_SYMBOL(drm_gtf_mode);
+#ifdef CONFIG_VIDEOMODE_HELPERS
/**
- * drm_mode_list_concat - move modes from one list to another
- * @head: source list
- * @new: dst list
- *
- * LOCKING:
- * Caller must ensure both lists are locked.
+ * drm_display_mode_from_videomode - fill in @dmode using @vm
+ * @vm: videomode structure to use as source
+ * @dmode: drm_display_mode structure to use as destination
*
- * Move all the modes from @head to @new.
+ * Fills out @dmode using the display mode specified in @vm.
*/
-void drm_mode_list_concat(struct list_head *head, struct list_head *new)
+void drm_display_mode_from_videomode(const struct videomode *vm,
+ struct drm_display_mode *dmode)
{
-
- struct list_head *entry, *tmp;
-
- list_for_each_safe(entry, tmp, head) {
- list_move_tail(entry, new);
- }
+ dmode->hdisplay = vm->hactive;
+ dmode->hsync_start = dmode->hdisplay + vm->hfront_porch;
+ dmode->hsync_end = dmode->hsync_start + vm->hsync_len;
+ dmode->htotal = dmode->hsync_end + vm->hback_porch;
+
+ dmode->vdisplay = vm->vactive;
+ dmode->vsync_start = dmode->vdisplay + vm->vfront_porch;
+ dmode->vsync_end = dmode->vsync_start + vm->vsync_len;
+ dmode->vtotal = dmode->vsync_end + vm->vback_porch;
+
+ dmode->clock = vm->pixelclock / 1000;
+
+ dmode->flags = 0;
+ if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
+ dmode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW)
+ dmode->flags |= DRM_MODE_FLAG_NHSYNC;
+ if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
+ dmode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW)
+ dmode->flags |= DRM_MODE_FLAG_NVSYNC;
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ dmode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (vm->flags & DISPLAY_FLAGS_DOUBLESCAN)
+ dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
+ dmode->flags |= DRM_MODE_FLAG_DBLCLK;
+ drm_mode_set_name(dmode);
}
-EXPORT_SYMBOL(drm_mode_list_concat);
+EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
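+
+/*
+ * Conversion sketch (illustrative, not part of this patch) using the
+ * standard VESA 1024x768@60 timings; the resulting mode has its clock in
+ * kHz, i.e. mode.clock ends up as 65000.
+ *
+ *   struct videomode vm = {
+ *           .pixelclock = 65000000,
+ *           .hactive = 1024, .hfront_porch = 24,
+ *           .hsync_len = 136, .hback_porch = 160,
+ *           .vactive = 768, .vfront_porch = 3,
+ *           .vsync_len = 6, .vback_porch = 29,
+ *   };
+ *   struct drm_display_mode mode = { };
+ *
+ *   drm_display_mode_from_videomode(&vm, &mode);
+ */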
+#ifdef CONFIG_OF
/**
- * drm_mode_width - get the width of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's width (hdisplay) value.
+ * of_get_drm_display_mode - get a drm_display_mode from devicetree
+ * @np: device_node with the timing specification
+ * @dmode: will be set to the return value
+ * @index: index into the list of display timings in devicetree
*
- * FIXME: is this needed?
+ * This function is expensive and should only be used if only one mode is to
+ * be read from DT. To get multiple modes start with of_get_display_timings
+ * and work with that instead.
*
- * RETURNS:
- * @mode->hdisplay
+ * Returns:
+ * 0 on success, a negative errno code when no OF videomode was found.
*/
-int drm_mode_width(struct drm_display_mode *mode)
+int of_get_drm_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode, int index)
{
- return mode->hdisplay;
+ struct videomode vm;
+ int ret;
+
+ ret = of_get_videomode(np, &vm, index);
+ if (ret)
+ return ret;
+
+ drm_display_mode_from_videomode(&vm, dmode);
+
+ pr_debug("%s: got %dx%d display mode from %s\n",
+ of_node_full_name(np), vm.hactive, vm.vactive, np->name);
+ drm_mode_debug_printmodeline(dmode);
+
+ return 0;
}
-EXPORT_SYMBOL(drm_mode_width);
+EXPORT_SYMBOL_GPL(of_get_drm_display_mode);
+#endif /* CONFIG_OF */
+#endif /* CONFIG_VIDEOMODE_HELPERS */
/**
- * drm_mode_height - get the height of a mode
- * @mode: mode
- *
- * LOCKING:
- * None.
- *
- * Return @mode's height (vdisplay) value.
- *
- * FIXME: is this needed?
+ * drm_mode_set_name - set the name on a mode
+ * @mode: name will be set in this mode
*
- * RETURNS:
- * @mode->vdisplay
+ * Set the name of @mode to a standard format which is <hdisplay>x<vdisplay>
+ * with an optional 'i' suffix for interlaced modes.
*/
-int drm_mode_height(struct drm_display_mode *mode)
+void drm_mode_set_name(struct drm_display_mode *mode)
{
- return mode->vdisplay;
+ bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+ mode->hdisplay, mode->vdisplay,
+ interlaced ? "i" : "");
}
-EXPORT_SYMBOL(drm_mode_height);
+EXPORT_SYMBOL(drm_mode_set_name);
 /**
  * drm_mode_hsync - get the hsync of a mode
* @mode: mode
*
- * LOCKING:
- * None.
- *
- * Return @modes's hsync rate in kHz, rounded to the nearest int.
+ * Returns:
+ * @mode's hsync rate in kHz, rounded to the nearest integer. Calculates the
+ * value first if it is not yet set.
*/
-int drm_mode_hsync(struct drm_display_mode *mode)
+int drm_mode_hsync(const struct drm_display_mode *mode)
{
unsigned int calc_val;
@@ -583,19 +697,11 @@ EXPORT_SYMBOL(drm_mode_hsync);
* drm_mode_vrefresh - get the vrefresh of a mode
* @mode: mode
*
- * LOCKING:
- * None.
- *
- * Return @mode's vrefresh rate in Hz or calculate it if necessary.
- *
- * FIXME: why is this needed? shouldn't vrefresh be set already?
- *
- * RETURNS:
- * Vertical refresh rate. It will be the result of actual value plus 0.5.
- * If it is 70.288, it will return 70Hz.
- * If it is 59.6, it will return 60Hz.
+ * Returns:
+ * @mode's vrefresh rate in Hz, rounded to the nearest integer. Calculates the
+ * value first if it is not yet set.
*/
-int drm_mode_vrefresh(struct drm_display_mode *mode)
+int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
int refresh = 0;
unsigned int calc_val;
@@ -622,20 +728,24 @@ int drm_mode_vrefresh(struct drm_display_mode *mode)
EXPORT_SYMBOL(drm_mode_vrefresh);
/**
- * drm_mode_set_crtcinfo - set CRTC modesetting parameters
+ * drm_mode_set_crtcinfo - set CRTC modesetting timing parameters
* @p: mode
- * @adjust_flags: unused? (FIXME)
+ * @adjust_flags: a combination of adjustment flags
*
- * LOCKING:
- * None.
+ * Setup the CRTC modesetting timing parameters for @p, adjusting if necessary.
*
- * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
+ * interlaced modes.
+ * - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
+ * buffers containing two eyes (only adjust the timings when needed, e.g. for
+ * "frame packing" or "side by side full").
*/
void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
{
if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
return;
+ p->crtc_clock = p->clock;
p->crtc_hdisplay = p->hdisplay;
p->crtc_hsync_start = p->hsync_start;
p->crtc_hsync_end = p->hsync_end;
@@ -653,8 +763,6 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
p->crtc_vsync_end /= 2;
p->crtc_vtotal /= 2;
}
-
- p->crtc_vtotal |= 1;
}
if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
@@ -671,41 +779,68 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
p->crtc_vtotal *= p->vscan;
}
+ if (adjust_flags & CRTC_STEREO_DOUBLE) {
+ unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
+
+ switch (layout) {
+ case DRM_MODE_FLAG_3D_FRAME_PACKING:
+ p->crtc_clock *= 2;
+ p->crtc_vdisplay += p->crtc_vtotal;
+ p->crtc_vsync_start += p->crtc_vtotal;
+ p->crtc_vsync_end += p->crtc_vtotal;
+ p->crtc_vtotal += p->crtc_vtotal;
+ break;
+ }
+ }
+
p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
-
- p->crtc_hadjusted = false;
- p->crtc_vadjusted = false;
}
EXPORT_SYMBOL(drm_mode_set_crtcinfo);
+/**
+ * drm_mode_copy - copy the mode
+ * @dst: mode to overwrite
+ * @src: mode to copy
+ *
+ * Copy an existing mode into another mode, preserving the object id and
+ * list head of the destination mode.
+ */
+void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
+{
+ int id = dst->base.id;
+ struct list_head head = dst->head;
+
+ *dst = *src;
+ dst->base.id = id;
+ dst->head = head;
+}
+EXPORT_SYMBOL(drm_mode_copy);
/**
* drm_mode_duplicate - allocate and duplicate an existing mode
- * @m: mode to duplicate
- *
- * LOCKING:
- * None.
+ * @dev: drm_device to allocate the duplicated mode for
+ * @mode: mode to duplicate
*
* Just allocate a new mode, copy the existing mode into it, and return
* a pointer to it. Used to create new instances of established modes.
+ *
+ * Returns:
+ * Pointer to duplicated mode on success, NULL on error.
*/
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_display_mode *nmode;
- int new_id;
nmode = drm_mode_create(dev);
if (!nmode)
return NULL;
- new_id = nmode->base.id;
- *nmode = *mode;
- nmode->base.id = new_id;
- INIT_LIST_HEAD(&nmode->head);
+ drm_mode_copy(nmode, mode);
+
return nmode;
}
EXPORT_SYMBOL(drm_mode_duplicate);
@@ -715,15 +850,12 @@ EXPORT_SYMBOL(drm_mode_duplicate);
* @mode1: first mode
* @mode2: second mode
*
- * LOCKING:
- * None.
- *
* Check to see if @mode1 and @mode2 are equivalent.
*
- * RETURNS:
+ * Returns:
* True if the modes are equal, false otherwise.
*/
-bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
+bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
{
/* do clock check convert to PICOS so fb modes get matched
* the same */
@@ -733,6 +865,28 @@ bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mod
} else if (mode1->clock != mode2->clock)
return false;
+ if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
+ (mode2->flags & DRM_MODE_FLAG_3D_MASK))
+ return false;
+
+ return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
+}
+EXPORT_SYMBOL(drm_mode_equal);
+
+/**
+ * drm_mode_equal_no_clocks_no_stereo - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * Check to see if @mode1 and @mode2 are equivalent, but
+ * don't check the pixel clocks nor the stereo layout.
+ *
+ * Returns:
+ * True if the modes are equal, false otherwise.
+ */
+bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
+ const struct drm_display_mode *mode2)
+{
if (mode1->hdisplay == mode2->hdisplay &&
mode1->hsync_start == mode2->hsync_start &&
mode1->hsync_end == mode2->hsync_end &&
@@ -743,12 +897,13 @@ bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mod
mode1->vsync_end == mode2->vsync_end &&
mode1->vtotal == mode2->vtotal &&
mode1->vscan == mode2->vscan &&
- mode1->flags == mode2->flags)
+ (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
+ (mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
return true;
return false;
}
-EXPORT_SYMBOL(drm_mode_equal);
+EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
/**
* drm_mode_validate_size - make sure modes adhere to size constraints
@@ -756,25 +911,19 @@ EXPORT_SYMBOL(drm_mode_equal);
* @mode_list: list of modes to check
* @maxX: maximum width
* @maxY: maximum height
- * @maxPitch: max pitch
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
*
- * The DRM device (@dev) has size and pitch limits. Here we validate the
- * modes we probed for @dev against those limits and set their status as
- * necessary.
+ * This function is a helper which can be used to validate modes against size
+ * limitations of the DRM device/connector. If a mode is too big its status
+ * member is updated with the appropriate validation failure code. The list
+ * itself is not changed.
*/
void drm_mode_validate_size(struct drm_device *dev,
struct list_head *mode_list,
- int maxX, int maxY, int maxPitch)
+ int maxX, int maxY)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, mode_list, head) {
- if (maxPitch > 0 && mode->hdisplay > maxPitch)
- mode->status = MODE_BAD_WIDTH;
-
if (maxX > 0 && mode->hdisplay > maxX)
mode->status = MODE_VIRTUAL_X;
@@ -785,54 +934,15 @@ void drm_mode_validate_size(struct drm_device *dev,
EXPORT_SYMBOL(drm_mode_validate_size);
/**
- * drm_mode_validate_clocks - validate modes against clock limits
- * @dev: DRM device
- * @mode_list: list of modes to check
- * @min: minimum clock rate array
- * @max: maximum clock rate array
- * @n_ranges: number of clock ranges (size of arrays)
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Some code may need to check a mode list against the clock limits of the
- * device in question. This function walks the mode list, testing to make
- * sure each mode falls within a given range (defined by @min and @max
- * arrays) and sets @mode->status as needed.
- */
-void drm_mode_validate_clocks(struct drm_device *dev,
- struct list_head *mode_list,
- int *min, int *max, int n_ranges)
-{
- struct drm_display_mode *mode;
- int i;
-
- list_for_each_entry(mode, mode_list, head) {
- bool good = false;
- for (i = 0; i < n_ranges; i++) {
- if (mode->clock >= min[i] && mode->clock <= max[i]) {
- good = true;
- break;
- }
- }
- if (!good)
- mode->status = MODE_CLOCK_RANGE;
- }
-}
-EXPORT_SYMBOL(drm_mode_validate_clocks);
-
-/**
* drm_mode_prune_invalid - remove invalid modes from mode list
* @dev: DRM device
* @mode_list: list of modes to check
* @verbose: be verbose about it
*
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Once mode list generation is complete, a caller can use this routine to
- * remove invalid modes from a mode list. If any of the modes have a
- * status other than %MODE_OK, they are removed from @mode_list and freed.
+ * This helper function can be used to prune a display mode list after
+ * validation has been completed. All modes whose status is not MODE_OK will be
+ * removed from the list, and if @verbose the status code and mode name are also
+ * printed to dmesg.
*/
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose)
@@ -855,20 +965,18 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
/**
* drm_mode_compare - compare modes for favorability
+ * @priv: unused
* @lh_a: list_head for first mode
* @lh_b: list_head for second mode
*
- * LOCKING:
- * None.
- *
* Compare two modes, given by @lh_a and @lh_b, returning a value indicating
* which is better.
*
- * RETURNS:
+ * Returns:
* Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
* positive if @lh_b is better than @lh_a.
*/
-static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
+static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b)
{
struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
@@ -881,122 +989,48 @@ static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
if (diff)
return diff;
- diff = b->clock - a->clock;
- return diff;
-}
-
-/* FIXME: what we don't have a list sort function? */
-/* list sort from Mark J Roberts (mjr@znex.org) */
-void list_sort(struct list_head *head,
- int (*cmp)(struct list_head *a, struct list_head *b))
-{
- struct list_head *p, *q, *e, *list, *tail, *oldhead;
- int insize, nmerges, psize, qsize, i;
-
- list = head->next;
- list_del(head);
- insize = 1;
- for (;;) {
- p = oldhead = list;
- list = tail = NULL;
- nmerges = 0;
-
- while (p) {
- nmerges++;
- q = p;
- psize = 0;
- for (i = 0; i < insize; i++) {
- psize++;
- q = q->next == oldhead ? NULL : q->next;
- if (!q)
- break;
- }
- qsize = insize;
- while (psize > 0 || (qsize > 0 && q)) {
- if (!psize) {
- e = q;
- q = q->next;
- qsize--;
- if (q == oldhead)
- q = NULL;
- } else if (!qsize || !q) {
- e = p;
- p = p->next;
- psize--;
- if (p == oldhead)
- p = NULL;
- } else if (cmp(p, q) <= 0) {
- e = p;
- p = p->next;
- psize--;
- if (p == oldhead)
- p = NULL;
- } else {
- e = q;
- q = q->next;
- qsize--;
- if (q == oldhead)
- q = NULL;
- }
- if (tail)
- tail->next = e;
- else
- list = e;
- e->prev = tail;
- tail = e;
- }
- p = q;
- }
-
- tail->next = list;
- list->prev = tail;
-
- if (nmerges <= 1)
- break;
-
- insize *= 2;
- }
+ diff = b->vrefresh - a->vrefresh;
+ if (diff)
+ return diff;
- head->next = list;
- head->prev = list->prev;
- list->prev->next = head;
- list->prev = head;
+ diff = b->clock - a->clock;
+ return diff;
}
/**
* drm_mode_sort - sort mode list
- * @mode_list: list to sort
+ * @mode_list: list of drm_display_mode structures to sort
*
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Sort @mode_list by favorability, putting good modes first.
+ * Sort @mode_list by favorability, moving good modes to the head of the list.
*/
void drm_mode_sort(struct list_head *mode_list)
{
- list_sort(mode_list, drm_mode_compare);
+ list_sort(NULL, mode_list, drm_mode_compare);
}
EXPORT_SYMBOL(drm_mode_sort);
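A worked example of the resulting order, given the comparator above: 1920x1080@60 sorts before 1280x1024@75 (larger area wins), and 1280x1024@75 sorts before 1280x1024@60 (equal area, higher vrefresh wins); the pixel clock only breaks remaining ties.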
/**
* drm_mode_connector_list_update - update the mode list for the connector
* @connector: the connector to update
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
+ * @merge_type_bits: whether to merge or overwrite type bits.
*
* This moves the modes from the @connector probed_modes list
* to the actual mode list. It compares the probed mode against the current
- * list and only adds different modes. All modes unverified after this point
- * will be removed by the prune invalid modes.
+ * list and only adds different/new modes.
+ *
+ * This is just a helper function; it doesn't validate any modes itself and also
+ * doesn't prune any invalid modes. Callers need to do that themselves.
*/
-void drm_mode_connector_list_update(struct drm_connector *connector)
+void drm_mode_connector_list_update(struct drm_connector *connector,
+ bool merge_type_bits)
{
struct drm_display_mode *mode;
struct drm_display_mode *pmode, *pt;
int found_it;
+ WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
+
list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
head) {
found_it = 0;
@@ -1007,7 +1041,10 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
/* if equal delete the probed mode */
mode->status = pmode->status;
/* Merge type bits together */
- mode->type |= pmode->type;
+ if (merge_type_bits)
+ mode->type |= pmode->type;
+ else
+ mode->type = pmode->type;
list_del(&pmode->head);
drm_mode_destroy(connector->dev, pmode);
break;
@@ -1020,3 +1057,209 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
}
}
EXPORT_SYMBOL(drm_mode_connector_list_update);
+
+/**
+ * drm_mode_parse_command_line_for_connector - parse command line modeline for connector
+ * @mode_option: optional per connector mode option
+ * @connector: connector to parse modeline for
+ * @mode: preallocated drm_cmdline_mode structure to fill out
+ *
+ * This parses @mode_option command line modeline for modes and options to
+ * configure the connector. If @mode_option is NULL the default command line
+ * modeline in fb_mode_option will be parsed instead.
+ *
+ * This uses the same parameters as the fb modedb.c, except for an extra
+ * force-enable, force-enable-digital and force-disable bit at the end:
+ *
+ * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
+ *
+ * The intermediate drm_cmdline_mode structure is required to store additional
+ * options from the command line modeline like the force-enable/disable flag.
+ *
+ * Returns:
+ * True if a valid modeline has been parsed, false otherwise.
+ */
+bool drm_mode_parse_command_line_for_connector(const char *mode_option,
+ struct drm_connector *connector,
+ struct drm_cmdline_mode *mode)
+{
+ const char *name;
+ unsigned int namelen;
+ bool res_specified = false, bpp_specified = false, refresh_specified = false;
+ unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
+ bool yres_specified = false, cvt = false, rb = false;
+ bool interlace = false, margins = false, was_digit = false;
+ int i;
+ enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
+
+#ifdef CONFIG_FB
+ if (!mode_option)
+ mode_option = fb_mode_option;
+#endif
+
+ if (!mode_option) {
+ mode->specified = false;
+ return false;
+ }
+
+ name = mode_option;
+ namelen = strlen(name);
+ for (i = namelen-1; i >= 0; i--) {
+ switch (name[i]) {
+ case '@':
+ if (!refresh_specified && !bpp_specified &&
+ !yres_specified && !cvt && !rb && was_digit) {
+ refresh = simple_strtol(&name[i+1], NULL, 10);
+ refresh_specified = true;
+ was_digit = false;
+ } else
+ goto done;
+ break;
+ case '-':
+ if (!bpp_specified && !yres_specified && !cvt &&
+ !rb && was_digit) {
+ bpp = simple_strtol(&name[i+1], NULL, 10);
+ bpp_specified = true;
+ was_digit = false;
+ } else
+ goto done;
+ break;
+ case 'x':
+ if (!yres_specified && was_digit) {
+ yres = simple_strtol(&name[i+1], NULL, 10);
+ yres_specified = true;
+ was_digit = false;
+ } else
+ goto done;
+ break;
+ case '0' ... '9':
+ was_digit = true;
+ break;
+ case 'M':
+ if (yres_specified || cvt || was_digit)
+ goto done;
+ cvt = true;
+ break;
+ case 'R':
+ if (yres_specified || cvt || rb || was_digit)
+ goto done;
+ rb = true;
+ break;
+ case 'm':
+ if (cvt || yres_specified || was_digit)
+ goto done;
+ margins = true;
+ break;
+ case 'i':
+ if (cvt || yres_specified || was_digit)
+ goto done;
+ interlace = true;
+ break;
+ case 'e':
+ if (yres_specified || bpp_specified || refresh_specified ||
+ was_digit || (force != DRM_FORCE_UNSPECIFIED))
+ goto done;
+
+ force = DRM_FORCE_ON;
+ break;
+ case 'D':
+ if (yres_specified || bpp_specified || refresh_specified ||
+ was_digit || (force != DRM_FORCE_UNSPECIFIED))
+ goto done;
+
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
+ force = DRM_FORCE_ON;
+ else
+ force = DRM_FORCE_ON_DIGITAL;
+ break;
+ case 'd':
+ if (yres_specified || bpp_specified || refresh_specified ||
+ was_digit || (force != DRM_FORCE_UNSPECIFIED))
+ goto done;
+
+ force = DRM_FORCE_OFF;
+ break;
+ default:
+ goto done;
+ }
+ }
+
+ if (i < 0 && yres_specified) {
+ char *ch;
+ xres = simple_strtol(name, &ch, 10);
+ if ((ch != NULL) && (*ch == 'x'))
+ res_specified = true;
+ else
+ i = ch - name;
+ } else if (!yres_specified && was_digit) {
+ /* catch mode that begins with digits but has no 'x' */
+ i = 0;
+ }
+done:
+ if (i >= 0) {
+ printk(KERN_WARNING
+ "parse error at position %i in video mode '%s'\n",
+ i, name);
+ mode->specified = false;
+ return false;
+ }
+
+ if (res_specified) {
+ mode->specified = true;
+ mode->xres = xres;
+ mode->yres = yres;
+ }
+
+ if (refresh_specified) {
+ mode->refresh_specified = true;
+ mode->refresh = refresh;
+ }
+
+ if (bpp_specified) {
+ mode->bpp_specified = true;
+ mode->bpp = bpp;
+ }
+ mode->rb = rb;
+ mode->cvt = cvt;
+ mode->interlace = interlace;
+ mode->margins = margins;
+ mode->force = force;
+
+ return true;
+}
+EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector);
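A hedged usage sketch (the caller context and mode string are hypothetical; only the parser call itself is from this patch):

	struct drm_cmdline_mode cmdline;

	/* e.g. from video=DVI-I-1:1024x768-24@75e on the kernel cmdline */
	if (drm_mode_parse_command_line_for_connector("1024x768-24@75e",
						      connector, &cmdline)) {
		/* cmdline.xres == 1024, cmdline.yres == 768,
		 * cmdline.bpp == 24, cmdline.refresh == 75,
		 * cmdline.force == DRM_FORCE_ON */
	}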
+
+/**
+ * drm_mode_create_from_cmdline_mode - convert a command line modeline into a DRM display mode
+ * @dev: DRM device to create the new mode for
+ * @cmd: input command line modeline
+ *
+ * Returns:
+ * Pointer to converted mode on success, NULL on error.
+ */
+struct drm_display_mode *
+drm_mode_create_from_cmdline_mode(struct drm_device *dev,
+ struct drm_cmdline_mode *cmd)
+{
+ struct drm_display_mode *mode;
+
+ if (cmd->cvt)
+ mode = drm_cvt_mode(dev,
+ cmd->xres, cmd->yres,
+ cmd->refresh_specified ? cmd->refresh : 60,
+ cmd->rb, cmd->interlace,
+ cmd->margins);
+ else
+ mode = drm_gtf_mode(dev,
+ cmd->xres, cmd->yres,
+ cmd->refresh_specified ? cmd->refresh : 60,
+ cmd->interlace,
+ cmd->margins);
+ if (!mode)
+ return NULL;
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ return mode;
+}
+EXPORT_SYMBOL(drm_mode_create_from_cmdline_mode);
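Continuing the sketch above, a parsed cmdline mode would typically be turned into a real mode and added to the connector's probed list (drm_mode_probed_add is assumed here; it is not part of this patch):

	struct drm_display_mode *mode =
		drm_mode_create_from_cmdline_mode(dev, &cmdline);
	if (mode)
		drm_mode_probed_add(connector, mode);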
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
new file mode 100644
index 00000000000..0dc57d5ecd1
--- /dev/null
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_modeset_lock.h>
+
+/**
+ * DOC: kms locking
+ *
+ * As KMS moves toward more fine grained locking, and atomic ioctl where
+ * userspace can indirectly control locking order, it becomes necessary
+ * to use ww_mutex and acquire-contexts to avoid deadlocks. But because
+ * the locking is more distributed around the driver code, we want a bit
+ * of extra utility/tracking out of our acquire-ctx. This is provided
+ * by drm_modeset_lock / drm_modeset_acquire_ctx.
+ *
+ * For basic principles of ww_mutex, see: Documentation/ww-mutex-design.txt
+ *
+ * The basic usage pattern is to:
+ *
+ * drm_modeset_acquire_init(&ctx)
+ * retry:
+ * foreach (lock in random_ordered_set_of_locks) {
+ * ret = drm_modeset_lock(lock, &ctx)
+ * if (ret == -EDEADLK) {
+ * drm_modeset_backoff(&ctx);
+ * goto retry;
+ * }
+ * }
+ *
+ * ... do stuff ...
+ *
+ * drm_modeset_drop_locks(&ctx);
+ * drm_modeset_acquire_fini(&ctx);
+ */
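A minimal concrete version of that pattern, under the assumption that the caller owns the context and drops everything afterwards:

	static int lock_all_crtcs(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
	{
		int ret;

		drm_modeset_acquire_init(ctx, 0);
	retry:
		ret = drm_modeset_lock_all_crtcs(dev, ctx);
		if (ret == -EDEADLK) {
			drm_modeset_backoff(ctx);
			goto retry;
		}
		/* on success the caller later calls drm_modeset_drop_locks()
		 * and drm_modeset_acquire_fini() */
		return ret;
	}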
+
+
+/**
+ * drm_modeset_acquire_init - initialize acquire context
+ * @ctx: the acquire context
+ * @flags: for future use
+ */
+void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
+ uint32_t flags)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
+ INIT_LIST_HEAD(&ctx->locked);
+}
+EXPORT_SYMBOL(drm_modeset_acquire_init);
+
+/**
+ * drm_modeset_acquire_fini - cleanup acquire context
+ * @ctx: the acquire context
+ */
+void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
+{
+ ww_acquire_fini(&ctx->ww_ctx);
+}
+EXPORT_SYMBOL(drm_modeset_acquire_fini);
+
+/**
+ * drm_modeset_drop_locks - drop all locks
+ * @ctx: the acquire context
+ *
+ * Drop all locks currently held against this acquire context.
+ */
+void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
+{
+ WARN_ON(ctx->contended);
+ while (!list_empty(&ctx->locked)) {
+ struct drm_modeset_lock *lock;
+
+ lock = list_first_entry(&ctx->locked,
+ struct drm_modeset_lock, head);
+
+ drm_modeset_unlock(lock);
+ }
+}
+EXPORT_SYMBOL(drm_modeset_drop_locks);
+
+static inline int modeset_lock(struct drm_modeset_lock *lock,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool interruptible, bool slow)
+{
+ int ret;
+
+ WARN_ON(ctx->contended);
+
+ if (interruptible && slow) {
+ ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
+ } else if (interruptible) {
+ ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
+ } else if (slow) {
+ ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
+ ret = 0;
+ } else {
+ ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
+ }
+ if (!ret) {
+ WARN_ON(!list_empty(&lock->head));
+ list_add(&lock->head, &ctx->locked);
+ } else if (ret == -EALREADY) {
+ /* we already hold the lock.. this is fine. For atomic
+ * we will need to be able to drm_modeset_lock() things
+ * without having to keep track of what is already locked
+ * or not.
+ */
+ ret = 0;
+ } else if (ret == -EDEADLK) {
+ ctx->contended = lock;
+ }
+
+ return ret;
+}
+
+static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
+ bool interruptible)
+{
+ struct drm_modeset_lock *contended = ctx->contended;
+
+ ctx->contended = NULL;
+
+ if (WARN_ON(!contended))
+ return 0;
+
+ drm_modeset_drop_locks(ctx);
+
+ return modeset_lock(contended, ctx, interruptible, true);
+}
+
+/**
+ * drm_modeset_backoff - deadlock avoidance backoff
+ * @ctx: the acquire context
+ *
+ * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
+ * you must call this function to drop all currently held locks and
+ * block until the contended lock becomes available.
+ */
+void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
+{
+ modeset_backoff(ctx, false);
+}
+EXPORT_SYMBOL(drm_modeset_backoff);
+
+/**
+ * drm_modeset_backoff_interruptible - deadlock avoidance backoff
+ * @ctx: the acquire context
+ *
+ * Interruptible version of drm_modeset_backoff()
+ */
+int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
+{
+ return modeset_backoff(ctx, true);
+}
+EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
+
+/**
+ * drm_modeset_lock - take modeset lock
+ * @lock: lock to take
+ * @ctx: acquire ctx
+ *
+ * If ctx is not NULL, then its ww acquire context is used and the
+ * lock will be tracked by the context and can be released by calling
+ * drm_modeset_drop_locks(). If -EDEADLK is returned, this means a
+ * deadlock scenario has been detected and it is an error to attempt
+ * to take any more locks without first calling drm_modeset_backoff().
+ */
+int drm_modeset_lock(struct drm_modeset_lock *lock,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ if (ctx)
+ return modeset_lock(lock, ctx, false, false);
+
+ ww_mutex_lock(&lock->mutex, NULL);
+ return 0;
+}
+EXPORT_SYMBOL(drm_modeset_lock);
+
+/**
+ * drm_modeset_lock_interruptible - take modeset lock
+ * @lock: lock to take
+ * @ctx: acquire ctx
+ *
+ * Interruptible version of drm_modeset_lock()
+ */
+int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ if (ctx)
+ return modeset_lock(lock, ctx, true, false);
+
+ return ww_mutex_lock_interruptible(&lock->mutex, NULL);
+}
+EXPORT_SYMBOL(drm_modeset_lock_interruptible);
+
+/**
+ * drm_modeset_unlock - drop modeset lock
+ * @lock: lock to release
+ */
+void drm_modeset_unlock(struct drm_modeset_lock *lock)
+{
+ list_del_init(&lock->head);
+ ww_mutex_unlock(&lock->mutex);
+}
+EXPORT_SYMBOL(drm_modeset_unlock);
+
+/* Temporary.. until we have sufficiently fine grained locking, there
+ * are a couple scenarios where it is convenient to grab all crtc locks.
+ * It is planned to remove this:
+ */
+int drm_modeset_lock_all_crtcs(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ list_for_each_entry(crtc, &config->crtc_list, head) {
+ ret = drm_modeset_lock(&crtc->mutex, ctx);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
new file mode 100644
index 00000000000..2ef988e037b
--- /dev/null
+++ b/drivers/gpu/drm/drm_panel.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_panel.h>
+
+static DEFINE_MUTEX(panel_lock);
+static LIST_HEAD(panel_list);
+
+void drm_panel_init(struct drm_panel *panel)
+{
+ INIT_LIST_HEAD(&panel->list);
+}
+EXPORT_SYMBOL(drm_panel_init);
+
+int drm_panel_add(struct drm_panel *panel)
+{
+ mutex_lock(&panel_lock);
+ list_add_tail(&panel->list, &panel_list);
+ mutex_unlock(&panel_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_panel_add);
+
+void drm_panel_remove(struct drm_panel *panel)
+{
+ mutex_lock(&panel_lock);
+ list_del_init(&panel->list);
+ mutex_unlock(&panel_lock);
+}
+EXPORT_SYMBOL(drm_panel_remove);
+
+int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
+{
+ if (panel->connector)
+ return -EBUSY;
+
+ panel->connector = connector;
+ panel->drm = connector->dev;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_panel_attach);
+
+int drm_panel_detach(struct drm_panel *panel)
+{
+ panel->connector = NULL;
+ panel->drm = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_panel_detach);
+
+#ifdef CONFIG_OF
+struct drm_panel *of_drm_find_panel(struct device_node *np)
+{
+ struct drm_panel *panel;
+
+ mutex_lock(&panel_lock);
+
+ list_for_each_entry(panel, &panel_list, list) {
+ if (panel->dev->of_node == np) {
+ mutex_unlock(&panel_lock);
+ return panel;
+ }
+ }
+
+ mutex_unlock(&panel_lock);
+ return NULL;
+}
+EXPORT_SYMBOL(of_drm_find_panel);
+#endif
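A hedged sketch of how a panel driver and a display driver would use this API together (my_panel, pdev and np are hypothetical):

	/* panel driver probe: */
	drm_panel_init(&my_panel);
	my_panel.dev = &pdev->dev;	/* assumed device backpointer */
	drm_panel_add(&my_panel);

	/* display driver, resolving the panel from its DT node: */
	panel = of_drm_find_panel(np);
	if (panel)
		drm_panel_attach(panel, connector);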
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("DRM panel infrastructure");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index e68ebf92fa2..020cfd93485 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -1,17 +1,3 @@
-/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
-/**
- * \file drm_pci.c
- * \brief Functions and ioctls to manage PCI memory
- *
- * \warning These interfaces aren't stable yet.
- *
- * \todo Implement the remaining ioctl's for the PCI pools.
- * \todo The wrappers here are so thin that they would be better off inlined..
- *
- * \author José Fonseca <jrfonseca@tungstengraphics.com>
- * \author Leif Delgass <ldelgass@retinalburn.net>
- */
-
/*
* Copyright 2003 José Fonseca.
* Copyright 2003 Leif Delgass.
@@ -37,23 +23,25 @@
*/
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/dma-mapping.h>
-#include "drmP.h"
-
-/**********************************************************************/
-/** \name PCI memory */
-/*@{*/
+#include <linux/export.h>
+#include <drm/drmP.h>
/**
- * \brief Allocate a PCI consistent memory block, for DMA.
+ * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
+ * @dev: DRM device
+ * @size: size of block to allocate
+ * @align: alignment of block
+ *
+ * Return: A handle to the allocated memory block on success or NULL on
+ * failure.
*/
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
-#if 1
unsigned long addr;
size_t sz;
-#endif
/* pci_alloc_consistent only guarantees alignment to the smallest
* PAGE_SIZE order which is greater than or equal to the requested size.
@@ -80,7 +68,7 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
/* Reserve */
for (addr = (unsigned long)dmah->vaddr, sz = size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- SetPageReserved(virt_to_page(addr));
+ SetPageReserved(virt_to_page((void *)addr));
}
return dmah;
@@ -88,24 +76,22 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
EXPORT_SYMBOL(drm_pci_alloc);
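A hedged usage sketch pairing the allocator with drm_pci_free() below:

	drm_dma_handle_t *dmah;

	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
	if (dmah) {
		/* dmah->vaddr is the CPU mapping, dmah->busaddr the
		 * bus address handed to the device */
		drm_pci_free(dev, dmah);
	}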
-/**
- * \brief Free a PCI consistent memory block without freeing its descriptor.
+/*
+ * Free a PCI consistent memory block without freeing its descriptor.
*
* This function is for internal use in the Linux-specific DRM core code.
*/
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
-#if 1
unsigned long addr;
size_t sz;
-#endif
if (dmah->vaddr) {
/* XXX - Is virt_to_page() legal for consistent mem? */
/* Unreserve */
for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
+ ClearPageReserved(virt_to_page((void *)addr));
}
dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
dmah->busaddr);
@@ -113,7 +99,9 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
}
/**
- * \brief Free a PCI consistent memory block
+ * drm_pci_free - Free a PCI consistent memory block
+ * @dev: DRM device
+ * @dmah: handle to memory block
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
@@ -123,4 +111,364 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
EXPORT_SYMBOL(drm_pci_free);
-/*@}*/
+#ifdef CONFIG_PCI
+
+static int drm_get_pci_domain(struct drm_device *dev)
+{
+#ifndef __alpha__
+ /* For historical reasons, drm_get_pci_domain() is busticated
+ * on most archs and has to remain so for userspace interface
+ * < 1.4, except on alpha which was right from the beginning
+ */
+ if (dev->if_version < 0x10004)
+ return 0;
+#endif /* __alpha__ */
+
+ return pci_domain_nr(dev->pdev->bus);
+}
+
+static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+ int len, ret;
+ master->unique_len = 40;
+ master->unique_size = master->unique_len;
+ master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+ if (master->unique == NULL)
+ return -ENOMEM;
+
+ len = snprintf(master->unique, master->unique_len,
+ "pci:%04x:%02x:%02x.%d",
+ drm_get_pci_domain(dev),
+ dev->pdev->bus->number,
+ PCI_SLOT(dev->pdev->devfn),
+ PCI_FUNC(dev->pdev->devfn));
+
+ if (len >= master->unique_len) {
+		DRM_ERROR("buffer overflow\n");
+ ret = -EINVAL;
+ goto err;
+ } else
+ master->unique_len = len;
+
+ return 0;
+err:
+ return ret;
+}
+
+int drm_pci_set_unique(struct drm_device *dev,
+ struct drm_master *master,
+ struct drm_unique *u)
+{
+ int domain, bus, slot, func, ret;
+
+ master->unique_len = u->unique_len;
+ master->unique_size = u->unique_len + 1;
+ master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+ if (!master->unique) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (copy_from_user(master->unique, u->unique, master->unique_len)) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ master->unique[master->unique_len] = '\0';
+
+ /* Return error if the busid submitted doesn't match the device's actual
+ * busid.
+ */
+ ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
+ if (ret != 3) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ domain = bus >> 8;
+ bus &= 0xff;
+
+ if ((domain != drm_get_pci_domain(dev)) ||
+ (bus != dev->pdev->bus->number) ||
+ (slot != PCI_SLOT(dev->pdev->devfn)) ||
+ (func != PCI_FUNC(dev->pdev->devfn))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ return 0;
+err:
+ return ret;
+}
+
+static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
+{
+ if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
+ (p->busnum & 0xff) != dev->pdev->bus->number ||
+ p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
+ return -EINVAL;
+
+ p->irq = dev->pdev->irq;
+
+ DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
+ p->irq);
+ return 0;
+}
+
+/**
+ * drm_irq_by_busid - Get interrupt from bus ID
+ * @dev: DRM device
+ * @data: IOCTL parameter pointing to a drm_irq_busid structure
+ * @file_priv: DRM file private.
+ *
+ * Finds the PCI device with the specified bus id and gets its IRQ number.
+ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
+ * to that of the device that this DRM instance is attached to.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_irq_busid *p = data;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+	/* UMS was only ever supported on PCI devices. */
+ if (WARN_ON(!dev->pdev))
+ return -EINVAL;
+
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return -EINVAL;
+
+ return drm_pci_irq_by_busid(dev, p);
+}
+
+static void drm_pci_agp_init(struct drm_device *dev)
+{
+ if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
+ if (drm_pci_device_is_agp(dev))
+ dev->agp = drm_agp_init(dev);
+ if (dev->agp) {
+ dev->agp->agp_mtrr = arch_phys_wc_add(
+ dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size *
+ 1024 * 1024);
+ }
+ }
+}
+
+void drm_pci_agp_destroy(struct drm_device *dev)
+{
+ if (dev->agp) {
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ drm_agp_clear(dev);
+ kfree(dev->agp);
+ dev->agp = NULL;
+ }
+}
+
+static struct drm_bus drm_pci_bus = {
+ .set_busid = drm_pci_set_busid,
+};
+
+/**
+ * drm_get_pci_dev - Register a PCI device with the DRM subsystem
+ * @pdev: PCI device
+ * @ent: entry from the PCI ID table that matches @pdev
+ * @driver: DRM device driver
+ *
+ * Attempt to get inter-module "drm" information. If we are first,
+ * register the character device and inter-module information.
+ * Try to register; if we fail to register, back out previous work.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+ struct drm_driver *driver)
+{
+ struct drm_device *dev;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ dev = drm_dev_alloc(driver, &pdev->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_free;
+
+ dev->pdev = pdev;
+#ifdef __alpha__
+ dev->hose = pdev->sysdata;
+#endif
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ pci_set_drvdata(pdev, dev);
+
+ drm_pci_agp_init(dev);
+
+ ret = drm_dev_register(dev, ent->driver_data);
+ if (ret)
+ goto err_agp;
+
+ DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+ driver->name, driver->major, driver->minor, driver->patchlevel,
+ driver->date, pci_name(pdev), dev->primary->index);
+
+ /* No locking needed since shadow-attach is single-threaded since it may
+ * only be called from the per-driver module init hook. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);
+
+ return 0;
+
+err_agp:
+ drm_pci_agp_destroy(dev);
+ pci_disable_device(pdev);
+err_free:
+ drm_dev_unref(dev);
+ return ret;
+}
+EXPORT_SYMBOL(drm_get_pci_dev);
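A hedged sketch of the expected caller, a driver's PCI probe hook (my_driver is hypothetical):

	static int my_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
	{
		return drm_get_pci_dev(pdev, ent, &my_driver);
	}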
+
+/**
+ * drm_pci_init - Register matching PCI devices with the DRM subsystem
+ * @driver: DRM device driver
+ * @pdriver: PCI device driver
+ *
+ * Initializes a drm_device structure, registering the stubs and initializing
+ * the AGP device.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+ struct pci_dev *pdev = NULL;
+ const struct pci_device_id *pid;
+ int i;
+
+ DRM_DEBUG("\n");
+
+ driver->bus = &drm_pci_bus;
+
+ if (driver->driver_features & DRIVER_MODESET)
+ return pci_register_driver(pdriver);
+
+ /* If not using KMS, fall back to stealth mode manual scanning. */
+ INIT_LIST_HEAD(&driver->legacy_dev_list);
+ for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
+ pid = &pdriver->id_table[i];
+
+ /* Loop around setting up a DRM device for each PCI device
+ * matching our ID and device class. If we had the internal
+ * function that pci_get_subsys and pci_get_class used, we'd
+ * be able to just pass pid in instead of doing a two-stage
+ * thing.
+ */
+ pdev = NULL;
+ while ((pdev =
+ pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
+ pid->subdevice, pdev)) != NULL) {
+ if ((pdev->class & pid->class_mask) != pid->class)
+ continue;
+
+ /* stealth mode requires a manual probe */
+ pci_dev_get(pdev);
+ drm_get_pci_dev(pdev, pid, driver);
+ }
+ }
+ return 0;
+}
+
+int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
+{
+ struct pci_dev *root;
+ u32 lnkcap, lnkcap2;
+
+ *mask = 0;
+ if (!dev->pdev)
+ return -EINVAL;
+
+ root = dev->pdev->bus->self;
+
+	/* we've been informed VIA and ServerWorks don't make the cut */
+ if (root->vendor == PCI_VENDOR_ID_VIA ||
+ root->vendor == PCI_VENDOR_ID_SERVERWORKS)
+ return -EINVAL;
+
+ pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+ pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);
+
+ if (lnkcap2) { /* PCIe r3.0-compliant */
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+ *mask |= DRM_PCIE_SPEED_25;
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+ *mask |= DRM_PCIE_SPEED_50;
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+ *mask |= DRM_PCIE_SPEED_80;
+ } else { /* pre-r3.0 */
+ if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
+ *mask |= DRM_PCIE_SPEED_25;
+ if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
+ *mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
+ }
+
+ DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
+ return 0;
+}
+EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
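A hedged usage sketch (the driver-side policy is made up):

	u32 mask;

	if (drm_pcie_get_speed_cap_mask(dev, &mask) == 0 &&
	    (mask & DRM_PCIE_SPEED_50)) {
		/* root port supports 5.0 GT/s, train the link up
		 * (driver specific) */
	}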
+
+#else
+
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+ return -1;
+}
+
+void drm_pci_agp_destroy(struct drm_device *dev) {}
+
+int drm_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -EINVAL;
+}
+
+int drm_pci_set_unique(struct drm_device *dev,
+ struct drm_master *master,
+ struct drm_unique *u)
+{
+ return -EINVAL;
+}
+#endif
+
+EXPORT_SYMBOL(drm_pci_init);
+
+/**
+ * drm_pci_exit - Unregister matching PCI devices from the DRM subsystem
+ * @driver: DRM device driver
+ * @pdriver: PCI device driver
+ *
+ * Unregisters one or more devices matched by a PCI driver from the DRM
+ * subsystem.
+ */
+void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+ struct drm_device *dev, *tmp;
+ DRM_DEBUG("\n");
+
+ if (driver->driver_features & DRIVER_MODESET) {
+ pci_unregister_driver(pdriver);
+ } else {
+ list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
+ legacy_dev_list) {
+ list_del(&dev->legacy_dev_list);
+ drm_put_dev(dev);
+ }
+ }
+ DRM_INFO("Module unloaded\n");
+}
+EXPORT_SYMBOL(drm_pci_exit);
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
new file mode 100644
index 00000000000..6d133149cc7
--- /dev/null
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * DRM universal plane helper functions
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <drm/drmP.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_rect.h>
+
+#define SUBPIXEL_MASK 0xffff
+
+/*
+ * This is the minimal list of formats that seem to be safe for modeset use
+ * with all current DRM drivers. Most hardware can actually support more
+ * formats than this and drivers may specify a more accurate list when
+ * creating the primary plane. However drivers that still call
+ * drm_plane_init() will use this minimal format list as the default.
+ */
+static const uint32_t safe_modeset_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+};
+
+/*
+ * Returns the connectors currently associated with a CRTC. This function
+ * should be called twice: once with a NULL connector list to retrieve
+ * the list size, and once with the properly allocated list to be filled in.
+ */
+static int get_connectors_for_crtc(struct drm_crtc *crtc,
+ struct drm_connector **connector_list,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ int count = 0;
+
+ /*
+ * Note: Once we change the plane hooks to more fine-grained locking we
+ * need to grab the connection_mutex here to be able to make these
+ * checks.
+ */
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder && connector->encoder->crtc == crtc) {
+ if (connector_list != NULL && count < num_connectors)
+ *(connector_list++) = connector;
+
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * drm_plane_helper_check_update() - Check plane update for validity
+ * @plane: plane object to update
+ * @crtc: owning CRTC of owning plane
+ * @fb: framebuffer to flip onto plane
+ * @src: source coordinates in 16.16 fixed point
+ * @dest: integer destination coordinates
+ * @clip: integer clipping coordinates
+ * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
+ * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
+ * @can_position: is it legal to position the plane such that it
+ * doesn't cover the entire crtc? This will generally
+ * only be false for primary planes.
+ * @can_update_disabled: can the plane be updated while the crtc
+ * is disabled?
+ * @visible: output parameter indicating whether plane is still visible after
+ * clipping
+ *
+ * Checks that a desired plane update is valid. Drivers that provide
+ * their own plane handling rather than helper-provided implementations may
+ * still wish to call this function to avoid duplication of error checking
+ * code.
+ *
+ * RETURNS:
+ * Zero if update appears valid, error code on failure
+ */
+int drm_plane_helper_check_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_rect *src,
+ struct drm_rect *dest,
+ const struct drm_rect *clip,
+ int min_scale,
+ int max_scale,
+ bool can_position,
+ bool can_update_disabled,
+ bool *visible)
+{
+ int hscale, vscale;
+
+ if (!crtc->enabled && !can_update_disabled) {
+ DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
+ return -EINVAL;
+ }
+
+ /* Check scaling */
+ hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale);
+ vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
+ if (hscale < 0 || vscale < 0) {
+ DRM_DEBUG_KMS("Invalid scaling of plane\n");
+ return -ERANGE;
+ }
+
+ *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
+ if (!*visible)
+ /*
+ * Plane isn't visible; some drivers can handle this
+ * so we just return success here. Drivers that can't
+ * (including those that use the primary plane helper's
+ * update function) will return an error from their
+ * update_plane handler.
+ */
+ return 0;
+
+ if (!can_position && !drm_rect_equals(dest, clip)) {
+ DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_helper_check_update);
+
+/**
+ * drm_primary_helper_update() - Helper for primary plane update
+ * @plane: plane object to update
+ * @crtc: owning CRTC of owning plane
+ * @fb: framebuffer to flip onto plane
+ * @crtc_x: x offset of primary plane on crtc
+ * @crtc_y: y offset of primary plane on crtc
+ * @crtc_w: width of primary plane rectangle on crtc
+ * @crtc_h: height of primary plane rectangle on crtc
+ * @src_x: x offset of @fb for panning
+ * @src_y: y offset of @fb for panning
+ * @src_w: width of source rectangle in @fb
+ * @src_h: height of source rectangle in @fb
+ *
+ * Provides a default plane update handler for primary planes. This handler
+ * is called in response to a userspace SetPlane operation on the plane with a
+ * non-NULL framebuffer. We call the driver's modeset handler to update the
+ * framebuffer.
+ *
+ * SetPlane() on a primary plane of a disabled CRTC is not supported, and will
+ * return an error.
+ *
+ * Note that we make some assumptions about hardware limitations that may not be
+ * true for all hardware --
+ * 1) Primary plane cannot be repositioned.
+ * 2) Primary plane cannot be scaled.
+ * 3) Primary plane must cover the entire CRTC.
+ * 4) Subpixel positioning is not supported.
+ * Drivers for hardware that don't have these restrictions can provide their
+ * own implementation rather than using this helper.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_mode_set set = {
+ .crtc = crtc,
+ .fb = fb,
+ .mode = &crtc->mode,
+ .x = src_x >> 16,
+ .y = src_y >> 16,
+ };
+ struct drm_rect src = {
+ .x1 = src_x,
+ .y1 = src_y,
+ .x2 = src_x + src_w,
+ .y2 = src_y + src_h,
+ };
+ struct drm_rect dest = {
+ .x1 = crtc_x,
+ .y1 = crtc_y,
+ .x2 = crtc_x + crtc_w,
+ .y2 = crtc_y + crtc_h,
+ };
+ const struct drm_rect clip = {
+ .x2 = crtc->mode.hdisplay,
+ .y2 = crtc->mode.vdisplay,
+ };
+ struct drm_connector **connector_list;
+ int num_connectors, ret;
+ bool visible;
+
+ ret = drm_plane_helper_check_update(plane, crtc, fb,
+ &src, &dest, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, false, &visible);
+ if (ret)
+ return ret;
+
+ if (!visible)
+ /*
+ * Primary plane isn't visible. Note that unless a driver
+ * provides their own disable function, this will just
+ * wind up returning -EINVAL to userspace.
+ */
+ return plane->funcs->disable_plane(plane);
+
+ /* Find current connectors for CRTC */
+ num_connectors = get_connectors_for_crtc(crtc, NULL, 0);
+ BUG_ON(num_connectors == 0);
+ connector_list = kzalloc(num_connectors * sizeof(*connector_list),
+ GFP_KERNEL);
+ if (!connector_list)
+ return -ENOMEM;
+ get_connectors_for_crtc(crtc, connector_list, num_connectors);
+
+ set.connectors = connector_list;
+ set.num_connectors = num_connectors;
+
+ /*
+ * We call set_config() directly here rather than using
+ * drm_mode_set_config_internal. We're reprogramming the same
+ * connectors that were already in use, so we shouldn't need the extra
+ * cross-CRTC fb refcounting to accommodate stealing connectors.
+ * drm_mode_setplane() already handles the basic refcounting for the
+ * framebuffers involved in this operation.
+ */
+ ret = crtc->funcs->set_config(&set);
+
+ kfree(connector_list);
+ return ret;
+}
+EXPORT_SYMBOL(drm_primary_helper_update);
+
+/**
+ * drm_primary_helper_disable() - Helper for primary plane disable
+ * @plane: plane to disable
+ *
+ * Provides a default plane disable handler for primary planes. This handler
+ * is called in response to a userspace SetPlane operation on the plane with a
+ * NULL framebuffer parameter. It unconditionally fails the disable call with
+ * -EINVAL, since the only way to disable the primary plane without driver
+ * support is to disable the entire CRTC, which does not match the plane
+ * ->disable hook.
+ *
+ * Note that some hardware may be able to disable the primary plane without
+ * disabling the whole CRTC. Drivers for such hardware should provide their
+ * own disable handler that disables just the primary plane (and they'll likely
+ * need to provide their own update handler as well to properly re-enable a
+ * disabled primary plane).
+ *
+ * RETURNS:
+ * Unconditionally returns -EINVAL.
+ */
+int drm_primary_helper_disable(struct drm_plane *plane)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL(drm_primary_helper_disable);
+
+/**
+ * drm_primary_helper_destroy() - Helper for primary plane destruction
+ * @plane: plane to destroy
+ *
+ * Provides a default plane destroy handler for primary planes. This handler
+ * is called during CRTC destruction. We disable the primary plane, remove
+ * it from the DRM plane list, and deallocate the plane structure.
+ */
+void drm_primary_helper_destroy(struct drm_plane *plane)
+{
+ drm_plane_cleanup(plane);
+ kfree(plane);
+}
+EXPORT_SYMBOL(drm_primary_helper_destroy);
+
+const struct drm_plane_funcs drm_primary_helper_funcs = {
+ .update_plane = drm_primary_helper_update,
+ .disable_plane = drm_primary_helper_disable,
+ .destroy = drm_primary_helper_destroy,
+};
+EXPORT_SYMBOL(drm_primary_helper_funcs);
+
+/**
+ * drm_primary_helper_create_plane() - Create a generic primary plane
+ * @dev: drm device
+ * @formats: pixel formats supported, or NULL for a default safe list
+ * @num_formats: size of @formats; ignored if @formats is NULL
+ *
+ * Allocates and initializes a primary plane that can be used with the primary
+ * plane helpers. Drivers that wish to use driver-specific plane structures or
+ * provide custom handler functions may perform their own allocation and
+ * initialization rather than calling this function.
+ */
+struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
+ const uint32_t *formats,
+ int num_formats)
+{
+ struct drm_plane *primary;
+ int ret;
+
+ primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+ if (primary == NULL) {
+ DRM_DEBUG_KMS("Failed to allocate primary plane\n");
+ return NULL;
+ }
+
+ if (formats == NULL) {
+ formats = safe_modeset_formats;
+ num_formats = ARRAY_SIZE(safe_modeset_formats);
+ }
+
+ /* possible_crtc's will be filled in later by crtc_init */
+ ret = drm_plane_init(dev, primary, 0, &drm_primary_helper_funcs,
+ formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY);
+ if (ret) {
+ kfree(primary);
+ primary = NULL;
+ }
+
+ return primary;
+}
+EXPORT_SYMBOL(drm_primary_helper_create_plane);
+
+/**
+ * drm_crtc_init - Legacy CRTC initialization function
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @funcs: callbacks for the new CRTC
+ *
+ * Initialize a CRTC object with a default helper-provided primary plane and no
+ * cursor plane.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs)
+{
+ struct drm_plane *primary;
+
+ primary = drm_primary_helper_create_plane(dev, NULL, 0);
+ return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
+}
+EXPORT_SYMBOL(drm_crtc_init);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
new file mode 100644
index 00000000000..d5b76f148c1
--- /dev/null
+++ b/drivers/gpu/drm/drm_platform.c
@@ -0,0 +1,126 @@
+/*
+ * Derived from drm_pci.c
+ *
+ * Copyright 2003 José Fonseca.
+ * Copyright 2003 Leif Delgass.
+ * Copyright (c) 2009, Code Aurora Forum.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+
+/*
+ * Register.
+ *
+ * \param platdev - Platform device structure
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to get inter-module "drm" information. If we are first,
+ * register the character device and inter-module information.
+ * Try to register; if we fail to register, back out previous work.
+ */
+
+static int drm_get_platform_dev(struct platform_device *platdev,
+ struct drm_driver *driver)
+{
+ struct drm_device *dev;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ dev = drm_dev_alloc(driver, &platdev->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->platformdev = platdev;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ goto err_free;
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ driver->name, driver->major, driver->minor, driver->patchlevel,
+ driver->date, dev->primary->index);
+
+ return 0;
+
+err_free:
+ drm_dev_unref(dev);
+ return ret;
+}
+
+static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+ int len, ret, id;
+
+ master->unique_len = 13 + strlen(dev->platformdev->name);
+ master->unique_size = master->unique_len;
+ master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
+
+ if (master->unique == NULL)
+ return -ENOMEM;
+
+ id = dev->platformdev->id;
+
+	/* if there is only a single instance of the platform device, id will
+	 * be set to -1; use 0 instead to avoid a funny-looking bus-id:
+ */
+ if (id == -1)
+ id = 0;
+
+ len = snprintf(master->unique, master->unique_len,
+ "platform:%s:%02d", dev->platformdev->name, id);
+
+ if (len > master->unique_len) {
+ DRM_ERROR("Unique buffer overflowed\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static struct drm_bus drm_platform_bus = {
+ .set_busid = drm_platform_set_busid,
+};
+
+/**
+ * drm_platform_init - Register a platform device with the DRM subsystem
+ * @driver: DRM device driver
+ * @platform_device: platform device to register
+ *
+ * Registers the specified DRM device driver and platform device with the DRM
+ * subsystem, initializing a drm_device structure and calling the driver's
+ * .load() function.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device)
+{
+ DRM_DEBUG("\n");
+
+ driver->bus = &drm_platform_bus;
+ return drm_get_platform_dev(platform_device, driver);
+}
+EXPORT_SYMBOL(drm_platform_init);
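A hedged sketch of the expected caller, a platform driver's probe hook (my_driver is hypothetical):

	static int my_platform_probe(struct platform_device *pdev)
	{
		return drm_platform_init(&my_driver, pdev);
	}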
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
new file mode 100644
index 00000000000..304ca8cacbc
--- /dev/null
+++ b/drivers/gpu/drm/drm_prime.c
@@ -0,0 +1,761 @@
+/*
+ * Copyright © 2012 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Rob Clark <rob.clark@linaro.org>
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/dma-buf.h>
+#include <drm/drmP.h>
+
+/*
+ * DMA-BUF/GEM Object references and lifetime overview:
+ *
+ * On the export the dma_buf holds a reference to the exporting GEM
+ * object. It takes this reference in handle_to_fd_ioctl, when it
+ * first calls .prime_export and stores the exporting GEM object in
+ * the dma_buf priv. This reference is released when the dma_buf
+ * object goes away in the driver .release function.
+ *
+ * On the import the importing GEM object holds a reference to the
+ * dma_buf (which in turn holds a ref to the exporting GEM object).
+ * It takes that reference in the fd_to_handle ioctl.
+ * It calls dma_buf_get, creates an attachment to it and stores the
+ * attachment in the GEM object. When this attachment is destroyed
+ * when the imported object is destroyed, we remove the attachment
+ * and drop the reference to the dma_buf.
+ *
+ * Thus the chain of references always flows in one direction
+ * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
+ *
+ * Self-importing: if userspace is using PRIME as a replacement for flink
+ * then it will get a fd->handle request for a GEM object that it created.
+ * Drivers should detect this situation and return back the gem object
+ * from the dma-buf private. Prime will do this automatically for drivers that
+ * use the drm_gem_prime_{import,export} helpers.
+ */
+
+struct drm_prime_member {
+ struct list_head entry;
+ struct dma_buf *dma_buf;
+ uint32_t handle;
+};
+
+struct drm_prime_attachment {
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
+};
+
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ struct dma_buf *dma_buf, uint32_t handle)
+{
+ struct drm_prime_member *member;
+
+ member = kmalloc(sizeof(*member), GFP_KERNEL);
+ if (!member)
+ return -ENOMEM;
+
+ get_dma_buf(dma_buf);
+ member->dma_buf = dma_buf;
+ member->handle = handle;
+ list_add(&member->entry, &prime_fpriv->head);
+ return 0;
+}
+
+static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle)
+{
+ struct drm_prime_member *member;
+
+ list_for_each_entry(member, &prime_fpriv->head, entry) {
+ if (member->handle == handle)
+ return member->dma_buf;
+ }
+
+ return NULL;
+}
+
+static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ struct dma_buf *dma_buf,
+ uint32_t *handle)
+{
+ struct drm_prime_member *member;
+
+ list_for_each_entry(member, &prime_fpriv->head, entry) {
+ if (member->dma_buf == dma_buf) {
+ *handle = member->handle;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+static int drm_gem_map_attach(struct dma_buf *dma_buf,
+ struct device *target_dev,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_prime_attachment *prime_attach;
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+
+ prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
+ if (!prime_attach)
+ return -ENOMEM;
+
+ prime_attach->dir = DMA_NONE;
+ attach->priv = prime_attach;
+
+ if (!dev->driver->gem_prime_pin)
+ return 0;
+
+ return dev->driver->gem_prime_pin(obj);
+}
+
+static void drm_gem_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_prime_attachment *prime_attach = attach->priv;
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+ struct sg_table *sgt;
+
+ if (dev->driver->gem_prime_unpin)
+ dev->driver->gem_prime_unpin(obj);
+
+ if (!prime_attach)
+ return;
+
+ sgt = prime_attach->sgt;
+ if (sgt) {
+ if (prime_attach->dir != DMA_NONE)
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+ prime_attach->dir);
+ sg_free_table(sgt);
+ }
+
+ kfree(sgt);
+ kfree(prime_attach);
+ attach->priv = NULL;
+}
+
+void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
+ struct dma_buf *dma_buf)
+{
+ struct drm_prime_member *member, *safe;
+
+ list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+ if (member->dma_buf == dma_buf) {
+ dma_buf_put(dma_buf);
+ list_del(&member->entry);
+ kfree(member);
+ }
+ }
+}
+
+static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_prime_attachment *prime_attach = attach->priv;
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct sg_table *sgt;
+
+ if (WARN_ON(dir == DMA_NONE || !prime_attach))
+ return ERR_PTR(-EINVAL);
+
+ /* return the cached mapping when possible */
+ if (prime_attach->dir == dir)
+ return prime_attach->sgt;
+
+ /*
+ * two mappings with different directions for the same attachment are
+ * not allowed
+ */
+ if (WARN_ON(prime_attach->dir != DMA_NONE))
+ return ERR_PTR(-EBUSY);
+
+ sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+
+ if (!IS_ERR(sgt)) {
+ if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ sgt = ERR_PTR(-ENOMEM);
+ } else {
+ prime_attach->sgt = sgt;
+ prime_attach->dir = dir;
+ }
+ }
+
+ return sgt;
+}
+
+static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ /* nothing to be done here */
+}
+
+/**
+ * drm_gem_dmabuf_release - dma_buf release implementation for GEM
+ * @dma_buf: buffer to be released
+ *
+ * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
+ * must use this in their dma_buf ops structure as the release callback.
+ */
+void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ /* drop the reference on the export fd holds */
+ drm_gem_object_unreference_unlocked(obj);
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_release);
+
+static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+
+ return dev->driver->gem_prime_vmap(obj);
+}
+
+static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+
+ dev->driver->gem_prime_vunmap(obj, vaddr);
+}
+
+static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+
+static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+
+static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+ struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+
+ if (!dev->driver->gem_prime_mmap)
+ return -ENOSYS;
+
+ return dev->driver->gem_prime_mmap(obj, vma);
+}
+
+static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
+ .attach = drm_gem_map_attach,
+ .detach = drm_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .kmap = drm_gem_dmabuf_kmap,
+ .kmap_atomic = drm_gem_dmabuf_kmap_atomic,
+ .kunmap = drm_gem_dmabuf_kunmap,
+ .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
+/**
+ * DOC: PRIME Helpers
+ *
+ * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
+ * simpler APIs by using the helper functions @drm_gem_prime_export and
+ * @drm_gem_prime_import. These functions implement dma-buf support in terms of
+ * five lower-level driver callbacks:
+ *
+ * Export callbacks:
+ *
+ * - @gem_prime_pin (optional): prepare a GEM object for exporting
+ *
+ * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
+ *
+ * - @gem_prime_vmap: vmap a buffer exported by your driver
+ *
+ * - @gem_prime_vunmap: vunmap a buffer exported by your driver
+ *
+ * Import callback:
+ *
+ * - @gem_prime_import_sg_table (import): produce a GEM object from another
+ * driver's scatter/gather table
+ */
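+
+/*
+ * A minimal sketch of the resulting &drm_driver wiring; the foo_* entries
+ * are hypothetical driver implementations of the hooks listed above:
+ *
+ *	.driver_features = DRIVER_GEM | DRIVER_PRIME,
+ *	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ *	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ *	.gem_prime_export = drm_gem_prime_export,
+ *	.gem_prime_import = drm_gem_prime_import,
+ *	.gem_prime_pin = foo_gem_prime_pin,
+ *	.gem_prime_get_sg_table = foo_gem_prime_get_sg_table,
+ *	.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
+ *	.gem_prime_vmap = foo_gem_prime_vmap,
+ *	.gem_prime_vunmap = foo_gem_prime_vunmap,
+ */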
+
+/**
+ * drm_gem_prime_export - helper library implementation of the export callback
+ * @dev: drm_device to export from
+ * @obj: GEM object to export
+ * @flags: flags like DRM_CLOEXEC
+ *
+ * This is the implementation of the gem_prime_export functions for GEM drivers
+ * using the PRIME helpers.
+ */
+struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags)
+{
+ return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
+}
+EXPORT_SYMBOL(drm_gem_prime_export);
+
+static struct dma_buf *export_and_register_object(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ uint32_t flags)
+{
+ struct dma_buf *dmabuf;
+
+ /* prevent races with concurrent gem_close. */
+ if (obj->handle_count == 0) {
+ dmabuf = ERR_PTR(-ENOENT);
+ return dmabuf;
+ }
+
+ dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+ if (IS_ERR(dmabuf)) {
+ /* normally the created dma-buf takes ownership of the ref,
+ * but if that fails then drop the ref
+ */
+ return dmabuf;
+ }
+
+ /*
+ * Note that callers do not need to clean up the export cache
+ * since the check for obj->handle_count guarantees that someone
+ * will clean it up.
+ */
+ obj->dma_buf = dmabuf;
+ get_dma_buf(obj->dma_buf);
+ /* Grab a new ref since the caller's reference is now used by the dma-buf */
+ drm_gem_object_reference(obj);
+
+ return dmabuf;
+}
+
+/**
+ * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
+ * @dev: dev to export the buffer from
+ * @file_priv: drm file-private structure
+ * @handle: buffer handle to export
+ * @flags: flags like DRM_CLOEXEC
+ * @prime_fd: pointer to storage for the fd id of the created dma-buf
+ *
+ * This is the PRIME export function which GEM drivers must use to ensure
+ * correct lifetime management of the underlying GEM object.
+ * The actual exporting from GEM object to a dma-buf is done through the
+ * gem_prime_export driver callback.
+ */
+int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags,
+ int *prime_fd)
+{
+ struct drm_gem_object *obj;
+ int ret = 0;
+ struct dma_buf *dmabuf;
+
+ mutex_lock(&file_priv->prime.lock);
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
+ if (dmabuf) {
+ get_dma_buf(dmabuf);
+ goto out_have_handle;
+ }
+
+ mutex_lock(&dev->object_name_lock);
+ /* re-export the original imported object */
+ if (obj->import_attach) {
+ dmabuf = obj->import_attach->dmabuf;
+ get_dma_buf(dmabuf);
+ goto out_have_obj;
+ }
+
+ if (obj->dma_buf) {
+ get_dma_buf(obj->dma_buf);
+ dmabuf = obj->dma_buf;
+ goto out_have_obj;
+ }
+
+ dmabuf = export_and_register_object(dev, obj, flags);
+ if (IS_ERR(dmabuf)) {
+ /* normally the created dma-buf takes ownership of the ref,
+ * but if that fails then drop the ref
+ */
+ ret = PTR_ERR(dmabuf);
+ mutex_unlock(&dev->object_name_lock);
+ goto out;
+ }
+
+out_have_obj:
+ /*
+ * If we've exported this buffer then cheat and add it to the import list
+ * so we get the correct handle back. We must do this under the
+ * protection of dev->object_name_lock to ensure that a racing gem close
+ * ioctl doesn't fail to remove this buffer handle from the cache.
+ */
+ ret = drm_prime_add_buf_handle(&file_priv->prime,
+ dmabuf, handle);
+ mutex_unlock(&dev->object_name_lock);
+ if (ret)
+ goto fail_put_dmabuf;
+
+out_have_handle:
+ ret = dma_buf_fd(dmabuf, flags);
+ /*
+ * We must _not_ remove the buffer from the handle cache since the newly
+ * created dma buf is already linked in the global obj->dma_buf pointer,
+ * and that is invariant as long as a userspace gem handle exists.
+ * Closing the handle will clean out the cache anyway, so we don't leak.
+ */
+ if (ret < 0) {
+ goto fail_put_dmabuf;
+ } else {
+ *prime_fd = ret;
+ ret = 0;
+ }
+
+ goto out;
+
+fail_put_dmabuf:
+ dma_buf_put(dmabuf);
+out:
+ drm_gem_object_unreference_unlocked(obj);
+out_unlock:
+ mutex_unlock(&file_priv->prime.lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
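+
+/*
+ * Userspace reaches this through DRM_IOCTL_PRIME_HANDLE_TO_FD; a rough
+ * sketch (error handling omitted):
+ *
+ *	struct drm_prime_handle args = {
+ *		.handle = handle,
+ *		.flags = DRM_CLOEXEC,
+ *	};
+ *
+ *	ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
+ *
+ * args.fd then holds the exported dma-buf file descriptor.
+ */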
+
+/**
+ * drm_gem_prime_import - helper library implementation of the import callback
+ * @dev: drm_device to import into
+ * @dma_buf: dma-buf object to import
+ *
+ * This is the implementation of the gem_prime_import functions for GEM drivers
+ * using the PRIME helpers.
+ */
+struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!dev->driver->gem_prime_import_sg_table)
+ return ERR_PTR(-EINVAL);
+
+ if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
+ obj = dma_buf->priv;
+ if (obj->dev == dev) {
+ /*
+ * Importing a dmabuf exported from our own gem increases the
+ * refcount on the gem itself instead of the f_count of the dmabuf.
+ */
+ drm_gem_object_reference(obj);
+ return obj;
+ }
+ }
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ get_dma_buf(dma_buf);
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto fail_unmap;
+ }
+
+ obj->import_attach = attach;
+
+ return obj;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_gem_prime_import);
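+
+/*
+ * The driver callback used above wraps the mapped sg table in a
+ * driver-specific GEM object; a minimal sketch (foo_* names hypothetical):
+ *
+ *	struct drm_gem_object *
+ *	foo_gem_prime_import_sg_table(struct drm_device *dev, size_t size,
+ *				      struct sg_table *sgt)
+ *	{
+ *		struct foo_bo *bo = foo_bo_create(dev, size);
+ *
+ *		if (IS_ERR(bo))
+ *			return ERR_CAST(bo);
+ *		bo->sgt = sgt;
+ *		return &bo->base;
+ *	}
+ */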
+
+/**
+ * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
+ * @dev: dev to export the buffer from
+ * @file_priv: drm file-private structure
+ * @prime_fd: fd id of the dma-buf which should be imported
+ * @handle: pointer to storage for the handle of the imported buffer object
+ *
+ * This is the PRIME import function which GEM drivers must use to ensure
+ * correct lifetime management of the underlying GEM object. The actual
+ * importing of the GEM object from the dma-buf is done through the
+ * gem_prime_import driver callback.
+ */
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd,
+ uint32_t *handle)
+{
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+ int ret;
+
+ dma_buf = dma_buf_get(prime_fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+
+ mutex_lock(&file_priv->prime.lock);
+
+ ret = drm_prime_lookup_buf_handle(&file_priv->prime,
+ dma_buf, handle);
+ if (ret == 0)
+ goto out_put;
+
+ /* never seen this one, need to import */
+ mutex_lock(&dev->object_name_lock);
+ obj = dev->driver->gem_prime_import(dev, dma_buf);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto out_unlock;
+ }
+
+ if (obj->dma_buf) {
+ WARN_ON(obj->dma_buf != dma_buf);
+ } else {
+ obj->dma_buf = dma_buf;
+ get_dma_buf(dma_buf);
+ }
+
+ /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+ ret = drm_gem_handle_create_tail(file_priv, obj, handle);
+ drm_gem_object_unreference_unlocked(obj);
+ if (ret)
+ goto out_put;
+
+ ret = drm_prime_add_buf_handle(&file_priv->prime,
+ dma_buf, *handle);
+ if (ret)
+ goto fail;
+
+ mutex_unlock(&file_priv->prime.lock);
+
+ dma_buf_put(dma_buf);
+
+ return 0;
+
+fail:
+ /* if the driver attached, we rely on the free-object path
+ * to detach, which seems OK
+ */
+ drm_gem_handle_delete(file_priv, *handle);
+out_unlock:
+ mutex_unlock(&dev->object_name_lock);
+out_put:
+ dma_buf_put(dma_buf);
+ mutex_unlock(&file_priv->prime.lock);
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
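+
+/*
+ * The userspace counterpart is DRM_IOCTL_PRIME_FD_TO_HANDLE; a rough
+ * sketch (error handling omitted):
+ *
+ *	struct drm_prime_handle args = { .fd = dmabuf_fd };
+ *
+ *	ioctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
+ *
+ * args.handle then names the imported buffer object.
+ */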
+
+int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_prime_handle *args = data;
+ uint32_t flags;
+
+ if (!drm_core_check_feature(dev, DRIVER_PRIME))
+ return -EINVAL;
+
+ if (!dev->driver->prime_handle_to_fd)
+ return -ENOSYS;
+
+ /* check flags are valid */
+ if (args->flags & ~DRM_CLOEXEC)
+ return -EINVAL;
+
+ /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
+ flags = args->flags & DRM_CLOEXEC;
+
+ return dev->driver->prime_handle_to_fd(dev, file_priv,
+ args->handle, flags, &args->fd);
+}
+
+int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_prime_handle *args = data;
+
+ if (!drm_core_check_feature(dev, DRIVER_PRIME))
+ return -EINVAL;
+
+ if (!dev->driver->prime_fd_to_handle)
+ return -ENOSYS;
+
+ return dev->driver->prime_fd_to_handle(dev, file_priv,
+ args->fd, &args->handle);
+}
+
+/**
+ * drm_prime_pages_to_sg - converts a page array into an sg list
+ * @pages: pointer to the array of page pointers to convert
+ * @nr_pages: length of the page vector
+ *
+ * This helper creates an sg table object from a set of pages. The driver
+ * is responsible for mapping the pages into the importer's address space
+ * for use with dma_buf itself.
+ */
+struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
+{
+ struct sg_table *sg = NULL;
+ int ret;
+
+ sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!sg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+ nr_pages << PAGE_SHIFT, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ return sg;
+out:
+ kfree(sg);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_prime_pages_to_sg);
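+
+/*
+ * A driver that keeps a page array for its backing storage can implement
+ * the @gem_prime_get_sg_table hook directly on top of this helper; a
+ * minimal sketch (foo_* names hypothetical):
+ *
+ *	struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
+ *	{
+ *		struct foo_bo *bo = to_foo_bo(obj);
+ *
+ *		return drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+ *	}
+ */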
+
+/**
+ * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
+ * @sgt: scatter-gather table to convert
+ * @pages: array of page pointers to store the page array in
+ * @addrs: optional array to store the dma bus address of each page
+ * @max_pages: size of both the passed-in arrays
+ *
+ * Exports an sg table into an array of pages and addresses. This is currently
+ * required by the TTM driver in order to do correct fault handling.
+ */
+int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
+ dma_addr_t *addrs, int max_pages)
+{
+ unsigned count;
+ struct scatterlist *sg;
+ struct page *page;
+ u32 len;
+ int pg_index;
+ dma_addr_t addr;
+
+ pg_index = 0;
+ for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+ len = sg->length;
+ page = sg_page(sg);
+ addr = sg_dma_address(sg);
+
+ while (len > 0) {
+ if (WARN_ON(pg_index >= max_pages))
+ return -1;
+ pages[pg_index] = page;
+ if (addrs)
+ addrs[pg_index] = addr;
+
+ page++;
+ addr += PAGE_SIZE;
+ len -= PAGE_SIZE;
+ pg_index++;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
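+
+/*
+ * Typical use in an importing driver that needs a flat page array for
+ * fault handling; a sketch (bo->pages is a hypothetical per-object array
+ * sized obj->size >> PAGE_SHIFT):
+ *
+ *	npages = obj->size >> PAGE_SHIFT;
+ *	ret = drm_prime_sg_to_page_addr_arrays(sgt, bo->pages, NULL, npages);
+ */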
+
+/**
+ * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
+ * @obj: GEM object which was created from a dma-buf
+ * @sg: the sg-table which was pinned at import time
+ *
+ * This is the cleanup function which GEM drivers need to call when they use
+ * @drm_gem_prime_import to import dma-bufs.
+ */
+void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
+{
+ struct dma_buf_attachment *attach;
+ struct dma_buf *dma_buf;
+ attach = obj->import_attach;
+ if (sg)
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+ dma_buf = attach->dmabuf;
+ dma_buf_detach(attach->dmabuf, attach);
+ /* remove the reference */
+ dma_buf_put(dma_buf);
+}
+EXPORT_SYMBOL(drm_prime_gem_destroy);
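+
+/*
+ * A driver's free-object path would use it roughly like this (foo_* names
+ * hypothetical):
+ *
+ *	static void foo_gem_free_object(struct drm_gem_object *obj)
+ *	{
+ *		struct foo_bo *bo = to_foo_bo(obj);
+ *
+ *		if (obj->import_attach)
+ *			drm_prime_gem_destroy(obj, bo->sgt);
+ *		...
+ *	}
+ */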
+
+void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
+{
+ INIT_LIST_HEAD(&prime_fpriv->head);
+ mutex_init(&prime_fpriv->lock);
+}
+
+void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
+{
+ /* by now drm_gem_release should've made sure the list is empty */
+ WARN_ON(!list_empty(&prime_fpriv->head));
+}
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
new file mode 100644
index 00000000000..d22676b89cb
--- /dev/null
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -0,0 +1,448 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/export.h>
+#include <linux/moduleparam.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_edid.h>
+
+/**
+ * DOC: output probing helper overview
+ *
+ * This library provides some helper code for output probing. It provides an
+ * implementation of the core connector->fill_modes interface with
+ * drm_helper_probe_single_connector_modes.
+ *
+ * It also provides support for polling connectors with a work item and for
+ * generic hotplug interrupt handling where the driver doesn't or cannot keep
+ * track of a per-connector hpd interrupt.
+ *
+ * This helper library can be used independently of the modeset helper library.
+ * Drivers can also override different parts, e.g. use their own hotplug
+ * handling code to avoid probing unrelated outputs.
+ */
+
+static bool drm_kms_helper_poll = true;
+module_param_named(poll, drm_kms_helper_poll, bool, 0600);
+
+static void drm_mode_validate_flag(struct drm_connector *connector,
+ int flags)
+{
+ struct drm_display_mode *mode;
+
+ if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
+ DRM_MODE_FLAG_3D_MASK))
+ return;
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+ !(flags & DRM_MODE_FLAG_INTERLACE))
+ mode->status = MODE_NO_INTERLACE;
+ if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
+ !(flags & DRM_MODE_FLAG_DBLSCAN))
+ mode->status = MODE_NO_DBLESCAN;
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
+ !(flags & DRM_MODE_FLAG_3D_MASK))
+ mode->status = MODE_NO_STEREO;
+ }
+
+ return;
+}
+
+static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY, bool merge_type_bits)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode;
+ struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ int count = 0;
+ int mode_flags = 0;
+ bool verbose_prune = true;
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
+ connector->name);
+ /* set all modes to the unverified state */
+ list_for_each_entry(mode, &connector->modes, head)
+ mode->status = MODE_UNVERIFIED;
+
+ if (connector->force) {
+ if (connector->force == DRM_FORCE_ON)
+ connector->status = connector_status_connected;
+ else
+ connector->status = connector_status_disconnected;
+ if (connector->funcs->force)
+ connector->funcs->force(connector);
+ } else {
+ connector->status = connector->funcs->detect(connector, true);
+ }
+
+ /* Re-enable polling in case the global poll config changed. */
+ if (drm_kms_helper_poll != dev->mode_config.poll_running)
+ drm_kms_helper_poll_enable(dev);
+
+ dev->mode_config.poll_running = drm_kms_helper_poll;
+
+ if (connector->status == connector_status_disconnected) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
+ connector->base.id, connector->name);
+ drm_mode_connector_update_edid_property(connector, NULL);
+ verbose_prune = false;
+ goto prune;
+ }
+
+#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
+ count = drm_load_edid_firmware(connector);
+ if (count == 0)
+#endif
+ count = (*connector_funcs->get_modes)(connector);
+
+ if (count == 0 && connector->status == connector_status_connected)
+ count = drm_add_modes_noedid(connector, 1024, 768);
+ if (count == 0)
+ goto prune;
+
+ drm_mode_connector_list_update(connector, merge_type_bits);
+
+ if (maxX && maxY)
+ drm_mode_validate_size(dev, &connector->modes, maxX, maxY);
+
+ if (connector->interlace_allowed)
+ mode_flags |= DRM_MODE_FLAG_INTERLACE;
+ if (connector->doublescan_allowed)
+ mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+ if (connector->stereo_allowed)
+ mode_flags |= DRM_MODE_FLAG_3D_MASK;
+ drm_mode_validate_flag(connector, mode_flags);
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (mode->status == MODE_OK && connector_funcs->mode_valid)
+ mode->status = connector_funcs->mode_valid(connector,
+ mode);
+ }
+
+prune:
+ drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
+
+ if (list_empty(&connector->modes))
+ return 0;
+
+ list_for_each_entry(mode, &connector->modes, head)
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ drm_mode_sort(&connector->modes);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
+ connector->name);
+ list_for_each_entry(mode, &connector->modes, head) {
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_debug_printmodeline(mode);
+ }
+
+ return count;
+}
+
+/**
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
+ * @connector: connector to probe
+ * @maxX: max width for modes
+ * @maxY: max height for modes
+ *
+ * Based on the helper callbacks implemented by @connector try to detect all
+ * valid modes. Modes will first be added to the connector's probed_modes list,
+ * then culled (based on validity and the @maxX, @maxY parameters) and put into
+ * the normal modes list.
+ *
+ * Intended to be used as a generic implementation of the ->fill_modes()
+ * @connector vfunc for drivers that use the crtc helpers for output mode
+ * filtering and detection.
+ *
+ * Returns:
+ * The number of modes found on @connector.
+ */
+int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, true);
+}
+EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
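+
+/*
+ * Drivers typically plug this straight into their connector funcs; a
+ * sketch (foo_connector_detect is a hypothetical driver callback):
+ *
+ *	static const struct drm_connector_funcs foo_connector_funcs = {
+ *		.detect = foo_connector_detect,
+ *		.fill_modes = drm_helper_probe_single_connector_modes,
+ *		...
+ *	};
+ */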
+
+/**
+ * drm_helper_probe_single_connector_modes_nomerge - get complete set of display modes
+ * @connector: connector to probe
+ * @maxX: max width for modes
+ * @maxY: max height for modes
+ *
+ * This operates like drm_helper_probe_single_connector_modes except it
+ * replaces the mode bits instead of merging them for preferred modes.
+ */
+int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, false);
+}
+EXPORT_SYMBOL(drm_helper_probe_single_connector_modes_nomerge);
+
+/**
+ * drm_kms_helper_hotplug_event - fire off KMS hotplug events
+ * @dev: drm_device whose connector state changed
+ *
+ * This function fires off the uevent for userspace and also calls the
+ * output_poll_changed function, which is most commonly used to inform the fbdev
+ * emulation code and allow it to update the fbcon output configuration.
+ *
+ * Drivers should call this from their hotplug handling code when a change is
+ * detected. Note that this function does not do any output detection of its
+ * own, like drm_helper_hpd_irq_event() does - this is assumed to be done by the
+ * driver already.
+ *
+ * This function must be called from process context with no mode
+ * setting locks held.
+ */
+void drm_kms_helper_hotplug_event(struct drm_device *dev)
+{
+ /* send a uevent + call fbdev */
+ drm_sysfs_hotplug_event(dev);
+ if (dev->mode_config.funcs->output_poll_changed)
+ dev->mode_config.funcs->output_poll_changed(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
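+
+/*
+ * Sketch of a driver hotplug worker that has already done its own output
+ * detection (foo_* names hypothetical):
+ *
+ *	static void foo_hotplug_work(struct work_struct *work)
+ *	{
+ *		struct foo_device *foo =
+ *			container_of(work, struct foo_device, hotplug_work);
+ *
+ *		foo_update_connector_status(foo);
+ *		drm_kms_helper_hotplug_event(foo->drm_dev);
+ *	}
+ */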
+
+#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+static void output_poll_execute(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
+ struct drm_connector *connector;
+ enum drm_connector_status old_status;
+ bool repoll = false, changed = false;
+
+ if (!drm_kms_helper_poll)
+ return;
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ /* Ignore forced connectors. */
+ if (connector->force)
+ continue;
+
+ /* Ignore HPD capable connectors and connectors where we don't
+ * want any hotplug detection at all for polling. */
+ if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
+ continue;
+
+ repoll = true;
+
+ old_status = connector->status;
+ /* if we are connected and don't want to poll for disconnect,
+ * skip it */
+ if (old_status == connector_status_connected &&
+ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
+ continue;
+
+ connector->status = connector->funcs->detect(connector, false);
+ if (old_status != connector->status) {
+ const char *old, *new;
+
+ old = drm_get_connector_status_name(old_status);
+ new = drm_get_connector_status_name(connector->status);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
+ "status updated from %s to %s\n",
+ connector->base.id,
+ connector->name,
+ old, new);
+
+ changed = true;
+ }
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
+
+ if (repoll)
+ schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
+}
+
+/**
+ * drm_kms_helper_poll_disable - disable output polling
+ * @dev: drm_device
+ *
+ * This function disables the output polling work.
+ *
+ * Drivers can call this helper from their device suspend implementation. It is
+ * not an error to call this even when output polling isn't enabled or already
+ * disabled.
+ */
+void drm_kms_helper_poll_disable(struct drm_device *dev)
+{
+ if (!dev->mode_config.poll_enabled)
+ return;
+ cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_disable);
+
+/**
+ * drm_kms_helper_poll_enable - re-enable output polling.
+ * @dev: drm_device
+ *
+ * This function re-enables the output polling work.
+ *
+ * Drivers can call this helper from their device resume implementation. It is
+ * an error to call this when the output polling support has not yet been set
+ * up.
+ */
+void drm_kms_helper_poll_enable(struct drm_device *dev)
+{
+ bool poll = false;
+ struct drm_connector *connector;
+
+ if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+ return;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT))
+ poll = true;
+ }
+
+ if (poll)
+ schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+
+/**
+ * drm_kms_helper_poll_init - initialize and enable output polling
+ * @dev: drm_device
+ *
+ * This function initializes and then also enables output polling support for
+ * @dev. Drivers which do not have reliable hotplug support in hardware can use
+ * this helper infrastructure to regularly poll such connectors for changes in
+ * their connection state.
+ *
+ * Drivers can control which connectors are polled by setting the
+ * DRM_CONNECTOR_POLL_CONNECT and DRM_CONNECTOR_POLL_DISCONNECT flags. On
+ * connectors where probing live outputs can result in visual distortion,
+ * drivers should not set the DRM_CONNECTOR_POLL_DISCONNECT flag.
+ * Connectors which have no flag or only DRM_CONNECTOR_POLL_HPD set are
+ * completely ignored by the polling logic.
+ *
+ * Note that a connector can be both polled and probed from the hotplug handler,
+ * in case the hotplug interrupt is known to be unreliable.
+ */
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+ INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
+ dev->mode_config.poll_enabled = true;
+
+ drm_kms_helper_poll_enable(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_init);
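+
+/*
+ * A driver would set the poll flags on its connectors at load time and
+ * then start polling; a sketch for a hypothetical analog connector
+ * without hotplug support:
+ *
+ *	vga_connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ *				DRM_CONNECTOR_POLL_DISCONNECT;
+ *	drm_kms_helper_poll_init(dev);
+ *
+ * Suspend/resume paths then pair drm_kms_helper_poll_disable() with
+ * drm_kms_helper_poll_enable().
+ */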
+
+/**
+ * drm_kms_helper_poll_fini - disable output polling and clean it up
+ * @dev: drm_device
+ */
+void drm_kms_helper_poll_fini(struct drm_device *dev)
+{
+ drm_kms_helper_poll_disable(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+
+/**
+ * drm_helper_hpd_irq_event - hotplug processing
+ * @dev: drm_device
+ *
+ * Drivers can use this helper function to run a detect cycle on all connectors
+ * which have the DRM_CONNECTOR_POLL_HPD flag set in their &polled member. All
+ * other connectors are ignored, which is useful to avoid reprobing fixed
+ * panels.
+ *
+ * This helper function is useful for drivers which can't or don't track hotplug
+ * interrupts for each connector.
+ *
+ * Drivers which support hotplug interrupts for each connector individually and
+ * which have a more fine-grained detect logic should bypass this code and
+ * directly call drm_kms_helper_hotplug_event() in case the connector state
+ * changed.
+ *
+ * This function must be called from process context with no mode
+ * setting locks held.
+ *
+ * Note that a connector can be both polled and probed from the hotplug handler,
+ * in case the hotplug interrupt is known to be unreliable.
+ */
+bool drm_helper_hpd_irq_event(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ enum drm_connector_status old_status;
+ bool changed = false;
+
+ if (!dev->mode_config.poll_enabled)
+ return false;
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ /* Only handle HPD capable connectors. */
+ if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ continue;
+
+ old_status = connector->status;
+
+ connector->status = connector->funcs->detect(connector, false);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
+ connector->base.id,
+ connector->name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->status));
+ if (old_status != connector->status)
+ changed = true;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
+
+ return changed;
+}
+EXPORT_SYMBOL(drm_helper_hpd_irq_event);
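+
+/*
+ * Sketch of a threaded interrupt handler for a shared HPD line; threaded
+ * IRQ context satisfies the process-context requirement (foo_* names
+ * hypothetical):
+ *
+ *	static irqreturn_t foo_hpd_irq_thread(int irq, void *arg)
+ *	{
+ *		struct drm_device *dev = arg;
+ *
+ *		drm_helper_hpd_irq_event(dev);
+ *		return IRQ_HANDLED;
+ *	}
+ */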
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
deleted file mode 100644
index d379c4f2892..00000000000
--- a/drivers/gpu/drm/drm_proc.c
+++ /dev/null
@@ -1,234 +0,0 @@
-/**
- * \file drm_proc.c
- * /proc support for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * \par Acknowledgements:
- * Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
- * the problem with the proc files not outputting all their information.
- */
-
-/*
- * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/seq_file.h>
-#include "drmP.h"
-
-/***************************************************
- * Initialization, etc.
- **************************************************/
-
-/**
- * Proc file list.
- */
-static struct drm_info_list drm_proc_list[] = {
- {"name", drm_name_info, 0},
- {"vm", drm_vm_info, 0},
- {"clients", drm_clients_info, 0},
- {"queues", drm_queues_info, 0},
- {"bufs", drm_bufs_info, 0},
- {"gem_names", drm_gem_name_info, DRIVER_GEM},
- {"gem_objects", drm_gem_object_info, DRIVER_GEM},
-#if DRM_DEBUG_CODE
- {"vma", drm_vma_info, 0},
-#endif
-};
-#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
-
-static int drm_proc_open(struct inode *inode, struct file *file)
-{
- struct drm_info_node* node = PDE(inode)->data;
-
- return single_open(file, node->info_ent->show, node);
-}
-
-static const struct file_operations drm_proc_fops = {
- .owner = THIS_MODULE,
- .open = drm_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-
-/**
- * Initialize a given set of proc files for a device
- *
- * \param files The array of files to create
- * \param count The number of files given
- * \param root DRI proc dir entry.
- * \param minor device minor number
- * \return Zero on success, non-zero on failure
- *
- * Create a given set of proc files represented by an array of
- * gdm_proc_lists in the given root directory.
- */
-int drm_proc_create_files(struct drm_info_list *files, int count,
- struct proc_dir_entry *root, struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
- struct proc_dir_entry *ent;
- struct drm_info_node *tmp;
- char name[64];
- int i, ret;
-
- for (i = 0; i < count; i++) {
- u32 features = files[i].driver_features;
-
- if (features != 0 &&
- (dev->driver->driver_features & features) != features)
- continue;
-
- tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (tmp == NULL) {
- ret = -1;
- goto fail;
- }
- tmp->minor = minor;
- tmp->info_ent = &files[i];
- list_add(&tmp->list, &minor->proc_nodes.list);
-
- ent = proc_create_data(files[i].name, S_IRUGO, root,
- &drm_proc_fops, tmp);
- if (!ent) {
- DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
- name, files[i].name);
- list_del(&tmp->list);
- kfree(tmp);
- ret = -1;
- goto fail;
- }
-
- }
- return 0;
-
-fail:
- for (i = 0; i < count; i++)
- remove_proc_entry(drm_proc_list[i].name, minor->proc_root);
- return ret;
-}
-
-/**
- * Initialize the DRI proc filesystem for a device
- *
- * \param dev DRM device
- * \param minor device minor number
- * \param root DRI proc dir entry.
- * \param dev_root resulting DRI device proc dir entry.
- * \return root entry pointer on success, or NULL on failure.
- *
- * Create the DRI proc root entry "/proc/dri", the device proc root entry
- * "/proc/dri/%minor%/", and each entry in proc_list as
- * "/proc/dri/%minor%/%name%".
- */
-int drm_proc_init(struct drm_minor *minor, int minor_id,
- struct proc_dir_entry *root)
-{
- struct drm_device *dev = minor->dev;
- char name[64];
- int ret;
-
- INIT_LIST_HEAD(&minor->proc_nodes.list);
- sprintf(name, "%d", minor_id);
- minor->proc_root = proc_mkdir(name, root);
- if (!minor->proc_root) {
- DRM_ERROR("Cannot create /proc/dri/%s\n", name);
- return -1;
- }
-
- ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
- minor->proc_root, minor);
- if (ret) {
- remove_proc_entry(name, root);
- minor->proc_root = NULL;
- DRM_ERROR("Failed to create core drm proc files\n");
- return ret;
- }
-
- if (dev->driver->proc_init) {
- ret = dev->driver->proc_init(minor);
- if (ret) {
- DRM_ERROR("DRM: Driver failed to initialize "
- "/proc/dri.\n");
- return ret;
- }
- }
- return 0;
-}
-
-int drm_proc_remove_files(struct drm_info_list *files, int count,
- struct drm_minor *minor)
-{
- struct list_head *pos, *q;
- struct drm_info_node *tmp;
- int i;
-
- for (i = 0; i < count; i++) {
- list_for_each_safe(pos, q, &minor->proc_nodes.list) {
- tmp = list_entry(pos, struct drm_info_node, list);
- if (tmp->info_ent == &files[i]) {
- remove_proc_entry(files[i].name,
- minor->proc_root);
- list_del(pos);
- kfree(tmp);
- }
- }
- }
- return 0;
-}
-
-/**
- * Cleanup the proc filesystem resources.
- *
- * \param minor device minor number.
- * \param root DRI proc dir entry.
- * \param dev_root DRI device proc dir entry.
- * \return always zero.
- *
- * Remove all proc entries created by proc_init().
- */
-int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
-{
- struct drm_device *dev = minor->dev;
- char name[64];
-
- if (!root || !minor->proc_root)
- return 0;
-
- if (dev->driver->proc_cleanup)
- dev->driver->proc_cleanup(minor);
-
- drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
-
- sprintf(name, "%d", minor->index);
- remove_proc_entry(name, root);
-
- return 0;
-}
-
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
new file mode 100644
index 00000000000..7047ca02578
--- /dev/null
+++ b/drivers/gpu/drm/drm_rect.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (C) 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <drm/drmP.h>
+#include <drm/drm_rect.h>
+
+/**
+ * drm_rect_intersect - intersect two rectangles
+ * @r1: first rectangle
+ * @r2: second rectangle
+ *
+ * Calculate the intersection of rectangles @r1 and @r2.
+ * @r1 will be overwritten with the intersection.
+ *
+ * RETURNS:
+ * %true if rectangle @r1 is still visible after the operation,
+ * %false otherwise.
+ */
+bool drm_rect_intersect(struct drm_rect *r1, const struct drm_rect *r2)
+{
+ r1->x1 = max(r1->x1, r2->x1);
+ r1->y1 = max(r1->y1, r2->y1);
+ r1->x2 = min(r1->x2, r2->x2);
+ r1->y2 = min(r1->y2, r2->y2);
+
+ return drm_rect_visible(r1);
+}
+EXPORT_SYMBOL(drm_rect_intersect);
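+
+/*
+ * Worked example: for r1 = (0,0)-(4,4) and r2 = (2,2)-(6,6) the call
+ * clips r1 to (2,2)-(4,4) and returns true; with r2 = (5,5)-(6,6) the
+ * intersection is empty and the function returns false.
+ */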
+
+/**
+ * drm_rect_clip_scaled - perform a scaled clip operation
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @clip: clip rectangle
+ * @hscale: horizontal scaling factor
+ * @vscale: vertical scaling factor
+ *
+ * Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the
+ * same amounts multiplied by @hscale and @vscale.
+ *
+ * RETURNS:
+ * %true if rectangle @dst is still visible after being clipped,
+ * %false otherwise
+ */
+bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
+ const struct drm_rect *clip,
+ int hscale, int vscale)
+{
+ int diff;
+
+ diff = clip->x1 - dst->x1;
+ if (diff > 0) {
+ int64_t tmp = src->x1 + (int64_t) diff * hscale;
+ src->x1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+ }
+ diff = clip->y1 - dst->y1;
+ if (diff > 0) {
+ int64_t tmp = src->y1 + (int64_t) diff * vscale;
+ src->y1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+ }
+ diff = dst->x2 - clip->x2;
+ if (diff > 0) {
+ int64_t tmp = src->x2 - (int64_t) diff * hscale;
+ src->x2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+ }
+ diff = dst->y2 - clip->y2;
+ if (diff > 0) {
+ int64_t tmp = src->y2 - (int64_t) diff * vscale;
+ src->y2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+ }
+
+ return drm_rect_intersect(dst, clip);
+}
+EXPORT_SYMBOL(drm_rect_clip_scaled);
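+
+/*
+ * Worked example, with @src in 16.16 fixed point and hscale = 2 << 16
+ * (source twice as wide as the destination): clipping 10 pixels off the
+ * left edge of @dst advances src->x1 by 10 * (2 << 16), i.e. by 20 source
+ * pixels, so the two rectangles stay aligned.
+ */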
+
+static int drm_calc_scale(int src, int dst)
+{
+ int scale = 0;
+
+ if (src < 0 || dst < 0)
+ return -EINVAL;
+
+ if (dst == 0)
+ return 0;
+
+ scale = src / dst;
+
+ return scale;
+}
+
+/**
+ * drm_rect_calc_hscale - calculate the horizontal scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_hscale: minimum allowed horizontal scaling factor
+ * @max_hscale: maximum allowed horizontal scaling factor
+ *
+ * Calculate the horizontal scaling factor as
+ * (@src width) / (@dst width).
+ *
+ * RETURNS:
+ * The horizontal scaling factor, or errno if out of limits.
+ */
+int drm_rect_calc_hscale(const struct drm_rect *src,
+ const struct drm_rect *dst,
+ int min_hscale, int max_hscale)
+{
+ int src_w = drm_rect_width(src);
+ int dst_w = drm_rect_width(dst);
+ int hscale = drm_calc_scale(src_w, dst_w);
+
+ if (hscale < 0 || dst_w == 0)
+ return hscale;
+
+ if (hscale < min_hscale || hscale > max_hscale)
+ return -ERANGE;
+
+ return hscale;
+}
+EXPORT_SYMBOL(drm_rect_calc_hscale);
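+
+/*
+ * Worked example: a 1024-pixel wide source (src_w = 1024 << 16 in 16.16
+ * fixed point) scanned out onto a 512-pixel wide destination yields
+ * hscale = 2 << 16, a 2x downscale; the min/max limits let the caller
+ * reject ratios the hardware cannot handle.
+ */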
+
+/**
+ * drm_rect_calc_vscale - calculate the vertical scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_vscale: minimum allowed vertical scaling factor
+ * @max_vscale: maximum allowed vertical scaling factor
+ *
+ * Calculate the vertical scaling factor as
+ * (@src height) / (@dst height).
+ *
+ * RETURNS:
+ * The vertical scaling factor, or errno if out of limits.
+ */
+int drm_rect_calc_vscale(const struct drm_rect *src,
+ const struct drm_rect *dst,
+ int min_vscale, int max_vscale)
+{
+ int src_h = drm_rect_height(src);
+ int dst_h = drm_rect_height(dst);
+ int vscale = drm_calc_scale(src_h, dst_h);
+
+ if (vscale < 0 || dst_h == 0)
+ return vscale;
+
+ if (vscale < min_vscale || vscale > max_vscale)
+ return -ERANGE;
+
+ return vscale;
+}
+EXPORT_SYMBOL(drm_rect_calc_vscale);
+
+/**
+ * drm_rect_calc_hscale_relaxed - calculate the horizontal scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_hscale: minimum allowed horizontal scaling factor
+ * @max_hscale: maximum allowed horizontal scaling factor
+ *
+ * Calculate the horizontal scaling factor as
+ * (@src width) / (@dst width).
+ *
+ * If the calculated scaling factor is below @min_hscale,
+ * decrease the width of rectangle @dst to compensate.
+ *
+ * If the calculated scaling factor is above @max_hscale,
+ * decrease the width of rectangle @src to compensate.
+ *
+ * RETURNS:
+ * The horizontal scaling factor.
+ */
+int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
+ struct drm_rect *dst,
+ int min_hscale, int max_hscale)
+{
+ int src_w = drm_rect_width(src);
+ int dst_w = drm_rect_width(dst);
+ int hscale = drm_calc_scale(src_w, dst_w);
+
+ if (hscale < 0 || dst_w == 0)
+ return hscale;
+
+ if (hscale < min_hscale) {
+ int max_dst_w = src_w / min_hscale;
+
+ drm_rect_adjust_size(dst, max_dst_w - dst_w, 0);
+
+ return min_hscale;
+ }
+
+ if (hscale > max_hscale) {
+ int max_src_w = dst_w * max_hscale;
+
+ drm_rect_adjust_size(src, max_src_w - src_w, 0);
+
+ return max_hscale;
+ }
+
+ return hscale;
+}
+EXPORT_SYMBOL(drm_rect_calc_hscale_relaxed);
+
+/**
+ * drm_rect_calc_vscale_relaxed - calculate the vertical scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_vscale: minimum allowed vertical scaling factor
+ * @max_vscale: maximum allowed vertical scaling factor
+ *
+ * Calculate the vertical scaling factor as
+ * (@src height) / (@dst height).
+ *
+ * If the calculated scaling factor is below @min_vscale,
+ * decrease the height of rectangle @dst to compensate.
+ *
+ * If the calculated scaling factor is above @max_vscale,
+ * decrease the height of rectangle @src to compensate.
+ *
+ * RETURNS:
+ * The vertical scaling factor.
+ */
+int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
+ struct drm_rect *dst,
+ int min_vscale, int max_vscale)
+{
+ int src_h = drm_rect_height(src);
+ int dst_h = drm_rect_height(dst);
+ int vscale = drm_calc_scale(src_h, dst_h);
+
+ if (vscale < 0 || dst_h == 0)
+ return vscale;
+
+ if (vscale < min_vscale) {
+ int max_dst_h = src_h / min_vscale;
+
+ drm_rect_adjust_size(dst, 0, max_dst_h - dst_h);
+
+ return min_vscale;
+ }
+
+ if (vscale > max_vscale) {
+ int max_src_h = dst_h * max_vscale;
+
+ drm_rect_adjust_size(src, 0, max_src_h - src_h);
+
+ return max_vscale;
+ }
+
+ return vscale;
+}
+EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed);
+
+/**
+ * drm_rect_debug_print - print the rectangle information
+ * @r: rectangle to print
+ * @fixed_point: rectangle is in 16.16 fixed point format
+ */
+void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point)
+{
+ int w = drm_rect_width(r);
+ int h = drm_rect_height(r);
+
+ if (fixed_point)
+ DRM_DEBUG_KMS("%d.%06ux%d.%06u%+d.%06u%+d.%06u\n",
+ w >> 16, ((w & 0xffff) * 15625) >> 10,
+ h >> 16, ((h & 0xffff) * 15625) >> 10,
+ r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10,
+ r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10);
+ else
+ DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
+}
+EXPORT_SYMBOL(drm_rect_debug_print);
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index c7823c863d4..1c78406f6e7 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -32,7 +32,8 @@
*/
#include <linux/vmalloc.h>
-#include "drmP.h"
+#include <linux/slab.h>
+#include <drm/drmP.h>
#define DEBUG_SCATTER 0
@@ -45,7 +46,7 @@ static inline void *drm_vmalloc_dma(unsigned long size)
#endif
}
-void drm_sg_cleanup(struct drm_sg_mem * entry)
+static void drm_sg_cleanup(struct drm_sg_mem * entry)
{
struct page *page;
int i;
@@ -63,49 +64,58 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
kfree(entry);
}
+void drm_legacy_sg_cleanup(struct drm_device *dev)
+{
+ if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_sg_cleanup(dev->sg);
+ dev->sg = NULL;
+ }
+}
#ifdef _LP64
# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
#else
# define ScatterHandle(x) (unsigned int)(x)
#endif
-int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
+int drm_sg_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
+ struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
unsigned long pages, i, j;
DRM_DEBUG("\n");
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
if (dev->sg)
return -EINVAL;
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
- memset(entry, 0, sizeof(*entry));
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
entry->pages = pages;
- entry->pagelist = kmalloc(pages * sizeof(*entry->pagelist), GFP_KERNEL);
+ entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);
if (!entry->pagelist) {
kfree(entry);
return -ENOMEM;
}
- memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
-
- entry->busaddr = kmalloc(pages * sizeof(*entry->busaddr), GFP_KERNEL);
+ entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);
if (!entry->busaddr) {
kfree(entry->pagelist);
kfree(entry);
return -ENOMEM;
}
- memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
if (!entry->virtual) {
@@ -183,17 +193,6 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
drm_sg_cleanup(entry);
return -ENOMEM;
}
-EXPORT_SYMBOL(drm_sg_alloc);
-
-
-int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_scatter_gather *request = data;
-
- return drm_sg_alloc(dev, request);
-
-}
int drm_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -201,6 +200,9 @@ int drm_sg_free(struct drm_device *dev, void *data,
struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
deleted file mode 100644
index 463aed9403d..00000000000
--- a/drivers/gpu/drm/drm_sman.c
+++ /dev/null
@@ -1,352 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Simple memory manager interface that keeps track on allocate regions on a
- * per "owner" basis. All regions associated with an "owner" can be released
- * with a simple call. Typically if the "owner" exists. The owner is any
- * "unsigned long" identifier. Can typically be a pointer to a file private
- * struct or a context identifier.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drm_sman.h"
-
-struct drm_owner_item {
- struct drm_hash_item owner_hash;
- struct list_head sman_list;
- struct list_head mem_blocks;
-};
-
-void drm_sman_takedown(struct drm_sman * sman)
-{
- drm_ht_remove(&sman->user_hash_tab);
- drm_ht_remove(&sman->owner_hash_tab);
- kfree(sman->mm);
-}
-
-EXPORT_SYMBOL(drm_sman_takedown);
-
-int
-drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
- unsigned int user_order, unsigned int owner_order)
-{
- int ret = 0;
-
- sman->mm = (struct drm_sman_mm *) kcalloc(num_managers,
- sizeof(*sman->mm),
- GFP_KERNEL);
- if (!sman->mm) {
- ret = -ENOMEM;
- goto out;
- }
- sman->num_managers = num_managers;
- INIT_LIST_HEAD(&sman->owner_items);
- ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
- if (ret)
- goto out1;
- ret = drm_ht_create(&sman->user_hash_tab, user_order);
- if (!ret)
- goto out;
-
- drm_ht_remove(&sman->owner_hash_tab);
-out1:
- kfree(sman->mm);
-out:
- return ret;
-}
-
-EXPORT_SYMBOL(drm_sman_init);
-
-static void *drm_sman_mm_allocate(void *private, unsigned long size,
- unsigned alignment)
-{
- struct drm_mm *mm = (struct drm_mm *) private;
- struct drm_mm_node *tmp;
-
- tmp = drm_mm_search_free(mm, size, alignment, 1);
- if (!tmp) {
- return NULL;
- }
- tmp = drm_mm_get_block(tmp, size, alignment);
- return tmp;
-}
-
-static void drm_sman_mm_free(void *private, void *ref)
-{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
-
- drm_mm_put_block(node);
-}
-
-static void drm_sman_mm_destroy(void *private)
-{
- struct drm_mm *mm = (struct drm_mm *) private;
- drm_mm_takedown(mm);
- kfree(mm);
-}
-
-static unsigned long drm_sman_mm_offset(void *private, void *ref)
-{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
- return node->start;
-}
-
-int
-drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
- unsigned long start, unsigned long size)
-{
- struct drm_sman_mm *sman_mm;
- struct drm_mm *mm;
- int ret;
-
- BUG_ON(manager >= sman->num_managers);
-
- sman_mm = &sman->mm[manager];
- mm = kzalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm) {
- return -ENOMEM;
- }
- sman_mm->private = mm;
- ret = drm_mm_init(mm, start, size);
-
- if (ret) {
- kfree(mm);
- return ret;
- }
-
- sman_mm->allocate = drm_sman_mm_allocate;
- sman_mm->free = drm_sman_mm_free;
- sman_mm->destroy = drm_sman_mm_destroy;
- sman_mm->offset = drm_sman_mm_offset;
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_set_range);
-
-int
-drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
- struct drm_sman_mm * allocator)
-{
- BUG_ON(manager >= sman->num_managers);
- sman->mm[manager] = *allocator;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_sman_set_manager);
-
-static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
- unsigned long owner)
-{
- int ret;
- struct drm_hash_item *owner_hash_item;
- struct drm_owner_item *owner_item;
-
- ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
- if (!ret) {
- return drm_hash_entry(owner_hash_item, struct drm_owner_item,
- owner_hash);
- }
-
- owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
- if (!owner_item)
- goto out;
-
- INIT_LIST_HEAD(&owner_item->mem_blocks);
- owner_item->owner_hash.key = owner;
- if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
- goto out1;
-
- list_add_tail(&owner_item->sman_list, &sman->owner_items);
- return owner_item;
-
-out1:
- kfree(owner_item);
-out:
- return NULL;
-}
-
-struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
- unsigned long size, unsigned alignment,
- unsigned long owner)
-{
- void *tmp;
- struct drm_sman_mm *sman_mm;
- struct drm_owner_item *owner_item;
- struct drm_memblock_item *memblock;
-
- BUG_ON(manager >= sman->num_managers);
-
- sman_mm = &sman->mm[manager];
- tmp = sman_mm->allocate(sman_mm->private, size, alignment);
-
- if (!tmp) {
- return NULL;
- }
-
- memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);
-
- if (!memblock)
- goto out;
-
- memblock->mm_info = tmp;
- memblock->mm = sman_mm;
- memblock->sman = sman;
-
- if (drm_ht_just_insert_please
- (&sman->user_hash_tab, &memblock->user_hash,
- (unsigned long)memblock, 32, 0, 0))
- goto out1;
-
- owner_item = drm_sman_get_owner_item(sman, owner);
- if (!owner_item)
- goto out2;
-
- list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
-
- return memblock;
-
-out2:
- drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
-out1:
- kfree(memblock);
-out:
- sman_mm->free(sman_mm->private, tmp);
-
- return NULL;
-}
-
-EXPORT_SYMBOL(drm_sman_alloc);
-
-static void drm_sman_free(struct drm_memblock_item *item)
-{
- struct drm_sman *sman = item->sman;
-
- list_del(&item->owner_list);
- drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
- item->mm->free(item->mm->private, item->mm_info);
- kfree(item);
-}
-
-int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
-{
- struct drm_hash_item *hash_item;
- struct drm_memblock_item *memblock_item;
-
- if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
- return -EINVAL;
-
- memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
- user_hash);
- drm_sman_free(memblock_item);
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_free_key);
-
-static void drm_sman_remove_owner(struct drm_sman *sman,
- struct drm_owner_item *owner_item)
-{
- list_del(&owner_item->sman_list);
- drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
- kfree(owner_item);
-}
-
-int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
-{
-
- struct drm_hash_item *hash_item;
- struct drm_owner_item *owner_item;
-
- if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
- return -1;
- }
-
- owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
- if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
- drm_sman_remove_owner(sman, owner_item);
- return -1;
- }
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_owner_clean);
-
-static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
- struct drm_owner_item *owner_item)
-{
- struct drm_memblock_item *entry, *next;
-
- list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
- owner_list) {
- drm_sman_free(entry);
- }
- drm_sman_remove_owner(sman, owner_item);
-}
-
-void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
-{
-
- struct drm_hash_item *hash_item;
- struct drm_owner_item *owner_item;
-
- if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
-
- return;
- }
-
- owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
- drm_sman_do_owner_cleanup(sman, owner_item);
-}
-
-EXPORT_SYMBOL(drm_sman_owner_cleanup);
-
-void drm_sman_cleanup(struct drm_sman *sman)
-{
- struct drm_owner_item *entry, *next;
- unsigned int i;
- struct drm_sman_mm *sman_mm;
-
- list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
- drm_sman_do_owner_cleanup(sman, entry);
- }
- if (sman->mm) {
- for (i = 0; i < sman->num_managers; ++i) {
- sman_mm = &sman->mm[i];
- if (sman_mm->private) {
- sman_mm->destroy(sman_mm->private);
- sman_mm->private = NULL;
- }
- }
- }
-}
-
-EXPORT_SYMBOL(drm_sman_cleanup);
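For context, the removed interface was used roughly as follows (a minimal
sketch, assuming a single memory manager at index 0; the function names and
the use of the file pointer as "owner" are illustrative, not taken from any
particular driver):

/* one-time setup: one manager, 12-bit user and owner hash tables */
static struct drm_sman sman;

static int example_sman_setup(unsigned long start, unsigned long size)
{
	int ret = drm_sman_init(&sman, 1, 12, 12);
	if (ret)
		return ret;
	return drm_sman_set_range(&sman, 0, start, size);	/* manager 0 */
}

/* per-client allocation, keyed by the file pointer as "owner" */
static struct drm_memblock_item *example_sman_alloc(struct drm_file *file,
						    unsigned long size)
{
	return drm_sman_alloc(&sman, 0, size, 0, (unsigned long)file);
}

/* releasing everything a client owned took a single call */
static void example_sman_close(struct drm_file *file)
{
	drm_sman_owner_cleanup(&sman, (unsigned long)file);
}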
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index ad73e141afd..14d16464000 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -1,16 +1,11 @@
-/**
- * \file drm_stub.h
- * Stub support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- */
-
/*
* Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
*
* Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
+ * Author Rickard E. (Rik) Faith <faith@valinux.com>
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -31,77 +26,91 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include "drmP.h"
-#include "drm_core.h"
+#include <linux/mount.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm_core.h>
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
+unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */
+EXPORT_SYMBOL(drm_rnodes);
+
+/* 1 to allow user space to request universal planes (experimental) */
+unsigned int drm_universal_planes = 0;
+EXPORT_SYMBOL(drm_universal_planes);
+
+unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
+EXPORT_SYMBOL(drm_vblank_offdelay);
+
+unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
+EXPORT_SYMBOL(drm_timestamp_precision);
+
+/*
+ * Default to use monotonic timestamps for wait-for-vblank and page-flip
+ * complete events.
+ */
+unsigned int drm_timestamp_monotonic = 1;
+
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
+MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
module_param_named(debug, drm_debug, int, 0600);
+module_param_named(rnodes, drm_rnodes, int, 0600);
+module_param_named(universal_planes, drm_universal_planes, int, 0600);
+module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
+module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
+static DEFINE_SPINLOCK(drm_minor_lock);
struct idr drm_minors_idr;
struct class *drm_class;
-struct proc_dir_entry *drm_proc_root;
struct dentry *drm_debugfs_root;
-void drm_ut_debug_printk(unsigned int request_level,
- const char *prefix,
- const char *function_name,
- const char *format, ...)
+
+int drm_err(const char *func, const char *format, ...)
{
+ struct va_format vaf;
va_list args;
+ int r;
- if (drm_debug & request_level) {
- if (function_name)
- printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
- va_start(args, format);
- vprintk(format, args);
- va_end(args);
- }
+ va_start(args, format);
+
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
+
+ va_end(args);
+
+ return r;
}
-EXPORT_SYMBOL(drm_ut_debug_printk);
-static int drm_minor_get_id(struct drm_device *dev, int type)
+EXPORT_SYMBOL(drm_err);
+
+void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
- int new_id;
- int ret;
- int base = 0, limit = 63;
-
- if (type == DRM_MINOR_CONTROL) {
- base += 64;
- limit = base + 127;
- } else if (type == DRM_MINOR_RENDER) {
- base += 128;
- limit = base + 255;
- }
-
-again:
- if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
- mutex_lock(&dev->struct_mutex);
- ret = idr_get_new_above(&drm_minors_idr, NULL,
- base, &new_id);
- mutex_unlock(&dev->struct_mutex);
- if (ret == -EAGAIN) {
- goto again;
- } else if (ret) {
- return ret;
- }
+ struct va_format vaf;
+ va_list args;
- if (new_id >= limit) {
- idr_remove(&drm_minors_idr, new_id);
- return -EINVAL;
- }
- return new_id;
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
+
+ va_end(args);
}
+EXPORT_SYMBOL(drm_ut_debug_printk);
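These two helpers back the DRM_ERROR() and DRM_DEBUG() macros from drmP.h;
drivers do not normally call them directly. A typical call site looks like
this (a sketch; the probe function and its error value are hypothetical):

static int example_probe(struct drm_device *dev)
{
	int ret = -ENODEV;	/* placeholder result */

	DRM_DEBUG("probing device\n");	/* routed to drm_ut_debug_printk() */
	if (ret) {
		DRM_ERROR("probe failed: %d\n", ret);	/* routed to drm_err() */
		return ret;
	}
	return 0;
}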
struct drm_master *drm_master_create(struct drm_minor *minor)
{
@@ -114,12 +123,13 @@ struct drm_master *drm_master_create(struct drm_minor *minor)
kref_init(&master->refcount);
spin_lock_init(&master->lock.spinlock);
init_waitqueue_head(&master->lock.lock_queue);
- drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
+ if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
+ kfree(master);
+ return NULL;
+ }
INIT_LIST_HEAD(&master->magicfree);
master->minor = minor;
- list_add_tail(&master->head, &minor->master_list);
-
return master;
}
@@ -137,8 +147,7 @@ static void drm_master_destroy(struct kref *kref)
struct drm_device *dev = master->minor->dev;
struct drm_map_list *r_list, *list_temp;
- list_del(&master->head);
-
+ mutex_lock(&dev->struct_mutex);
if (dev->driver->master_destroy)
dev->driver->master_destroy(dev, master);
@@ -163,6 +172,7 @@ static void drm_master_destroy(struct kref *kref)
drm_ht_remove(&master->magiclist);
+ mutex_unlock(&dev->struct_mutex);
kfree(master);
}
@@ -178,382 +188,618 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
{
int ret = 0;
+ mutex_lock(&dev->master_mutex);
if (file_priv->is_master)
- return 0;
+ goto out_unlock;
+
+ if (file_priv->minor->master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
- if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
- return -EINVAL;
-
- if (!file_priv->master)
- return -EINVAL;
-
- if (!file_priv->minor->master &&
- file_priv->minor->master != file_priv->master) {
- mutex_lock(&dev->struct_mutex);
- file_priv->minor->master = drm_master_get(file_priv->master);
- file_priv->is_master = 1;
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0)) {
- file_priv->is_master = 0;
- drm_master_put(&file_priv->minor->master);
- }
+ if (!file_priv->master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ file_priv->minor->master = drm_master_get(file_priv->master);
+ file_priv->is_master = 1;
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, file_priv, false);
+ if (unlikely(ret != 0)) {
+ file_priv->is_master = 0;
+ drm_master_put(&file_priv->minor->master);
}
- mutex_unlock(&dev->struct_mutex);
}
- return 0;
+out_unlock:
+ mutex_unlock(&dev->master_mutex);
+ return ret;
}
int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ int ret = -EINVAL;
+
+ mutex_lock(&dev->master_mutex);
if (!file_priv->is_master)
- return -EINVAL;
+ goto out_unlock;
if (!file_priv->minor->master)
- return -EINVAL;
+ goto out_unlock;
- mutex_lock(&dev->struct_mutex);
+ ret = 0;
if (dev->driver->master_drop)
dev->driver->master_drop(dev, file_priv, false);
drm_master_put(&file_priv->minor->master);
file_priv->is_master = 0;
- mutex_unlock(&dev->struct_mutex);
- return 0;
+
+out_unlock:
+ mutex_unlock(&dev->master_mutex);
+ return ret;
}
-static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
- const struct pci_device_id *ent,
- struct drm_driver *driver)
+/*
+ * DRM Minors
+ * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
+ * of them is represented by a drm_minor object. Depending on the capabilities
+ * of the device-driver, different interfaces are registered.
+ *
+ * Minors can be accessed via dev->$minor_name. This pointer is either
+ * NULL or a valid drm_minor pointer and stays valid as long as the device is
+ * valid. This means DRM minors have the same life-time as the underlying
+ * device. However, this doesn't mean that the minor is active. Minors are
+ * registered and unregistered dynamically according to device-state.
+ */
+
+static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
+ unsigned int type)
{
- int retcode;
+ switch (type) {
+ case DRM_MINOR_LEGACY:
+ return &dev->primary;
+ case DRM_MINOR_RENDER:
+ return &dev->render;
+ case DRM_MINOR_CONTROL:
+ return &dev->control;
+ default:
+ return NULL;
+ }
+}
- INIT_LIST_HEAD(&dev->filelist);
- INIT_LIST_HEAD(&dev->ctxlist);
- INIT_LIST_HEAD(&dev->vmalist);
- INIT_LIST_HEAD(&dev->maplist);
- INIT_LIST_HEAD(&dev->vblank_event_list);
+static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor *minor;
- spin_lock_init(&dev->count_lock);
- spin_lock_init(&dev->drw_lock);
- spin_lock_init(&dev->event_lock);
- init_timer(&dev->timer);
- mutex_init(&dev->struct_mutex);
- mutex_init(&dev->ctxlist_mutex);
+ minor = kzalloc(sizeof(*minor), GFP_KERNEL);
+ if (!minor)
+ return -ENOMEM;
- idr_init(&dev->drw_idr);
+ minor->type = type;
+ minor->dev = dev;
- dev->pdev = pdev;
- dev->pci_device = pdev->device;
- dev->pci_vendor = pdev->vendor;
+ *drm_minor_get_slot(dev, type) = minor;
+ return 0;
+}
-#ifdef __alpha__
- dev->hose = pdev->sysdata;
-#endif
+static void drm_minor_free(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor **slot;
- if (drm_ht_create(&dev->map_hash, 12)) {
- return -ENOMEM;
+ slot = drm_minor_get_slot(dev, type);
+ if (*slot) {
+ drm_mode_group_destroy(&(*slot)->mode_group);
+ kfree(*slot);
+ *slot = NULL;
}
+}
- /* the DRM has 6 basic counters */
- dev->counters = 6;
- dev->types[0] = _DRM_STAT_LOCK;
- dev->types[1] = _DRM_STAT_OPENS;
- dev->types[2] = _DRM_STAT_CLOSES;
- dev->types[3] = _DRM_STAT_IOCTLS;
- dev->types[4] = _DRM_STAT_LOCKS;
- dev->types[5] = _DRM_STAT_UNLOCKS;
+static int drm_minor_register(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor *new_minor;
+ unsigned long flags;
+ int ret;
+ int minor_id;
- dev->driver = driver;
+ DRM_DEBUG("\n");
- if (drm_core_has_AGP(dev)) {
- if (drm_device_is_agp(dev))
- dev->agp = drm_agp_init(dev);
- if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
- && (dev->agp == NULL)) {
- DRM_ERROR("Cannot initialize the agpgart module.\n");
- retcode = -EINVAL;
- goto error_out_unreg;
- }
- if (drm_core_has_MTRR(dev)) {
- if (dev->agp)
- dev->agp->agp_mtrr =
- mtrr_add(dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
- }
- }
+ new_minor = *drm_minor_get_slot(dev, type);
+ if (!new_minor)
+ return 0;
+ idr_preload(GFP_KERNEL);
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ minor_id = idr_alloc(&drm_minors_idr,
+ NULL,
+ 64 * type,
+ 64 * (type + 1),
+ GFP_NOWAIT);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+ idr_preload_end();
- retcode = drm_ctxbitmap_init(dev);
- if (retcode) {
- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
- goto error_out_unreg;
+ if (minor_id < 0)
+ return minor_id;
+
+ new_minor->index = minor_id;
+
+ ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
+ if (ret) {
+ DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
+ goto err_id;
}
- if (driver->driver_features & DRIVER_GEM) {
- retcode = drm_gem_init(dev);
- if (retcode) {
- DRM_ERROR("Cannot initialize graphics execution "
- "manager (GEM)\n");
- goto error_out_unreg;
- }
+ ret = drm_sysfs_device_add(new_minor);
+ if (ret) {
+ DRM_ERROR("DRM: Error sysfs_device_add.\n");
+ goto err_debugfs;
}
+ /* replace NULL with @minor so lookups will succeed from now on */
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_replace(&drm_minors_idr, new_minor, new_minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+ DRM_DEBUG("new minor assigned %d\n", minor_id);
return 0;
- error_out_unreg:
- drm_lastclose(dev);
- return retcode;
+err_debugfs:
+ drm_debugfs_cleanup(new_minor);
+err_id:
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_remove(&drm_minors_idr, minor_id);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+ new_minor->index = 0;
+ return ret;
}
+static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor *minor;
+ unsigned long flags;
+
+ minor = *drm_minor_get_slot(dev, type);
+ if (!minor || !minor->kdev)
+ return;
+
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_remove(&drm_minors_idr, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+ minor->index = 0;
+
+ drm_debugfs_cleanup(minor);
+ drm_sysfs_device_remove(minor);
+}
/**
- * Get a secondary minor number.
+ * drm_minor_acquire - Acquire a DRM minor
+ * @minor_id: Minor ID of the DRM-minor
*
- * \param dev device data structure
- * \param sec-minor structure to hold the assigned minor
- * \return negative number on failure.
+ * Looks up the given minor-ID and returns the respective DRM-minor object. The
+ * refence-count of the underlying device is increased so you must release this
+ * object with drm_minor_release().
*
- * Search for an empty entry and initialize it to the given parameters, and
- * create the proc init entry via proc_init(). This routine assigns
- * minor numbers to secondary heads of multi-headed cards.
+ * As long as you hold this minor, it is guaranteed that the object and the
+ * minor->dev pointer will stay valid! However, the device may get unplugged and
+ * unregistered while you hold the minor.
+ *
+ * Returns:
+ * Pointer to minor-object with increased device-refcount, or PTR_ERR on
+ * failure.
*/
-static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
+struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
- struct drm_minor *new_minor;
- int ret;
- int minor_id;
+ struct drm_minor *minor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (minor)
+ drm_dev_ref(minor->dev);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+ if (!minor) {
+ return ERR_PTR(-ENODEV);
+ } else if (drm_device_is_unplugged(minor->dev)) {
+ drm_dev_unref(minor->dev);
+ return ERR_PTR(-ENODEV);
+ }
- DRM_DEBUG("\n");
+ return minor;
+}
- minor_id = drm_minor_get_id(dev, type);
- if (minor_id < 0)
- return minor_id;
+/**
+ * drm_minor_release - Release DRM minor
+ * @minor: Pointer to DRM minor object
+ *
+ * Release a minor that was previously acquired via drm_minor_acquire().
+ */
+void drm_minor_release(struct drm_minor *minor)
+{
+ drm_dev_unref(minor->dev);
+}
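A minimal sketch of the acquire/release pattern; the open helper shown here is
hypothetical, the real caller being the DRM file-open path:

static int example_open(struct inode *inode, struct file *filp)
{
	struct drm_minor *minor;

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);	/* -ENODEV if unknown or unplugged */

	/* ... use minor->dev; the device cannot be freed while held ... */

	drm_minor_release(minor);
	return 0;
}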
- new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
- if (!new_minor) {
- ret = -ENOMEM;
- goto err_idr;
+/**
+ * drm_put_dev - Unregister and release a DRM device
+ * @dev: DRM device
+ *
+ * Called at module unload time or when a PCI device is unplugged.
+ *
+ * Use of this function is discouraged. It will eventually go away completely.
+ * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
+ *
+ * Cleans up the DRM device, calling drm_lastclose().
+ */
+void drm_put_dev(struct drm_device *dev)
+{
+ DRM_DEBUG("\n");
+
+ if (!dev) {
+ DRM_ERROR("cleanup called no dev\n");
+ return;
}
- new_minor->type = type;
- new_minor->device = MKDEV(DRM_MAJOR, minor_id);
- new_minor->dev = dev;
- new_minor->index = minor_id;
- INIT_LIST_HEAD(&new_minor->master_list);
+ drm_dev_unregister(dev);
+ drm_dev_unref(dev);
+}
+EXPORT_SYMBOL(drm_put_dev);
- idr_replace(&drm_minors_idr, new_minor, minor_id);
+void drm_unplug_dev(struct drm_device *dev)
+{
+ /* for a USB device */
+ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
- if (type == DRM_MINOR_LEGACY) {
- ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
- if (ret) {
- DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
- goto err_mem;
- }
- } else
- new_minor->proc_root = NULL;
+ mutex_lock(&drm_global_mutex);
-#if defined(CONFIG_DEBUG_FS)
- ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
- if (ret) {
- DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
- goto err_g2;
+ drm_device_set_unplugged(dev);
+
+ if (dev->open_count == 0) {
+ drm_put_dev(dev);
}
-#endif
+ mutex_unlock(&drm_global_mutex);
+}
+EXPORT_SYMBOL(drm_unplug_dev);
- ret = drm_sysfs_device_add(new_minor);
- if (ret) {
- printk(KERN_ERR
- "DRM: Error sysfs_device_add.\n");
- goto err_g2;
+/*
+ * DRM internal mount
+ * We want to be able to allocate our own "struct address_space" to control
+ * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
+ * stand-alone address_space objects, so we need an underlying inode. As there
+ * is no way to allocate an independent inode easily, we need a fake internal
+ * VFS mount-point.
+ *
+ * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
+ * frees it again. You are allowed to use iget() and iput() to get references to
+ * the inode. But each drm_fs_inode_new() call must be paired with exactly one
+ * drm_fs_inode_free() call (which does not have to be the last iput()).
+ * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
+ * between multiple inode-users. You could, technically, call
+ * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
+ * iput(), but this way you'd end up with a new vfsmount for each inode.
+ */
+
+static int drm_fs_cnt;
+static struct vfsmount *drm_fs_mnt;
+
+static const struct dentry_operations drm_fs_dops = {
+ .d_dname = simple_dname,
+};
+
+static const struct super_operations drm_fs_sops = {
+ .statfs = simple_statfs,
+};
+
+static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_pseudo(fs_type,
+ "drm:",
+ &drm_fs_sops,
+ &drm_fs_dops,
+ 0x010203ff);
+}
+
+static struct file_system_type drm_fs_type = {
+ .name = "drm",
+ .owner = THIS_MODULE,
+ .mount = drm_fs_mount,
+ .kill_sb = kill_anon_super,
+};
+
+static struct inode *drm_fs_inode_new(void)
+{
+ struct inode *inode;
+ int r;
+
+ r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
+ if (r < 0) {
+ DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
+ return ERR_PTR(r);
}
- *minor = new_minor;
- DRM_DEBUG("new minor assigned %d\n", minor_id);
- return 0;
+ inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
+ if (IS_ERR(inode))
+ simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
+ return inode;
+}
-err_g2:
- if (new_minor->type == DRM_MINOR_LEGACY)
- drm_proc_cleanup(new_minor, drm_proc_root);
-err_mem:
- kfree(new_minor);
-err_idr:
- idr_remove(&drm_minors_idr, minor_id);
- *minor = NULL;
- return ret;
+static void drm_fs_inode_free(struct inode *inode)
+{
+ if (inode) {
+ iput(inode);
+ simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
+ }
}
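The pairing rule above reduces to the following pattern (a sketch, with error
handling trimmed to the essentials):

struct inode *inode = drm_fs_inode_new();	/* pins the "drm" pseudo fs */
if (IS_ERR(inode))
	return PTR_ERR(inode);

/* ... use inode->i_mapping as the address_space for VRAM mappings ... */

drm_fs_inode_free(inode);	/* iput() + unpin, exactly once per _new() */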
/**
- * Register.
+ * drm_dev_alloc - Allocate new DRM device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
*
- * \param pdev - PCI device structure
- * \param ent entry from the PCI ID table with device type flags
- * \return zero on success or a negative number on failure.
+ * Allocate and initialize a new DRM device. No device registration is done.
+ * Call drm_dev_register() to advertise the device to user space and register it
+ * with other core subsystems.
*
- * Attempt to get inter-module "drm" information. If we are first,
- * then register the character device and inter-module information.
- * Try to register; if we fail to register, back out the previous work.
+ * The initial ref-count of the object is 1. Use drm_dev_ref() and
+ * drm_dev_unref() to take and drop further ref-counts.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or NULL if out of memory.
*/
-int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
- struct drm_driver *driver)
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+ struct device *parent)
{
struct drm_device *dev;
int ret;
- DRM_DEBUG("\n");
-
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
- return -ENOMEM;
+ return NULL;
- ret = pci_enable_device(pdev);
- if (ret)
- goto err_g1;
+ kref_init(&dev->ref);
+ dev->dev = parent;
+ dev->driver = driver;
+
+ INIT_LIST_HEAD(&dev->filelist);
+ INIT_LIST_HEAD(&dev->ctxlist);
+ INIT_LIST_HEAD(&dev->vmalist);
+ INIT_LIST_HEAD(&dev->maplist);
+ INIT_LIST_HEAD(&dev->vblank_event_list);
- pci_set_master(pdev);
- if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
- printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
- goto err_g2;
+ spin_lock_init(&dev->buf_lock);
+ spin_lock_init(&dev->event_lock);
+ mutex_init(&dev->struct_mutex);
+ mutex_init(&dev->ctxlist_mutex);
+ mutex_init(&dev->master_mutex);
+
+ dev->anon_inode = drm_fs_inode_new();
+ if (IS_ERR(dev->anon_inode)) {
+ ret = PTR_ERR(dev->anon_inode);
+ DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
+ goto err_free;
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- pci_set_drvdata(pdev, dev);
- ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+ ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
if (ret)
- goto err_g2;
+ goto err_minors;
}
- if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
- goto err_g3;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, ent->driver_data);
+ if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+ ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
if (ret)
- goto err_g4;
+ goto err_minors;
}
- /* setup the grouping for the legacy output */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
- if (ret)
- goto err_g4;
+ ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_minors;
+
+ if (drm_ht_create(&dev->map_hash, 12))
+ goto err_minors;
+
+ ret = drm_ctxbitmap_init(dev);
+ if (ret) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ goto err_ht;
}
- list_add_tail(&dev->driver_item, &driver->device_list);
+ if (driver->driver_features & DRIVER_GEM) {
+ ret = drm_gem_init(dev);
+ if (ret) {
+ DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
+ goto err_ctxbitmap;
+ }
+ }
- DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
- driver->name, driver->major, driver->minor, driver->patchlevel,
- driver->date, pci_name(pdev), dev->primary->index);
+ return dev;
- return 0;
+err_ctxbitmap:
+ drm_ctxbitmap_cleanup(dev);
+err_ht:
+ drm_ht_remove(&dev->map_hash);
+err_minors:
+ drm_minor_free(dev, DRM_MINOR_LEGACY);
+ drm_minor_free(dev, DRM_MINOR_RENDER);
+ drm_minor_free(dev, DRM_MINOR_CONTROL);
+ drm_fs_inode_free(dev->anon_inode);
+err_free:
+ mutex_destroy(&dev->master_mutex);
+ kfree(dev);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_dev_alloc);
+
+static void drm_dev_release(struct kref *ref)
+{
+ struct drm_device *dev = container_of(ref, struct drm_device, ref);
-err_g4:
- drm_put_minor(&dev->primary);
-err_g3:
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
-err_g2:
- pci_disable_device(pdev);
-err_g1:
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_destroy(dev);
+
+ drm_ctxbitmap_cleanup(dev);
+ drm_ht_remove(&dev->map_hash);
+ drm_fs_inode_free(dev->anon_inode);
+
+ drm_minor_free(dev, DRM_MINOR_LEGACY);
+ drm_minor_free(dev, DRM_MINOR_RENDER);
+ drm_minor_free(dev, DRM_MINOR_CONTROL);
+
+ mutex_destroy(&dev->master_mutex);
+ kfree(dev->unique);
kfree(dev);
- return ret;
}
-EXPORT_SYMBOL(drm_get_dev);
/**
- * Put a secondary minor number.
+ * drm_dev_ref - Take reference of a DRM device
+ * @dev: device to take reference of or NULL
+ *
+ * This increases the ref-count of @dev by one. You *must* already own a
+ * reference when calling this. Use drm_dev_unref() to drop this reference
+ * again.
+ *
+ * This function never fails. However, this function does not provide *any*
+ * guarantee whether the device is alive or running. It only provides a
+ * reference to the object and the memory associated with it.
+ */
+void drm_dev_ref(struct drm_device *dev)
+{
+ if (dev)
+ kref_get(&dev->ref);
+}
+EXPORT_SYMBOL(drm_dev_ref);
+
+/**
+ * drm_dev_unref - Drop reference of a DRM device
+ * @dev: device to drop reference of or NULL
+ *
+ * This decreases the ref-count of @dev by one. The device is destroyed if the
+ * ref-count drops to zero.
+ */
+void drm_dev_unref(struct drm_device *dev)
+{
+ if (dev)
+ kref_put(&dev->ref, drm_dev_release);
+}
+EXPORT_SYMBOL(drm_dev_unref);
+
+/**
+ * drm_dev_register - Register DRM device
+ * @dev: Device to register
+ * @flags: Flags passed to the driver's .load() function
*
- * \param sec_minor - structure to be released
- * \return always zero
+ * Register the DRM device @dev with the system, advertise the device to user-space
+ * and start normal device operation. @dev must be allocated via drm_dev_alloc()
+ * previously.
*
- * Cleans up the proc resources. Not legal for this to be the
- * last minor released.
+ * Never call this twice on any device!
*
+ * RETURNS:
+ * 0 on success, negative error code on failure.
*/
-int drm_put_minor(struct drm_minor **minor_p)
+int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
- struct drm_minor *minor = *minor_p;
+ int ret;
- DRM_DEBUG("release secondary minor %d\n", minor->index);
+ mutex_lock(&drm_global_mutex);
- if (minor->type == DRM_MINOR_LEGACY)
- drm_proc_cleanup(minor, drm_proc_root);
-#if defined(CONFIG_DEBUG_FS)
- drm_debugfs_cleanup(minor);
-#endif
+ ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_minors;
- drm_sysfs_device_remove(minor);
+ ret = drm_minor_register(dev, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_minors;
- idr_remove(&drm_minors_idr, minor->index);
+ ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_minors;
- kfree(minor);
- *minor_p = NULL;
- return 0;
+ if (dev->driver->load) {
+ ret = dev->driver->load(dev, flags);
+ if (ret)
+ goto err_minors;
+ }
+
+ /* setup grouping for legacy outputs */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_mode_group_init_legacy_group(dev,
+ &dev->primary->mode_group);
+ if (ret)
+ goto err_unload;
+ }
+
+ ret = 0;
+ goto out_unlock;
+
+err_unload:
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
+err_minors:
+ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+out_unlock:
+ mutex_unlock(&drm_global_mutex);
+ return ret;
}
+EXPORT_SYMBOL(drm_dev_register);
/**
- * Called via drm_exit() at module unload time or when pci device is
- * unplugged.
+ * drm_dev_unregister - Unregister DRM device
+ * @dev: Device to unregister
*
- * Cleans up all DRM device, calling drm_lastclose().
- *
- * \sa drm_init
+ * Unregister the DRM device from the system. This does the reverse of
+ * drm_dev_register() but does not deallocate the device. The caller must call
+ * drm_dev_unref() to drop their final reference.
*/
-void drm_put_dev(struct drm_device *dev)
+void drm_dev_unregister(struct drm_device *dev)
{
- struct drm_driver *driver;
struct drm_map_list *r_list, *list_temp;
- DRM_DEBUG("\n");
-
- if (!dev) {
- DRM_ERROR("cleanup called no dev\n");
- return;
- }
- driver = dev->driver;
-
- drm_vblank_cleanup(dev);
-
drm_lastclose(dev);
- if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->agp_mtrr >= 0) {
- int retval;
- retval = mtrr_del(dev->agp->agp_mtrr,
- dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size * 1024 * 1024);
- DRM_DEBUG("mtrr_del=%d\n", retval);
- }
-
if (dev->driver->unload)
dev->driver->unload(dev);
- if (drm_core_has_AGP(dev) && dev->agp) {
- kfree(dev->agp);
- dev->agp = NULL;
- }
+ if (dev->agp)
+ drm_pci_agp_destroy(dev);
+
+ drm_vblank_cleanup(dev);
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
drm_rmmap(dev, r_list->map);
- drm_ht_remove(&dev->map_hash);
- drm_ctxbitmap_cleanup(dev);
+ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+}
+EXPORT_SYMBOL(drm_dev_unregister);
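Taken together, drm_dev_alloc(), drm_dev_register(), drm_dev_unregister() and
drm_dev_unref() give the canonical device life-cycle. A sketch for a bus probe
and remove pair (the function names and the flags value are placeholders):

static int example_probe(struct device *parent, struct drm_driver *driver)
{
	struct drm_device *dev;
	int ret;

	dev = drm_dev_alloc(driver, parent);	/* ref-count starts at 1 */
	if (!dev)
		return -ENOMEM;

	ret = drm_dev_register(dev, 0);
	if (ret) {
		drm_dev_unref(dev);	/* drops the initial reference */
		return ret;
	}
	return 0;
}

static void example_remove(struct drm_device *dev)
{
	drm_dev_unregister(dev);	/* stop user-space access */
	drm_dev_unref(dev);		/* freed once the last ref is gone */
}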
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
+/**
+ * drm_dev_set_unique - Set the unique name of a DRM device
+ * @dev: device of which to set the unique name
+ * @fmt: format string for unique name
+ *
+ * Sets the unique name of a DRM device using the specified format string and
+ * a variable list of arguments. Drivers can use this at driver probe time if
+ * the unique name of the devices they drive is static.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
+{
+ va_list ap;
- if (driver->driver_features & DRIVER_GEM)
- drm_gem_destroy(dev);
+ kfree(dev->unique);
- drm_put_minor(&dev->primary);
+ va_start(ap, fmt);
+ dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
- if (dev->devname) {
- kfree(dev->devname);
- dev->devname = NULL;
- }
- kfree(dev);
+ return dev->unique ? 0 : -ENOMEM;
}
-EXPORT_SYMBOL(drm_put_dev);
+EXPORT_SYMBOL(drm_dev_set_unique);
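For instance, a driver whose device name is fixed at probe time could set the
unique name like this (the platform device "pdev" is hypothetical):

ret = drm_dev_set_unique(dev, "platform:%s", dev_name(&pdev->dev));
if (ret)
	goto err_free;	/* -ENOMEM if the string could not be allocated */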
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 7e42b7e9d43..369b26278e7 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -14,28 +14,30 @@
#include <linux/device.h>
#include <linux/kdev_t.h>
+#include <linux/gfp.h>
#include <linux/err.h>
+#include <linux/export.h>
-#include "drm_sysfs.h"
-#include "drm_core.h"
-#include "drmP.h"
+#include <drm/drm_sysfs.h>
+#include <drm/drm_core.h>
+#include <drm/drmP.h>
-#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
-#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
+#define to_drm_minor(d) dev_get_drvdata(d)
+#define to_drm_connector(d) dev_get_drvdata(d)
static struct device_type drm_sysfs_device_minor = {
.name = "drm_minor"
};
/**
- * drm_class_suspend - DRM class suspend hook
+ * __drm_class_suspend - internal DRM class suspend routine
* @dev: Linux device to suspend
* @state: power state to enter
*
* Just figures out what the actual struct drm_device associated with
* @dev is and calls its suspend hook, if present.
*/
-static int drm_class_suspend(struct device *dev, pm_message_t state)
+static int __drm_class_suspend(struct device *dev, pm_message_t state)
{
if (dev->type == &drm_sysfs_device_minor) {
struct drm_minor *drm_minor = to_drm_minor(dev);
@@ -50,6 +52,26 @@ static int drm_class_suspend(struct device *dev, pm_message_t state)
}
/**
+ * drm_class_suspend - internal DRM class suspend hook. Simply calls
+ * __drm_class_suspend() with the correct pm state.
+ * @dev: Linux device to suspend
+ */
+static int drm_class_suspend(struct device *dev)
+{
+ return __drm_class_suspend(dev, PMSG_SUSPEND);
+}
+
+/**
+ * drm_class_freeze - internal DRM class freeze hook. Simply calls
+ * __drm_class_suspend() with the correct pm state.
+ * @dev: Linux device to freeze
+ */
+static int drm_class_freeze(struct device *dev)
+{
+ return __drm_class_suspend(dev, PMSG_FREEZE);
+}
+
+/**
* drm_class_resume - DRM class resume hook
* @dev: Linux device to resume
*
@@ -70,19 +92,23 @@ static int drm_class_resume(struct device *dev)
return 0;
}
-/* Display the version of drm_core. This doesn't work right in current design */
-static ssize_t version_show(struct class *dev, char *buf)
-{
- return sprintf(buf, "%s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR,
- CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
-}
+static const struct dev_pm_ops drm_class_dev_pm_ops = {
+ .suspend = drm_class_suspend,
+ .resume = drm_class_resume,
+ .freeze = drm_class_freeze,
+};
-static char *drm_devnode(struct device *dev, mode_t *mode)
+static char *drm_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
}
-static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
+static CLASS_ATTR_STRING(version, S_IRUGO,
+ CORE_NAME " "
+ __stringify(CORE_MAJOR) "."
+ __stringify(CORE_MINOR) "."
+ __stringify(CORE_PATCHLEVEL) " "
+ CORE_DATE);
/**
* drm_sysfs_create - create a struct drm_sysfs_class structure
@@ -106,10 +132,9 @@ struct class *drm_sysfs_create(struct module *owner, char *name)
goto err_out;
}
- class->suspend = drm_class_suspend;
- class->resume = drm_class_resume;
+ class->pm = &drm_class_dev_pm_ops;
- err = class_create_file(class, &class_attr_version);
+ err = class_create_file(class, &class_attr_version.attr);
if (err)
goto err_out_class;
@@ -132,22 +157,9 @@ void drm_sysfs_destroy(void)
{
if ((drm_class == NULL) || (IS_ERR(drm_class)))
return;
- class_remove_file(drm_class, &class_attr_version);
+ class_remove_file(drm_class, &class_attr_version.attr);
class_destroy(drm_class);
-}
-
-/**
- * drm_sysfs_device_release - do nothing
- * @dev: Linux device
- *
- * Normally, this would free the DRM device associated with @dev, along
- * with cleaning up any other stuff. But we do that in the DRM core, so
- * this function can just return and hope that the core does its job.
- */
-static void drm_sysfs_device_release(struct device *dev)
-{
- memset(dev, 0, sizeof(struct device));
- return;
+ drm_class = NULL;
}
/*
@@ -159,8 +171,15 @@ static ssize_t status_show(struct device *device,
{
struct drm_connector *connector = to_drm_connector(device);
enum drm_connector_status status;
+ int ret;
+
+ ret = mutex_lock_interruptible(&connector->dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+ status = connector->funcs->detect(connector, true);
+ mutex_unlock(&connector->dev->mode_config.mutex);
- status = connector->funcs->detect(connector);
return snprintf(buf, PAGE_SIZE, "%s\n",
drm_get_connector_status_name(status));
}
@@ -174,7 +193,7 @@ static ssize_t dpms_show(struct device *device,
uint64_t dpms_status;
int ret;
- ret = drm_connector_property_get_value(connector,
+ ret = drm_object_property_get_value(&connector->base,
dev->mode_config.dpms_property,
&dpms_status);
if (ret)
@@ -194,8 +213,9 @@ static ssize_t enabled_show(struct device *device,
"disabled");
}
-static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static ssize_t edid_show(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
{
struct device *connector_dev = container_of(kobj, struct device, kobj);
struct drm_connector *connector = to_drm_connector(connector_dev);
@@ -268,7 +288,7 @@ static ssize_t subconnector_show(struct device *device,
return 0;
}
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
+ ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
if (ret)
return 0;
@@ -309,7 +329,7 @@ static ssize_t select_subconnector_show(struct device *device,
return 0;
}
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
+ ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
if (ret)
return 0;
@@ -334,52 +354,46 @@ static struct device_attribute connector_attrs_opt1[] = {
static struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0444,
- .size = 128,
+ .size = 0,
.read = edid_show,
};
/**
- * drm_sysfs_connector_add - add an connector to sysfs
+ * drm_sysfs_connector_add - add a connector to sysfs
* @connector: connector to add
*
- * Create an connector device in sysfs, along with its associated connector
+ * Create a connector device in sysfs, along with its associated connector
* properties (so far, connection status, dpms, mode list & edid) and
* generate a hotplug event so userspace knows there's a new connector
* available.
- *
- * Note:
- * This routine should only be called *once* for each DRM minor registered.
- * A second call for an already registered device will trigger the BUG_ON
- * below.
*/
int drm_sysfs_connector_add(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- int ret = 0, i, j;
-
- /* We shouldn't get called more than once for the same connector */
- BUG_ON(device_is_registered(&connector->kdev));
+ int attr_cnt = 0;
+ int opt_cnt = 0;
+ int i;
+ int ret;
- connector->kdev.parent = &dev->primary->kdev;
- connector->kdev.class = drm_class;
- connector->kdev.release = drm_sysfs_device_release;
+ if (connector->kdev)
+ return 0;
+ connector->kdev = device_create(drm_class, dev->primary->kdev,
+ 0, connector, "card%d-%s",
+ dev->primary->index, connector->name);
DRM_DEBUG("adding \"%s\" to sysfs\n",
- drm_get_connector_name(connector));
-
- dev_set_name(&connector->kdev, "card%d-%s",
- dev->primary->index, drm_get_connector_name(connector));
- ret = device_register(&connector->kdev);
+ connector->name);
- if (ret) {
- DRM_ERROR("failed to register connector device: %d\n", ret);
+ if (IS_ERR(connector->kdev)) {
+ DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
+ ret = PTR_ERR(connector->kdev);
goto out;
}
/* Standard attributes */
- for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) {
- ret = device_create_file(&connector->kdev, &connector_attrs[i]);
+ for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
+ ret = device_create_file(connector->kdev, &connector_attrs[attr_cnt]);
if (ret)
goto err_out_files;
}
@@ -395,8 +409,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Component:
case DRM_MODE_CONNECTOR_TV:
- for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) {
- ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]);
+ for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
+ ret = device_create_file(connector->kdev, &connector_attrs_opt1[opt_cnt]);
if (ret)
goto err_out_files;
}
@@ -405,7 +419,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
break;
}
- ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
+ ret = sysfs_create_bin_file(&connector->kdev->kobj, &edid_attr);
if (ret)
goto err_out_files;
@@ -415,11 +429,11 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
return 0;
err_out_files:
- if (i > 0)
- for (j = 0; j < i; j++)
- device_remove_file(&connector->kdev,
- &connector_attrs[i]);
- device_unregister(&connector->kdev);
+ for (i = 0; i < opt_cnt; i++)
+ device_remove_file(connector->kdev, &connector_attrs_opt1[i]);
+ for (i = 0; i < attr_cnt; i++)
+ device_remove_file(connector->kdev, &connector_attrs[i]);
+ device_unregister(connector->kdev);
out:
return ret;
@@ -443,13 +457,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
{
int i;
+ if (!connector->kdev)
+ return;
DRM_DEBUG("removing \"%s\" from sysfs\n",
- drm_get_connector_name(connector));
+ connector->name);
for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
- device_remove_file(&connector->kdev, &connector_attrs[i]);
- sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
- device_unregister(&connector->kdev);
+ device_remove_file(connector->kdev, &connector_attrs[i]);
+ sysfs_remove_bin_file(&connector->kdev->kobj, &edid_attr);
+ device_unregister(connector->kdev);
+ connector->kdev = NULL;
}
EXPORT_SYMBOL(drm_sysfs_connector_remove);
@@ -468,10 +485,15 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
DRM_DEBUG("generating hotplug event\n");
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(drm_sysfs_hotplug_event);
+static void drm_sysfs_release(struct device *dev)
+{
+ kfree(dev);
+}
+
/**
* drm_sysfs_device_add - adds a class device to sysfs for a character driver
* @dev: DRM device to be added
@@ -483,14 +505,9 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
*/
int drm_sysfs_device_add(struct drm_minor *minor)
{
- int err;
char *minor_str;
+ int r;
- minor->kdev.parent = &minor->dev->pdev->dev;
- minor->kdev.class = drm_class;
- minor->kdev.release = drm_sysfs_device_release;
- minor->kdev.devt = minor->device;
- minor->kdev.type = &drm_sysfs_device_minor;
if (minor->type == DRM_MINOR_CONTROL)
minor_str = "controlD%d";
else if (minor->type == DRM_MINOR_RENDER)
@@ -498,18 +515,34 @@ int drm_sysfs_device_add(struct drm_minor *minor)
else
minor_str = "card%d";
- dev_set_name(&minor->kdev, minor_str, minor->index);
-
- err = device_register(&minor->kdev);
- if (err) {
- DRM_ERROR("device add failed: %d\n", err);
- goto err_out;
+ minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL);
+ if (!minor->kdev) {
+ r = -ENOMEM;
+ goto error;
}
+ device_initialize(minor->kdev);
+ minor->kdev->devt = MKDEV(DRM_MAJOR, minor->index);
+ minor->kdev->class = drm_class;
+ minor->kdev->type = &drm_sysfs_device_minor;
+ minor->kdev->parent = minor->dev->dev;
+ minor->kdev->release = drm_sysfs_release;
+ dev_set_drvdata(minor->kdev, minor);
+
+ r = dev_set_name(minor->kdev, minor_str, minor->index);
+ if (r < 0)
+ goto error;
+
+ r = device_add(minor->kdev);
+ if (r < 0)
+ goto error;
+
return 0;
-err_out:
- return err;
+error:
+ DRM_ERROR("device create failed %d\n", r);
+ put_device(minor->kdev);
+ return r;
}
/**
@@ -521,7 +554,9 @@ err_out:
*/
void drm_sysfs_device_remove(struct drm_minor *minor)
{
- device_unregister(&minor->kdev);
+ if (minor->kdev)
+ device_unregister(minor->kdev);
+ minor->kdev = NULL;
}
@@ -537,6 +572,9 @@ void drm_sysfs_device_remove(struct drm_minor *minor)
int drm_class_device_register(struct device *dev)
{
+ if (!drm_class || IS_ERR(drm_class))
+ return -ENOENT;
+
dev->class = drm_class;
return device_register(dev);
}
diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h
new file mode 100644
index 00000000000..27cc95f3638
--- /dev/null
+++ b/drivers/gpu/drm/drm_trace.h
@@ -0,0 +1,66 @@
+#if !defined(_DRM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _DRM_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM drm
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE drm_trace
+
+TRACE_EVENT(drm_vblank_event,
+ TP_PROTO(int crtc, unsigned int seq),
+ TP_ARGS(crtc, seq),
+ TP_STRUCT__entry(
+ __field(int, crtc)
+ __field(unsigned int, seq)
+ ),
+ TP_fast_assign(
+ __entry->crtc = crtc;
+ __entry->seq = seq;
+ ),
+ TP_printk("crtc=%d, seq=%u", __entry->crtc, __entry->seq)
+);
+
+TRACE_EVENT(drm_vblank_event_queued,
+ TP_PROTO(pid_t pid, int crtc, unsigned int seq),
+ TP_ARGS(pid, crtc, seq),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(int, crtc)
+ __field(unsigned int, seq)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __entry->crtc = crtc;
+ __entry->seq = seq;
+ ),
+ TP_printk("pid=%d, crtc=%d, seq=%u", __entry->pid, __entry->crtc, \
+ __entry->seq)
+);
+
+TRACE_EVENT(drm_vblank_event_delivered,
+ TP_PROTO(pid_t pid, int crtc, unsigned int seq),
+ TP_ARGS(pid, crtc, seq),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(int, crtc)
+ __field(unsigned int, seq)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __entry->crtc = crtc;
+ __entry->seq = seq;
+ ),
+ TP_printk("pid=%d, crtc=%d, seq=%u", __entry->pid, __entry->crtc, \
+ __entry->seq)
+);
+
+#endif /* _DRM_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
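Each TRACE_EVENT() above generates a trace_<name>() helper that the vblank
code calls at the corresponding point. For example (a sketch; "crtc" and "seq"
stand in for the values computed by the surrounding code):

/* in the vblank interrupt path, after computing the new sequence number */
trace_drm_vblank_event(crtc, seq);

/* when user space queues a vblank event */
trace_drm_vblank_event_queued(current->pid, crtc, seq);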
diff --git a/drivers/gpu/drm/drm_trace_points.c b/drivers/gpu/drm/drm_trace_points.c
new file mode 100644
index 00000000000..3bbc4deb4db
--- /dev/null
+++ b/drivers/gpu/drm/drm_trace_points.c
@@ -0,0 +1,4 @@
+#include <drm/drmP.h>
+
+#define CREATE_TRACE_POINTS
+#include "drm_trace.h"
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
new file mode 100644
index 00000000000..f2fe94aab90
--- /dev/null
+++ b/drivers/gpu/drm/drm_usb.c
@@ -0,0 +1,88 @@
+#include <drm/drmP.h>
+#include <drm/drm_usb.h>
+#include <linux/usb.h>
+#include <linux/module.h>
+
+int drm_get_usb_dev(struct usb_interface *interface,
+ const struct usb_device_id *id,
+ struct drm_driver *driver)
+{
+ struct drm_device *dev;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ dev = drm_dev_alloc(driver, &interface->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->usbdev = interface_to_usbdev(interface);
+ usb_set_intfdata(interface, dev);
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ goto err_free;
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ driver->name, driver->major, driver->minor, driver->patchlevel,
+ driver->date, dev->primary->index);
+
+ return 0;
+
+err_free:
+ drm_dev_unref(dev);
+ return ret;
+
+}
+EXPORT_SYMBOL(drm_get_usb_dev);
+
+static int drm_usb_set_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ return 0;
+}
+
+static struct drm_bus drm_usb_bus = {
+ .set_busid = drm_usb_set_busid,
+};
+
+/**
+ * drm_usb_init - Register matching USB devices with the DRM subsystem
+ * @driver: DRM device driver
+ * @udriver: USB device driver
+ *
+ * Registers one or more devices matched by a USB driver with the DRM
+ * subsystem.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
+{
+ int res;
+ DRM_DEBUG("\n");
+
+ driver->bus = &drm_usb_bus;
+
+ res = usb_register(udriver);
+ return res;
+}
+EXPORT_SYMBOL(drm_usb_init);
+
+/**
+ * drm_usb_exit - Unregister matching USB devices from the DRM subsystem
+ * @driver: DRM device driver
+ * @udriver: USB device driver
+ *
+ * Unregisters one or more devices matched by a USB driver from the DRM
+ * subsystem.
+ */
+void drm_usb_exit(struct drm_driver *driver,
+ struct usb_driver *udriver)
+{
+ usb_deregister(udriver);
+}
+EXPORT_SYMBOL(drm_usb_exit);
+
+MODULE_AUTHOR("David Airlie");
+MODULE_DESCRIPTION("USB DRM support");
+MODULE_LICENSE("GPL and additional rights");
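A USB DRM driver wires these helpers into a standard usb_driver. A condensed
sketch follows; the driver structures, the VID/PID pair, and the assumption
that example_drm_driver is defined elsewhere are all placeholders, and the
disconnect handler (typically calling drm_unplug_dev()) is omitted:

static struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* hypothetical VID/PID */
	{ }
};

static int example_usb_probe(struct usb_interface *interface,
			     const struct usb_device_id *id)
{
	return drm_get_usb_dev(interface, id, &example_drm_driver);
}

static struct usb_driver example_usb_driver = {
	.name = "example-drm",
	.probe = example_usb_probe,
	.id_table = example_id_table,
};

static int __init example_init(void)
{
	return drm_usb_init(&example_drm_driver, &example_usb_driver);
}

static void __exit example_exit(void)
{
	drm_usb_exit(&example_drm_driver, &example_usb_driver);
}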
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 4ac900f4647..24e045c4f53 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -33,26 +33,29 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "drmP.h"
+#include <drm/drmP.h>
+#include <linux/export.h>
#if defined(__ia64__)
#include <linux/efi.h>
+#include <linux/slab.h>
#endif
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
-static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
+static pgprot_t drm_io_prot(struct drm_local_map *map,
+ struct vm_area_struct *vma)
{
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
#if defined(__i386__) || defined(__x86_64__)
- if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
- pgprot_val(tmp) |= _PAGE_PCD;
- pgprot_val(tmp) &= ~_PAGE_PWT;
- }
+ if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
+ tmp = pgprot_noncached(tmp);
+ else
+ tmp = pgprot_writecombine(tmp);
#elif defined(__powerpc__)
pgprot_val(tmp) |= _PAGE_NO_CACHE;
- if (map_type == _DRM_REGISTERS)
+ if (map->type == _DRM_REGISTERS)
pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
if (efi_range_is_wc(vma->vm_start, vma->vm_end -
@@ -60,7 +63,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
-#elif defined(__sparc__)
+#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
tmp = pgprot_noncached(tmp);
#endif
return tmp;
@@ -98,7 +101,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/*
* Find the right map
*/
- if (!drm_core_has_AGP(dev))
+ if (!dev->agp)
goto vm_fault_error;
if (!dev->agp || !dev->agp->cant_use_aperture)
@@ -137,7 +140,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
break;
}
- if (!agpmem)
+ if (&agpmem->head == &dev->agp->memory)
goto vm_fault_error;
/*
@@ -217,7 +220,6 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_dec(&dev->vma_count);
map = vma->vm_private_data;
@@ -248,13 +250,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
- if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
- int retcode;
- retcode = mtrr_del(map->mtrr,
- map->offset,
- map->size);
- DRM_DEBUG("mtrr_del = %d\n", retcode);
- }
+ arch_phys_wc_del(map->mtrr);
iounmap(map->handle);
break;
case _DRM_SHM:
@@ -269,9 +265,6 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
- case _DRM_GEM:
- DRM_ERROR("tried to rmmap GEM object\n");
- break;
}
kfree(map);
}
@@ -304,7 +297,7 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
- page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
+ page = virt_to_page((void *)dma->pagelist[page_nr]);
get_page(page);
vmf->page = page;
@@ -404,15 +397,13 @@ static const struct vm_operations_struct drm_vm_sg_ops = {
* Create a new drm_vma_entry structure as the \p vma private data entry and
* add it to drm_device::vmalist.
*/
-void drm_vm_open_locked(struct vm_area_struct *vma)
+void drm_vm_open_locked(struct drm_device *dev,
+ struct vm_area_struct *vma)
{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
struct drm_vma_entry *vma_entry;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_inc(&dev->vma_count);
vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
if (vma_entry) {
@@ -421,6 +412,7 @@ void drm_vm_open_locked(struct vm_area_struct *vma)
list_add(&vma_entry->head, &dev->vmalist);
}
}
+EXPORT_SYMBOL_GPL(drm_vm_open_locked);
static void drm_vm_open(struct vm_area_struct *vma)
{
@@ -428,10 +420,27 @@ static void drm_vm_open(struct vm_area_struct *vma)
struct drm_device *dev = priv->minor->dev;
mutex_lock(&dev->struct_mutex);
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
mutex_unlock(&dev->struct_mutex);
}
+void drm_vm_close_locked(struct drm_device *dev,
+ struct vm_area_struct *vma)
+{
+ struct drm_vma_entry *pt, *temp;
+
+ DRM_DEBUG("0x%08lx,0x%08lx\n",
+ vma->vm_start, vma->vm_end - vma->vm_start);
+
+ list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
+ if (pt->vma == vma) {
+ list_del(&pt->head);
+ kfree(pt);
+ break;
+ }
+ }
+}
+
/**
* \c close method for all virtual memory types.
*
@@ -444,20 +453,9 @@ static void drm_vm_close(struct vm_area_struct *vma)
{
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
- struct drm_vma_entry *pt, *temp;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_dec(&dev->vma_count);
mutex_lock(&dev->struct_mutex);
- list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
- if (pt->vma == vma) {
- list_del(&pt->head);
- kfree(pt);
- break;
- }
- }
+ drm_vm_close_locked(dev, vma);
mutex_unlock(&dev->struct_mutex);
}
@@ -506,32 +504,21 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &drm_vm_dma_ops;
- vma->vm_flags |= VM_RESERVED; /* Don't swap */
- vma->vm_flags |= VM_DONTEXPAND;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
return 0;
}
-resource_size_t drm_core_get_map_ofs(struct drm_local_map * map)
-{
- return map->offset;
-}
-
-EXPORT_SYMBOL(drm_core_get_map_ofs);
-
-resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
+static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
- return dev->hose->dense_mem_base - dev->hose->mem_space->start;
+ return dev->hose->dense_mem_base;
#else
return 0;
#endif
}
-EXPORT_SYMBOL(drm_core_get_reg_ofs);
-
/**
* mmap DMA memory.
*
@@ -600,8 +587,9 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
}
switch (map->type) {
+#if !defined(__arm__)
case _DRM_AGP:
- if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
+ if (dev->agp && dev->agp->cant_use_aperture) {
/*
* On some platforms we can't talk to bus dma address from the CPU, so for
* memory of type DRM_AGP, we'll deal with sorting out the real physical
@@ -614,11 +602,11 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
break;
}
/* fall through to _DRM_FRAME_BUFFER... */
+#endif
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
- offset = dev->driver->get_reg_ofs(dev);
- vma->vm_flags |= VM_IO; /* not in core dump */
- vma->vm_page_prot = drm_io_prot(map->type, vma);
+ offset = drm_core_get_reg_ofs(dev);
+ vma->vm_page_prot = drm_io_prot(map, vma);
if (io_remap_pfn_range(vma, vma->vm_start,
(map->offset + offset) >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
@@ -628,6 +616,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
" offset = 0x%llx\n",
map->type,
vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
+
vma->vm_ops = &drm_vm_ops;
break;
case _DRM_CONSISTENT:
@@ -642,24 +631,18 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
case _DRM_SHM:
vma->vm_ops = &drm_vm_shm_ops;
vma->vm_private_data = (void *)map;
- /* Don't let this area swap. Change when
- DRM_KERNEL advisory is supported. */
- vma->vm_flags |= VM_RESERVED;
break;
case _DRM_SCATTER_GATHER:
vma->vm_ops = &drm_vm_sg_ops;
vma->vm_private_data = (void *)map;
- vma->vm_flags |= VM_RESERVED;
vma->vm_page_prot = drm_dma_prot(map->type, vma);
break;
default:
return -EINVAL; /* This should never happen. */
}
- vma->vm_flags |= VM_RESERVED; /* Don't swap */
- vma->vm_flags |= VM_DONTEXPAND;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
return 0;
}
@@ -669,6 +652,9 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_device *dev = priv->minor->dev;
int ret;
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
mutex_lock(&dev->struct_mutex);
ret = drm_mmap_locked(filp, vma);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
new file mode 100644
index 00000000000..63b47120507
--- /dev/null
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2012 David Airlie <airlied@linux.ie>
+ * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_vma_manager.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/**
+ * DOC: vma offset manager
+ *
+ * The vma-manager is responsible for mapping arbitrary driver-dependent
+ * memory regions into the linear user address-space. It provides offsets to
+ * the caller, which can then be used on the address_space of the drm-device.
+ * It takes care not to overlap regions, to size them appropriately, and not
+ * to confuse mm-core with inconsistent fake vm_pgoff fields.
+ * Drivers shouldn't use this for object placement in VMEM. This manager should
+ * only be used to manage mappings into linear user-space VMs.
+ *
+ * We use drm_mm as backend to manage object allocations. But it is highly
+ * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
+ * speed up offset lookups.
+ *
+ * You must not use multiple offset managers on a single address_space.
+ * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
+ * no longer be linear. Please use VM_NONLINEAR in that case and implement your
+ * own offset managers.
+ *
+ * This offset manager works on page-based addresses. That is, every argument
+ * and return code (with the exception of drm_vma_node_offset_addr()) is given
+ * in number of pages, not number of bytes. That means object sizes and offsets
+ * must always be page-aligned (as usual).
+ * If you want to get a valid byte-based user-space address for a given offset,
+ * please see drm_vma_node_offset_addr().
+ *
+ * In addition to offset management, the vma offset manager also handles access
+ * management. For every open-file context that is allowed to access a given
+ * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
+ * open-file with the offset of the node will fail with -EACCES. To revoke
+ * access again, use drm_vma_node_revoke(). However, the caller is responsible
+ * for destroying already existing mappings, if required.
+ */
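As an editorial illustration (not part of this patch), here is a minimal usage sketch of the API added below. The foo_bo object, the foo_mgr placement, and the range numbers are assumptions for the example:

/* Hypothetical driver object embedding an offset node (example only). */
struct foo_bo {
	struct drm_vma_offset_node vma_node;
	size_t size;				/* byte size, page-aligned */
};

static struct drm_vma_offset_manager foo_mgr;

static void foo_init(void)
{
	/* Done once per device: manage a fake-offset range of 0x10000
	 * pages starting at page 0x100000 (arbitrary example numbers).
	 */
	drm_vma_offset_manager_init(&foo_mgr, 0x100000, 0x10000);
}

static int foo_bo_map_offset(struct foo_bo *bo)
{
	int ret;

	/* All sizes and offsets are passed in pages, not bytes. */
	ret = drm_vma_offset_add(&foo_mgr, &bo->vma_node,
				 bo->size >> PAGE_SHIFT);
	if (ret)
		return ret;

	/* Byte offset that user-space passes to mmap() on the DRM fd. */
	DRM_DEBUG("mmap offset: 0x%llx\n",
		  (unsigned long long)drm_vma_node_offset_addr(&bo->vma_node));
	return 0;
}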
+
+/**
+ * drm_vma_offset_manager_init - Initialize new offset-manager
+ * @mgr: Manager object
+ * @page_offset: Offset of available memory area (page-based)
+ * @size: Size of available address space range (page-based)
+ *
+ * Initialize a new offset-manager. The offset and area size available for the
+ * manager are given as @page_offset and @size. Both are interpreted as
+ * page-numbers, not bytes.
+ *
+ * Adding/removing nodes from the manager is locked internally and protected
+ * against concurrent access. However, node allocation and destruction is left
+ * for the caller. While calling into the vma-manager, a given node must
+ * always be guaranteed to be referenced.
+ */
+void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
+ unsigned long page_offset, unsigned long size)
+{
+ rwlock_init(&mgr->vm_lock);
+ mgr->vm_addr_space_rb = RB_ROOT;
+ drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
+}
+EXPORT_SYMBOL(drm_vma_offset_manager_init);
+
+/**
+ * drm_vma_offset_manager_destroy() - Destroy offset manager
+ * @mgr: Manager object
+ *
+ * Destroy an object manager which was previously created via
+ * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
+ * before destroying the manager. Otherwise, drm_mm will refuse to free the
+ * requested resources.
+ *
+ * The manager must not be accessed after this function is called.
+ */
+void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
+{
+ /* take the lock to protect against buggy drivers */
+ write_lock(&mgr->vm_lock);
+ drm_mm_takedown(&mgr->vm_addr_space_mm);
+ write_unlock(&mgr->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
+
+/**
+ * drm_vma_offset_lookup() - Find node in offset space
+ * @mgr: Manager object
+ * @start: Start address for object (page-based)
+ * @pages: Size of object (page-based)
+ *
+ * Find a node given a start address and object size. This returns the _best_
+ * match for the given range. That is, @start may point somewhere into a valid
+ * region and the given node will be returned, as long as the node spans the
+ * whole requested area (given the size in number of pages as @pages).
+ *
+ * RETURNS:
+ * Returns NULL if no suitable node can be found. Otherwise, the best match
+ * is returned. It's the caller's responsibility to make sure the node doesn't
+ * get destroyed before the caller can access it.
+ */
+struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages)
+{
+ struct drm_vma_offset_node *node;
+
+ read_lock(&mgr->vm_lock);
+ node = drm_vma_offset_lookup_locked(mgr, start, pages);
+ read_unlock(&mgr->vm_lock);
+
+ return node;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup);
+
+/**
+ * drm_vma_offset_lookup_locked() - Find node in offset space
+ * @mgr: Manager object
+ * @start: Start address for object (page-based)
+ * @pages: Size of object (page-based)
+ *
+ * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
+ * manually. See drm_vma_offset_lock_lookup() for an example.
+ *
+ * RETURNS:
+ * Returns NULL if no suitable node can be found. Otherwise, the best match
+ * is returned.
+ */
+struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages)
+{
+ struct drm_vma_offset_node *node, *best;
+ struct rb_node *iter;
+ unsigned long offset;
+
+ iter = mgr->vm_addr_space_rb.rb_node;
+ best = NULL;
+
+ while (likely(iter)) {
+ node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
+ offset = node->vm_node.start;
+ if (start >= offset) {
+ iter = iter->rb_right;
+ best = node;
+ if (start == offset)
+ break;
+ } else {
+ iter = iter->rb_left;
+ }
+ }
+
+ /* verify that the node spans the requested area */
+ if (best) {
+ offset = best->vm_node.start + best->vm_node.size;
+ if (offset < start + pages)
+ best = NULL;
+ }
+
+ return best;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
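The lookup above keeps the rightmost node whose start is at or before @start as the running best candidate, then verifies that the candidate spans the whole requested range. For intuition, a sketch of the same rule on a plain sorted array (illustration only; the rb-tree walk above does this in O(log n)):

struct region { unsigned long start, size; };

static const struct region *best_match(const struct region *r, int n,
				       unsigned long start,
				       unsigned long pages)
{
	const struct region *best = NULL;
	int i;

	/* Rightmost region starting at or before the requested start. */
	for (i = 0; i < n && r[i].start <= start; i++)
		best = &r[i];

	/* Reject a candidate too small to span [start, start + pages). */
	if (best && best->start + best->size < start + pages)
		best = NULL;

	return best;
}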
+
+/* internal helper to link @node into the rb-tree */
+static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node)
+{
+ struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
+ struct rb_node *parent = NULL;
+ struct drm_vma_offset_node *iter_node;
+
+ while (likely(*iter)) {
+ parent = *iter;
+ iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
+
+ if (node->vm_node.start < iter_node->vm_node.start)
+ iter = &(*iter)->rb_left;
+ else if (node->vm_node.start > iter_node->vm_node.start)
+ iter = &(*iter)->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&node->vm_rb, parent, iter);
+ rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
+}
+
+/**
+ * drm_vma_offset_add() - Add offset node to manager
+ * @mgr: Manager object
+ * @node: Node to be added
+ * @pages: Allocation size visible to user-space (in number of pages)
+ *
+ * Add a node to the offset-manager. If the node was already added, this does
+ * nothing and returns 0. @pages is the size of the object given in number of
+ * pages.
+ * After this call succeeds, you can access the offset of the node until it
+ * is removed again.
+ *
+ * If this call fails, it is safe to retry the operation or to call
+ * drm_vma_offset_remove() anyway; no cleanup is required in that case.
+ *
+ * @pages is not required to be the same size as the underlying memory object
+ * that you want to map. It only limits the size that user-space can map into
+ * their address space.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node, unsigned long pages)
+{
+ int ret;
+
+ write_lock(&mgr->vm_lock);
+
+ if (drm_mm_node_allocated(&node->vm_node)) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
+ pages, 0, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ goto out_unlock;
+
+ _drm_vma_offset_add_rb(mgr, node);
+
+out_unlock:
+ write_unlock(&mgr->vm_lock);
+ return ret;
+}
+EXPORT_SYMBOL(drm_vma_offset_add);
+
+/**
+ * drm_vma_offset_remove() - Remove offset node from manager
+ * @mgr: Manager object
+ * @node: Node to be removed
+ *
+ * Remove a node from the offset manager. If the node wasn't added before, this
+ * does nothing. After this call returns, the offset and size will be 0 until a
+ * new offset is allocated via drm_vma_offset_add() again. Helper functions like
+ * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
+ * offset is allocated.
+ */
+void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node)
+{
+ write_lock(&mgr->vm_lock);
+
+ if (drm_mm_node_allocated(&node->vm_node)) {
+ rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
+ drm_mm_remove_node(&node->vm_node);
+ memset(&node->vm_node, 0, sizeof(node->vm_node));
+ }
+
+ write_unlock(&mgr->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_remove);
+
+/**
+ * drm_vma_node_allow - Add open-file to list of allowed users
+ * @node: Node to modify
+ * @filp: Open file to add
+ *
+ * Add @filp to the list of allowed open-files for this node. If @filp is
+ * already on this list, the ref-count is incremented.
+ *
+ * The list of allowed-users is preserved across drm_vma_offset_add() and
+ * drm_vma_offset_remove() calls. You may even call it if the node is currently
+ * not added to any offset-manager.
+ *
+ * You must remove all open-files the same number of times as you added them
+ * before destroying the node. Otherwise, you will leak memory.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on internal failure (out-of-mem)
+ */
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
+{
+ struct rb_node **iter;
+ struct rb_node *parent = NULL;
+ struct drm_vma_offset_file *new, *entry;
+ int ret = 0;
+
+ /* Preallocate entry to avoid atomic allocations below. It is quite
+ * unlikely that an open-file is added twice to a single node so we
+ * don't optimize for this case. OOM is checked below only if the entry
+ * is actually used. */
+ new = kmalloc(sizeof(*entry), GFP_KERNEL);
+
+ write_lock(&node->vm_lock);
+
+ iter = &node->vm_files.rb_node;
+
+ while (likely(*iter)) {
+ parent = *iter;
+ entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
+
+ if (filp == entry->vm_filp) {
+ entry->vm_count++;
+ goto unlock;
+ } else if (filp > entry->vm_filp) {
+ iter = &(*iter)->rb_right;
+ } else {
+ iter = &(*iter)->rb_left;
+ }
+ }
+
+ if (!new) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ new->vm_filp = filp;
+ new->vm_count = 1;
+ rb_link_node(&new->vm_rb, parent, iter);
+ rb_insert_color(&new->vm_rb, &node->vm_files);
+ new = NULL;
+
+unlock:
+ write_unlock(&node->vm_lock);
+ kfree(new);
+ return ret;
+}
+EXPORT_SYMBOL(drm_vma_node_allow);
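A hedged sketch of how the allow/revoke pair is meant to be used: call drm_vma_node_allow() when an open-file gains a handle to the object and drm_vma_node_revoke() when it drops it, one revoke per successful allow. The foo_bo type and the hooks are assumptions for the example:

/* Example only: per-open-file access pairing for a hypothetical object. */
static int foo_bo_open(struct foo_bo *bo, struct file *filp)
{
	return drm_vma_node_allow(&bo->vma_node, filp);
}

static void foo_bo_close(struct foo_bo *bo, struct file *filp)
{
	/* Must balance every successful foo_bo_open() call. */
	drm_vma_node_revoke(&bo->vma_node, filp);
}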
+
+/**
+ * drm_vma_node_revoke - Remove open-file from list of allowed users
+ * @node: Node to modify
+ * @filp: Open file to remove
+ *
+ * Decrement the ref-count of @filp in the list of allowed open-files on @node.
+ * If the ref-count drops to zero, remove @filp from the list. You must call
+ * this once for every drm_vma_node_allow() on @filp.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * If @filp is not on the list, nothing is done.
+ */
+void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
+{
+ struct drm_vma_offset_file *entry;
+ struct rb_node *iter;
+
+ write_lock(&node->vm_lock);
+
+ iter = node->vm_files.rb_node;
+ while (likely(iter)) {
+ entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
+ if (filp == entry->vm_filp) {
+ if (!--entry->vm_count) {
+ rb_erase(&entry->vm_rb, &node->vm_files);
+ kfree(entry);
+ }
+ break;
+ } else if (filp > entry->vm_filp) {
+ iter = iter->rb_right;
+ } else {
+ iter = iter->rb_left;
+ }
+ }
+
+ write_unlock(&node->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_node_revoke);
+
+/**
+ * drm_vma_node_is_allowed - Check whether an open-file is granted access
+ * @node: Node to check
+ * @filp: Open-file to check for
+ *
+ * Search the list in @node to check whether @filp is currently on the list
+ * of allowed open-files (see drm_vma_node_allow()).
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * true iff @filp is on the list
+ */
+bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
+ struct file *filp)
+{
+ struct drm_vma_offset_file *entry;
+ struct rb_node *iter;
+
+ read_lock(&node->vm_lock);
+
+ iter = node->vm_files.rb_node;
+ while (likely(iter)) {
+ entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
+ if (filp == entry->vm_filp)
+ break;
+ else if (filp > entry->vm_filp)
+ iter = iter->rb_right;
+ else
+ iter = iter->rb_left;
+ }
+
+ read_unlock(&node->vm_lock);
+
+	return iter != NULL; /* non-NULL iff @filp was found */
+}
+EXPORT_SYMBOL(drm_vma_node_is_allowed);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
new file mode 100644
index 00000000000..178d2a9672a
--- /dev/null
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -0,0 +1,98 @@
+config DRM_EXYNOS
+ tristate "DRM Support for Samsung SoC EXYNOS Series"
+ depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+ select VIDEOMODE_HELPERS
+ help
+ Choose this option if you have a Samsung SoC EXYNOS chipset.
+	  If M is selected, the module will be called exynosdrm.
+
+config DRM_EXYNOS_IOMMU
+ bool "EXYNOS DRM IOMMU Support"
+ depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+ help
+	  Choose this option if you want to use the IOMMU feature for DRM.
+
+config DRM_EXYNOS_DMABUF
+ bool "EXYNOS DRM DMABUF"
+ depends on DRM_EXYNOS
+ help
+	  Choose this option if you want to use the DMABUF feature for DRM.
+
+config DRM_EXYNOS_FIMD
+ bool "Exynos DRM FIMD"
+ depends on DRM_EXYNOS && !FB_S3C
+ select FB_MODE_HELPERS
+ help
+ Choose this option if you want to use Exynos FIMD for DRM.
+
+config DRM_EXYNOS_DPI
+ bool "EXYNOS DRM parallel output support"
+ depends on DRM_EXYNOS_FIMD
+ select DRM_PANEL
+ default n
+ help
+ This enables support for Exynos parallel output.
+
+config DRM_EXYNOS_DSI
+ bool "EXYNOS DRM MIPI-DSI driver support"
+ depends on DRM_EXYNOS_FIMD
+ select DRM_MIPI_DSI
+ select DRM_PANEL
+ default n
+ help
+ This enables support for Exynos MIPI-DSI device.
+
+config DRM_EXYNOS_DP
+ bool "EXYNOS DRM DP driver support"
+ depends on DRM_EXYNOS_FIMD && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
+ default DRM_EXYNOS
+ help
+ This enables support for DP device.
+
+config DRM_EXYNOS_HDMI
+ bool "Exynos DRM HDMI"
+ depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_TV
+ help
+ Choose this option if you want to use Exynos HDMI for DRM.
+
+config DRM_EXYNOS_VIDI
+ bool "Exynos DRM Virtual Display"
+ depends on DRM_EXYNOS
+ help
+ Choose this option if you want to use Exynos VIDI for DRM.
+
+config DRM_EXYNOS_G2D
+ bool "Exynos DRM G2D"
+ depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
+ help
+ Choose this option if you want to use Exynos G2D for DRM.
+
+config DRM_EXYNOS_IPP
+ bool "Exynos DRM IPP"
+ depends on DRM_EXYNOS
+ help
+	  Choose this option if you want to use the IPP feature for DRM.
+
+config DRM_EXYNOS_FIMC
+ bool "Exynos DRM FIMC"
+ depends on DRM_EXYNOS_IPP && MFD_SYSCON
+ help
+ Choose this option if you want to use Exynos FIMC for DRM.
+
+config DRM_EXYNOS_ROTATOR
+ bool "Exynos DRM Rotator"
+ depends on DRM_EXYNOS_IPP
+ help
+ Choose this option if you want to use Exynos Rotator for DRM.
+
+config DRM_EXYNOS_GSC
+ bool "Exynos DRM GSC"
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM
+ help
+ Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
new file mode 100644
index 00000000000..33ae3652b8d
--- /dev/null
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -0,0 +1,25 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
+exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \
+ exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
+ exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
+ exynos_drm_plane.o
+
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_DPI) += exynos_drm_dpi.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_DSI) += exynos_drm_dsi.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_DP) += exynos_dp_core.o exynos_dp_reg.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o
+
+obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
new file mode 100644
index 00000000000..a8ffc8c1477
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -0,0 +1,1393 @@
+/*
+ * Samsung SoC DP (DisplayPort) interface driver.
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/component.h>
+#include <linux/phy/phy.h>
+#include <video/of_display_timing.h>
+#include <video/of_videomode.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/bridge/ptn3460.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_dp_core.h"
+
+#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
+ connector)
+
+struct bridge_init {
+ struct i2c_client *client;
+ struct device_node *node;
+};
+
+static int exynos_dp_init_dp(struct exynos_dp_device *dp)
+{
+ exynos_dp_reset(dp);
+
+ exynos_dp_swreset(dp);
+
+ exynos_dp_init_analog_param(dp);
+ exynos_dp_init_interrupt(dp);
+
+ /* SW defined function Normal operation */
+ exynos_dp_enable_sw_function(dp);
+
+ exynos_dp_config_interrupt(dp);
+ exynos_dp_init_analog_func(dp);
+
+ exynos_dp_init_hpd(dp);
+ exynos_dp_init_aux(dp);
+
+ return 0;
+}
+
+static int exynos_dp_detect_hpd(struct exynos_dp_device *dp)
+{
+ int timeout_loop = 0;
+
+ while (exynos_dp_get_plug_in_status(dp) != 0) {
+ timeout_loop++;
+ if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
+ dev_err(dp->dev, "failed to get hpd plug status\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(10, 11);
+ }
+
+ return 0;
+}
+
+static unsigned char exynos_dp_calc_edid_check_sum(unsigned char *edid_data)
+{
+ int i;
+ unsigned char sum = 0;
+
+ for (i = 0; i < EDID_BLOCK_LENGTH; i++)
+ sum = sum + edid_data[i];
+
+ return sum;
+}
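Per the EDID specification, the last byte of each 128-byte block is chosen so that all 128 bytes sum to zero modulo 256; since `sum` above is a u8, the helper returns that wrapped sum, and any non-zero result indicates corruption. A hedged validity wrapper (illustration only):

static bool edid_block_valid(unsigned char block[EDID_BLOCK_LENGTH])
{
	/* e.g. a block of 127 zero bytes must carry checksum 0x00; if
	 * byte 0 were 0x01, the checksum byte would have to be 0xff.
	 */
	return exynos_dp_calc_edid_check_sum(block) == 0;
}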
+
+static int exynos_dp_read_edid(struct exynos_dp_device *dp)
+{
+ unsigned char edid[EDID_BLOCK_LENGTH * 2];
+ unsigned int extend_block = 0;
+ unsigned char sum;
+ unsigned char test_vector;
+ int retval;
+
+ /*
+	 * The EDID device address is 0x50. If necessary, the segment
+	 * pointer for E-EDID access must first be written to the I2C
+	 * device at address 0x30.
+ */
+
+ /* Read Extension Flag, Number of 128-byte EDID extension blocks */
+ retval = exynos_dp_read_byte_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
+ EDID_EXTENSION_FLAG,
+ &extend_block);
+ if (retval)
+ return retval;
+
+ if (extend_block > 0) {
+ dev_dbg(dp->dev, "EDID data includes a single extension!\n");
+
+ /* Read EDID data */
+ retval = exynos_dp_read_bytes_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
+ EDID_HEADER_PATTERN,
+ EDID_BLOCK_LENGTH,
+ &edid[EDID_HEADER_PATTERN]);
+ if (retval != 0) {
+ dev_err(dp->dev, "EDID Read failed!\n");
+ return -EIO;
+ }
+ sum = exynos_dp_calc_edid_check_sum(edid);
+ if (sum != 0) {
+ dev_err(dp->dev, "EDID bad checksum!\n");
+ return -EIO;
+ }
+
+ /* Read additional EDID data */
+ retval = exynos_dp_read_bytes_from_i2c(dp,
+ I2C_EDID_DEVICE_ADDR,
+ EDID_BLOCK_LENGTH,
+ EDID_BLOCK_LENGTH,
+ &edid[EDID_BLOCK_LENGTH]);
+ if (retval != 0) {
+ dev_err(dp->dev, "EDID Read failed!\n");
+ return -EIO;
+ }
+ sum = exynos_dp_calc_edid_check_sum(&edid[EDID_BLOCK_LENGTH]);
+ if (sum != 0) {
+ dev_err(dp->dev, "EDID bad checksum!\n");
+ return -EIO;
+ }
+
+ exynos_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
+ &test_vector);
+ if (test_vector & DP_TEST_LINK_EDID_READ) {
+ exynos_dp_write_byte_to_dpcd(dp,
+ DP_TEST_EDID_CHECKSUM,
+ edid[EDID_BLOCK_LENGTH + EDID_CHECKSUM]);
+ exynos_dp_write_byte_to_dpcd(dp,
+ DP_TEST_RESPONSE,
+ DP_TEST_EDID_CHECKSUM_WRITE);
+ }
+ } else {
+ dev_info(dp->dev, "EDID data does not include any extensions.\n");
+
+ /* Read EDID data */
+ retval = exynos_dp_read_bytes_from_i2c(dp,
+ I2C_EDID_DEVICE_ADDR,
+ EDID_HEADER_PATTERN,
+ EDID_BLOCK_LENGTH,
+ &edid[EDID_HEADER_PATTERN]);
+ if (retval != 0) {
+ dev_err(dp->dev, "EDID Read failed!\n");
+ return -EIO;
+ }
+ sum = exynos_dp_calc_edid_check_sum(edid);
+ if (sum != 0) {
+ dev_err(dp->dev, "EDID bad checksum!\n");
+ return -EIO;
+ }
+
+ exynos_dp_read_byte_from_dpcd(dp,
+ DP_TEST_REQUEST,
+ &test_vector);
+ if (test_vector & DP_TEST_LINK_EDID_READ) {
+ exynos_dp_write_byte_to_dpcd(dp,
+ DP_TEST_EDID_CHECKSUM,
+ edid[EDID_CHECKSUM]);
+ exynos_dp_write_byte_to_dpcd(dp,
+ DP_TEST_RESPONSE,
+ DP_TEST_EDID_CHECKSUM_WRITE);
+ }
+ }
+
+ dev_err(dp->dev, "EDID Read success!\n");
+ return 0;
+}
+
+static int exynos_dp_handle_edid(struct exynos_dp_device *dp)
+{
+ u8 buf[12];
+ int i;
+ int retval;
+
+ /* Read DPCD DP_DPCD_REV~RECEIVE_PORT1_CAP_1 */
+ retval = exynos_dp_read_bytes_from_dpcd(dp, DP_DPCD_REV,
+ 12, buf);
+ if (retval)
+ return retval;
+
+ /* Read EDID */
+ for (i = 0; i < 3; i++) {
+ retval = exynos_dp_read_edid(dp);
+ if (!retval)
+ break;
+ }
+
+ return retval;
+}
+
+static void exynos_dp_enable_rx_to_enhanced_mode(struct exynos_dp_device *dp,
+ bool enable)
+{
+ u8 data;
+
+ exynos_dp_read_byte_from_dpcd(dp, DP_LANE_COUNT_SET, &data);
+
+ if (enable)
+ exynos_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
+ DP_LANE_COUNT_ENHANCED_FRAME_EN |
+ DPCD_LANE_COUNT_SET(data));
+ else
+ exynos_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
+ DPCD_LANE_COUNT_SET(data));
+}
+
+static int exynos_dp_is_enhanced_mode_available(struct exynos_dp_device *dp)
+{
+ u8 data;
+ int retval;
+
+ exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
+ retval = DPCD_ENHANCED_FRAME_CAP(data);
+
+ return retval;
+}
+
+static void exynos_dp_set_enhanced_mode(struct exynos_dp_device *dp)
+{
+ u8 data;
+
+ data = exynos_dp_is_enhanced_mode_available(dp);
+ exynos_dp_enable_rx_to_enhanced_mode(dp, data);
+ exynos_dp_enable_enhanced_mode(dp, data);
+}
+
+static void exynos_dp_training_pattern_dis(struct exynos_dp_device *dp)
+{
+ exynos_dp_set_training_pattern(dp, DP_NONE);
+
+ exynos_dp_write_byte_to_dpcd(dp,
+ DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+}
+
+static void exynos_dp_set_lane_lane_pre_emphasis(struct exynos_dp_device *dp,
+ int pre_emphasis, int lane)
+{
+ switch (lane) {
+ case 0:
+ exynos_dp_set_lane0_pre_emphasis(dp, pre_emphasis);
+ break;
+ case 1:
+ exynos_dp_set_lane1_pre_emphasis(dp, pre_emphasis);
+ break;
+
+ case 2:
+ exynos_dp_set_lane2_pre_emphasis(dp, pre_emphasis);
+ break;
+
+ case 3:
+ exynos_dp_set_lane3_pre_emphasis(dp, pre_emphasis);
+ break;
+ }
+}
+
+static int exynos_dp_link_start(struct exynos_dp_device *dp)
+{
+ u8 buf[4];
+ int lane, lane_count, pll_tries, retval;
+
+ lane_count = dp->link_train.lane_count;
+
+ dp->link_train.lt_state = CLOCK_RECOVERY;
+ dp->link_train.eq_loop = 0;
+
+ for (lane = 0; lane < lane_count; lane++)
+ dp->link_train.cr_loop[lane] = 0;
+
+	/* Set the link rate and lane count to be established */
+ exynos_dp_set_link_bandwidth(dp, dp->link_train.link_rate);
+ exynos_dp_set_lane_count(dp, dp->link_train.lane_count);
+
+ /* Setup RX configuration */
+ buf[0] = dp->link_train.link_rate;
+ buf[1] = dp->link_train.lane_count;
+ retval = exynos_dp_write_bytes_to_dpcd(dp, DP_LINK_BW_SET,
+ 2, buf);
+ if (retval)
+ return retval;
+
+ /* Set TX pre-emphasis to minimum */
+ for (lane = 0; lane < lane_count; lane++)
+ exynos_dp_set_lane_lane_pre_emphasis(dp,
+ PRE_EMPHASIS_LEVEL_0, lane);
+
+ /* Wait for PLL lock */
+ pll_tries = 0;
+ while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+ if (pll_tries == DP_TIMEOUT_LOOP_COUNT) {
+ dev_err(dp->dev, "Wait for PLL lock timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ pll_tries++;
+ usleep_range(90, 120);
+ }
+
+ /* Set training pattern 1 */
+ exynos_dp_set_training_pattern(dp, TRAINING_PTN1);
+
+ /* Set RX training pattern */
+ retval = exynos_dp_write_byte_to_dpcd(dp,
+ DP_TRAINING_PATTERN_SET,
+ DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1);
+ if (retval)
+ return retval;
+
+ for (lane = 0; lane < lane_count; lane++)
+ buf[lane] = DP_TRAIN_PRE_EMPHASIS_0 |
+ DP_TRAIN_VOLTAGE_SWING_400;
+
+ retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
+ lane_count, buf);
+
+ return retval;
+}
+
+static unsigned char exynos_dp_get_lane_status(u8 link_status[2], int lane)
+{
+ int shift = (lane & 1) * 4;
+ u8 link_value = link_status[lane>>1];
+
+ return (link_value >> shift) & 0xf;
+}
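DPCD packs the status of two lanes into each byte, one nibble per lane, which is what the shift arithmetic above unpacks. A worked example (illustration only, using the DP_LANE_* bit names from drm_dp_helper.h):

static void lane_status_example(void)
{
	u8 link_status[2] = { 0x17, 0x00 };

	/* Lane 0 = low nibble 0x7: CR done | channel EQ done | symbol locked. */
	WARN_ON(exynos_dp_get_lane_status(link_status, 0) != 0x7);
	/* Lane 1 = high nibble 0x1: clock recovery done only. */
	WARN_ON(exynos_dp_get_lane_status(link_status, 1) != 0x1);
}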
+
+static int exynos_dp_clock_recovery_ok(u8 link_status[2], int lane_count)
+{
+ int lane;
+ u8 lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = exynos_dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int exynos_dp_channel_eq_ok(u8 link_status[2], u8 link_align,
+ int lane_count)
+{
+ int lane;
+ u8 lane_status;
+
+ if ((link_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return -EINVAL;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = exynos_dp_get_lane_status(link_status, lane);
+ lane_status &= DP_CHANNEL_EQ_BITS;
+ if (lane_status != DP_CHANNEL_EQ_BITS)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static unsigned char exynos_dp_get_adjust_request_voltage(u8 adjust_request[2],
+ int lane)
+{
+ int shift = (lane & 1) * 4;
+ u8 link_value = adjust_request[lane>>1];
+
+ return (link_value >> shift) & 0x3;
+}
+
+static unsigned char exynos_dp_get_adjust_request_pre_emphasis(
+ u8 adjust_request[2],
+ int lane)
+{
+ int shift = (lane & 1) * 4;
+ u8 link_value = adjust_request[lane>>1];
+
+ return ((link_value >> shift) & 0xc) >> 2;
+}
+
+static void exynos_dp_set_lane_link_training(struct exynos_dp_device *dp,
+ u8 training_lane_set, int lane)
+{
+ switch (lane) {
+ case 0:
+ exynos_dp_set_lane0_link_training(dp, training_lane_set);
+ break;
+ case 1:
+ exynos_dp_set_lane1_link_training(dp, training_lane_set);
+ break;
+
+ case 2:
+ exynos_dp_set_lane2_link_training(dp, training_lane_set);
+ break;
+
+ case 3:
+ exynos_dp_set_lane3_link_training(dp, training_lane_set);
+ break;
+ }
+}
+
+static unsigned int exynos_dp_get_lane_link_training(
+ struct exynos_dp_device *dp,
+ int lane)
+{
+ u32 reg;
+
+ switch (lane) {
+ case 0:
+ reg = exynos_dp_get_lane0_link_training(dp);
+ break;
+ case 1:
+ reg = exynos_dp_get_lane1_link_training(dp);
+ break;
+ case 2:
+ reg = exynos_dp_get_lane2_link_training(dp);
+ break;
+ case 3:
+ reg = exynos_dp_get_lane3_link_training(dp);
+ break;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return reg;
+}
+
+static void exynos_dp_reduce_link_rate(struct exynos_dp_device *dp)
+{
+ exynos_dp_training_pattern_dis(dp);
+ exynos_dp_set_enhanced_mode(dp);
+
+ dp->link_train.lt_state = FAILED;
+}
+
+static void exynos_dp_get_adjust_training_lane(struct exynos_dp_device *dp,
+ u8 adjust_request[2])
+{
+ int lane, lane_count;
+ u8 voltage_swing, pre_emphasis, training_lane;
+
+ lane_count = dp->link_train.lane_count;
+ for (lane = 0; lane < lane_count; lane++) {
+ voltage_swing = exynos_dp_get_adjust_request_voltage(
+ adjust_request, lane);
+ pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
+ adjust_request, lane);
+ training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
+ DPCD_PRE_EMPHASIS_SET(pre_emphasis);
+
+ if (voltage_swing == VOLTAGE_LEVEL_3)
+ training_lane |= DP_TRAIN_MAX_SWING_REACHED;
+ if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
+ training_lane |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ dp->link_train.training_lane[lane] = training_lane;
+ }
+}
+
+static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
+{
+ int lane, lane_count, retval;
+ u8 voltage_swing, pre_emphasis, training_lane;
+ u8 link_status[2], adjust_request[2];
+
+ usleep_range(100, 101);
+
+ lane_count = dp->link_train.lane_count;
+
+ retval = exynos_dp_read_bytes_from_dpcd(dp,
+ DP_LANE0_1_STATUS, 2, link_status);
+ if (retval)
+ return retval;
+
+ retval = exynos_dp_read_bytes_from_dpcd(dp,
+ DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
+ if (retval)
+ return retval;
+
+ if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) {
+ /* set training pattern 2 for EQ */
+ exynos_dp_set_training_pattern(dp, TRAINING_PTN2);
+
+ retval = exynos_dp_write_byte_to_dpcd(dp,
+ DP_TRAINING_PATTERN_SET,
+ DP_LINK_SCRAMBLING_DISABLE |
+ DP_TRAINING_PATTERN_2);
+ if (retval)
+ return retval;
+
+ dev_info(dp->dev, "Link Training Clock Recovery success\n");
+ dp->link_train.lt_state = EQUALIZER_TRAINING;
+ } else {
+ for (lane = 0; lane < lane_count; lane++) {
+ training_lane = exynos_dp_get_lane_link_training(
+ dp, lane);
+ voltage_swing = exynos_dp_get_adjust_request_voltage(
+ adjust_request, lane);
+ pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
+ adjust_request, lane);
+
+ if (DPCD_VOLTAGE_SWING_GET(training_lane) ==
+ voltage_swing &&
+ DPCD_PRE_EMPHASIS_GET(training_lane) ==
+ pre_emphasis)
+ dp->link_train.cr_loop[lane]++;
+
+ if (dp->link_train.cr_loop[lane] == MAX_CR_LOOP ||
+ voltage_swing == VOLTAGE_LEVEL_3 ||
+ pre_emphasis == PRE_EMPHASIS_LEVEL_3) {
+ dev_err(dp->dev, "CR Max reached (%d,%d,%d)\n",
+ dp->link_train.cr_loop[lane],
+ voltage_swing, pre_emphasis);
+ exynos_dp_reduce_link_rate(dp);
+ return -EIO;
+ }
+ }
+ }
+
+ exynos_dp_get_adjust_training_lane(dp, adjust_request);
+
+ for (lane = 0; lane < lane_count; lane++)
+ exynos_dp_set_lane_link_training(dp,
+ dp->link_train.training_lane[lane], lane);
+
+ retval = exynos_dp_write_bytes_to_dpcd(dp,
+ DP_TRAINING_LANE0_SET, lane_count,
+ dp->link_train.training_lane);
+ if (retval)
+ return retval;
+
+ return retval;
+}
+
+static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
+{
+ int lane, lane_count, retval;
+ u32 reg;
+ u8 link_align, link_status[2], adjust_request[2];
+
+ usleep_range(400, 401);
+
+ lane_count = dp->link_train.lane_count;
+
+ retval = exynos_dp_read_bytes_from_dpcd(dp,
+ DP_LANE0_1_STATUS, 2, link_status);
+ if (retval)
+ return retval;
+
+ if (exynos_dp_clock_recovery_ok(link_status, lane_count)) {
+ exynos_dp_reduce_link_rate(dp);
+ return -EIO;
+ }
+
+ retval = exynos_dp_read_bytes_from_dpcd(dp,
+ DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
+ if (retval)
+ return retval;
+
+ retval = exynos_dp_read_byte_from_dpcd(dp,
+ DP_LANE_ALIGN_STATUS_UPDATED, &link_align);
+ if (retval)
+ return retval;
+
+ exynos_dp_get_adjust_training_lane(dp, adjust_request);
+
+ if (!exynos_dp_channel_eq_ok(link_status, link_align, lane_count)) {
+		/* training pattern set to normal */
+ exynos_dp_training_pattern_dis(dp);
+
+ dev_info(dp->dev, "Link Training success!\n");
+
+ exynos_dp_get_link_bandwidth(dp, &reg);
+ dp->link_train.link_rate = reg;
+ dev_dbg(dp->dev, "final bandwidth = %.2x\n",
+ dp->link_train.link_rate);
+
+ exynos_dp_get_lane_count(dp, &reg);
+ dp->link_train.lane_count = reg;
+ dev_dbg(dp->dev, "final lane count = %.2x\n",
+ dp->link_train.lane_count);
+
+ /* set enhanced mode if available */
+ exynos_dp_set_enhanced_mode(dp);
+ dp->link_train.lt_state = FINISHED;
+
+ return 0;
+ }
+
+ /* not all locked */
+ dp->link_train.eq_loop++;
+
+ if (dp->link_train.eq_loop > MAX_EQ_LOOP) {
+ dev_err(dp->dev, "EQ Max loop\n");
+ exynos_dp_reduce_link_rate(dp);
+ return -EIO;
+ }
+
+ for (lane = 0; lane < lane_count; lane++)
+ exynos_dp_set_lane_link_training(dp,
+ dp->link_train.training_lane[lane], lane);
+
+ retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
+ lane_count, dp->link_train.training_lane);
+
+ return retval;
+}
+
+static void exynos_dp_get_max_rx_bandwidth(struct exynos_dp_device *dp,
+ u8 *bandwidth)
+{
+ u8 data;
+
+ /*
+ * For DP rev.1.1, Maximum link rate of Main Link lanes
+ * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps
+ */
+ exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LINK_RATE, &data);
+ *bandwidth = data;
+}
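The DPCD link-bandwidth code is the link rate in units of 0.27 Gbps, which is why 0x06 and 0x0a decode to 1.62 and 2.7 Gbps in the comment above. A hedged helper making the conversion explicit (illustration only):

static unsigned int dp_link_rate_mbps(u8 bw_code)
{
	/* 0x06 -> 1620 Mbps, 0x0a -> 2700 Mbps */
	return bw_code * 270;
}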
+
+static void exynos_dp_get_max_rx_lane_count(struct exynos_dp_device *dp,
+ u8 *lane_count)
+{
+ u8 data;
+
+ /*
+ * For DP rev.1.1, Maximum number of Main Link lanes
+ * 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes
+ */
+ exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
+ *lane_count = DPCD_MAX_LANE_COUNT(data);
+}
+
+static void exynos_dp_init_training(struct exynos_dp_device *dp,
+ enum link_lane_count_type max_lane,
+ enum link_rate_type max_rate)
+{
+ /*
+	 * MACRO_RST must be held for at least 10 us after PLL_LOCK
+	 * to avoid the DP inter-pair skew issue
+ */
+ exynos_dp_reset_macro(dp);
+
+ /* Initialize by reading RX's DPCD */
+ exynos_dp_get_max_rx_bandwidth(dp, &dp->link_train.link_rate);
+ exynos_dp_get_max_rx_lane_count(dp, &dp->link_train.lane_count);
+
+ if ((dp->link_train.link_rate != LINK_RATE_1_62GBPS) &&
+ (dp->link_train.link_rate != LINK_RATE_2_70GBPS)) {
+ dev_err(dp->dev, "Rx Max Link Rate is abnormal :%x !\n",
+ dp->link_train.link_rate);
+ dp->link_train.link_rate = LINK_RATE_1_62GBPS;
+ }
+
+ if (dp->link_train.lane_count == 0) {
+ dev_err(dp->dev, "Rx Max Lane count is abnormal :%x !\n",
+ dp->link_train.lane_count);
+ dp->link_train.lane_count = (u8)LANE_COUNT1;
+ }
+
+ /* Setup TX lane count & rate */
+ if (dp->link_train.lane_count > max_lane)
+ dp->link_train.lane_count = max_lane;
+ if (dp->link_train.link_rate > max_rate)
+ dp->link_train.link_rate = max_rate;
+
+ /* All DP analog module power up */
+ exynos_dp_set_analog_power_down(dp, POWER_ALL, 0);
+}
+
+static int exynos_dp_sw_link_training(struct exynos_dp_device *dp)
+{
+ int retval = 0, training_finished = 0;
+
+ dp->link_train.lt_state = START;
+
+ /* Process here */
+ while (!retval && !training_finished) {
+ switch (dp->link_train.lt_state) {
+ case START:
+ retval = exynos_dp_link_start(dp);
+ if (retval)
+ dev_err(dp->dev, "LT link start failed!\n");
+ break;
+ case CLOCK_RECOVERY:
+ retval = exynos_dp_process_clock_recovery(dp);
+ if (retval)
+ dev_err(dp->dev, "LT CR failed!\n");
+ break;
+ case EQUALIZER_TRAINING:
+ retval = exynos_dp_process_equalizer_training(dp);
+ if (retval)
+ dev_err(dp->dev, "LT EQ failed!\n");
+ break;
+ case FINISHED:
+ training_finished = 1;
+ break;
+ case FAILED:
+ return -EREMOTEIO;
+ }
+ }
+ if (retval)
+ dev_err(dp->dev, "eDP link training failed (%d)\n", retval);
+
+ return retval;
+}
+
+static int exynos_dp_set_link_train(struct exynos_dp_device *dp,
+ u32 count,
+ u32 bwtype)
+{
+ int i;
+ int retval;
+
+ for (i = 0; i < DP_TIMEOUT_LOOP_COUNT; i++) {
+ exynos_dp_init_training(dp, count, bwtype);
+ retval = exynos_dp_sw_link_training(dp);
+ if (retval == 0)
+ break;
+
+ usleep_range(100, 110);
+ }
+
+ return retval;
+}
+
+static int exynos_dp_config_video(struct exynos_dp_device *dp)
+{
+ int retval = 0;
+ int timeout_loop = 0;
+ int done_count = 0;
+
+ exynos_dp_config_video_slave_mode(dp);
+
+ exynos_dp_set_video_color_format(dp);
+
+ if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+ dev_err(dp->dev, "PLL is not locked yet.\n");
+ return -EINVAL;
+ }
+
+ for (;;) {
+ timeout_loop++;
+ if (exynos_dp_is_slave_video_stream_clock_on(dp) == 0)
+ break;
+ if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
+ dev_err(dp->dev, "Timeout of video streamclk ok\n");
+ return -ETIMEDOUT;
+ }
+
+ usleep_range(1, 2);
+ }
+
+ /* Set to use the register calculated M/N video */
+ exynos_dp_set_video_cr_mn(dp, CALCULATED_M, 0, 0);
+
+ /* For video bist, Video timing must be generated by register */
+ exynos_dp_set_video_timing_mode(dp, VIDEO_TIMING_FROM_CAPTURE);
+
+ /* Disable video mute */
+ exynos_dp_enable_video_mute(dp, 0);
+
+ /* Configure video slave mode */
+ exynos_dp_enable_video_master(dp, 0);
+
+ /* Enable video */
+ exynos_dp_start_video(dp);
+
+ timeout_loop = 0;
+
+ for (;;) {
+ timeout_loop++;
+ if (exynos_dp_is_video_stream_on(dp) == 0) {
+ done_count++;
+ if (done_count > 10)
+ break;
+ } else if (done_count) {
+ done_count = 0;
+ }
+ if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
+ dev_err(dp->dev, "Timeout of video streamclk ok\n");
+ return -ETIMEDOUT;
+ }
+
+ usleep_range(1000, 1001);
+ }
+
+ if (retval != 0)
+ dev_err(dp->dev, "Video stream is not detected!\n");
+
+ return retval;
+}
+
+static void exynos_dp_enable_scramble(struct exynos_dp_device *dp, bool enable)
+{
+ u8 data;
+
+ if (enable) {
+ exynos_dp_enable_scrambling(dp);
+
+ exynos_dp_read_byte_from_dpcd(dp,
+ DP_TRAINING_PATTERN_SET,
+ &data);
+ exynos_dp_write_byte_to_dpcd(dp,
+ DP_TRAINING_PATTERN_SET,
+ (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
+ } else {
+ exynos_dp_disable_scrambling(dp);
+
+ exynos_dp_read_byte_from_dpcd(dp,
+ DP_TRAINING_PATTERN_SET,
+ &data);
+ exynos_dp_write_byte_to_dpcd(dp,
+ DP_TRAINING_PATTERN_SET,
+ (u8)(data | DP_LINK_SCRAMBLING_DISABLE));
+ }
+}
+
+static irqreturn_t exynos_dp_irq_handler(int irq, void *arg)
+{
+ struct exynos_dp_device *dp = arg;
+
+ enum dp_irq_type irq_type;
+
+ irq_type = exynos_dp_get_irq_type(dp);
+ switch (irq_type) {
+ case DP_IRQ_TYPE_HP_CABLE_IN:
+ dev_dbg(dp->dev, "Received irq - cable in\n");
+ schedule_work(&dp->hotplug_work);
+ exynos_dp_clear_hotplug_interrupts(dp);
+ break;
+ case DP_IRQ_TYPE_HP_CABLE_OUT:
+ dev_dbg(dp->dev, "Received irq - cable out\n");
+ exynos_dp_clear_hotplug_interrupts(dp);
+ break;
+ case DP_IRQ_TYPE_HP_CHANGE:
+ /*
+ * We get these change notifications once in a while, but there
+ * is nothing we can do with them. Just ignore it for now and
+ * only handle cable changes.
+ */
+ dev_dbg(dp->dev, "Received irq - hotplug change; ignoring.\n");
+ exynos_dp_clear_hotplug_interrupts(dp);
+ break;
+ default:
+ dev_err(dp->dev, "Received irq - unknown type!\n");
+ break;
+ }
+ return IRQ_HANDLED;
+}
+
+static void exynos_dp_hotplug(struct work_struct *work)
+{
+ struct exynos_dp_device *dp;
+ int ret;
+
+ dp = container_of(work, struct exynos_dp_device, hotplug_work);
+
+ ret = exynos_dp_detect_hpd(dp);
+ if (ret) {
+ /* Cable has been disconnected, we're done */
+ return;
+ }
+
+ ret = exynos_dp_handle_edid(dp);
+ if (ret) {
+ dev_err(dp->dev, "unable to handle edid\n");
+ return;
+ }
+
+ ret = exynos_dp_set_link_train(dp, dp->video_info->lane_count,
+ dp->video_info->link_rate);
+ if (ret) {
+ dev_err(dp->dev, "unable to do link train\n");
+ return;
+ }
+
+ exynos_dp_enable_scramble(dp, 1);
+ exynos_dp_enable_rx_to_enhanced_mode(dp, 1);
+ exynos_dp_enable_enhanced_mode(dp, 1);
+
+ exynos_dp_set_lane_count(dp, dp->video_info->lane_count);
+ exynos_dp_set_link_bandwidth(dp, dp->video_info->link_rate);
+
+ exynos_dp_init_video(dp);
+ ret = exynos_dp_config_video(dp);
+ if (ret)
+ dev_err(dp->dev, "unable to config video\n");
+}
+
+static enum drm_connector_status exynos_dp_detect(
+ struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void exynos_dp_connector_destroy(struct drm_connector *connector)
+{
+}
+
+static struct drm_connector_funcs exynos_dp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = exynos_dp_detect,
+ .destroy = exynos_dp_connector_destroy,
+};
+
+static int exynos_dp_get_modes(struct drm_connector *connector)
+{
+ struct exynos_dp_device *dp = ctx_from_connector(connector);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ DRM_ERROR("failed to create a new display mode.\n");
+ return 0;
+ }
+
+ drm_display_mode_from_videomode(&dp->panel.vm, mode);
+ mode->width_mm = dp->panel.width_mm;
+ mode->height_mm = dp->panel.height_mm;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static struct drm_encoder *exynos_dp_best_encoder(
+ struct drm_connector *connector)
+{
+ struct exynos_dp_device *dp = ctx_from_connector(connector);
+
+ return dp->encoder;
+}
+
+static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
+ .get_modes = exynos_dp_get_modes,
+ .best_encoder = exynos_dp_best_encoder,
+};
+
+static bool find_bridge(const char *compat, struct bridge_init *bridge)
+{
+ bridge->client = NULL;
+ bridge->node = of_find_compatible_node(NULL, NULL, compat);
+ if (!bridge->node)
+ return false;
+
+ bridge->client = of_find_i2c_device_by_node(bridge->node);
+ if (!bridge->client)
+ return false;
+
+ return true;
+}
+
+/* returns the number of bridges attached */
+static int exynos_drm_attach_lcd_bridge(struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ struct bridge_init bridge;
+ int ret;
+
+ if (find_bridge("nxp,ptn3460", &bridge)) {
+ ret = ptn3460_init(dev, encoder, bridge.client, bridge.node);
+ if (!ret)
+ return 1;
+ }
+ return 0;
+}
+
+static int exynos_dp_create_connector(struct exynos_drm_display *display,
+ struct drm_encoder *encoder)
+{
+ struct exynos_dp_device *dp = display->ctx;
+ struct drm_connector *connector = &dp->connector;
+ int ret;
+
+ dp->encoder = encoder;
+
+ /* Pre-empt DP connector creation if there's a bridge */
+ ret = exynos_drm_attach_lcd_bridge(dp->drm_dev, encoder);
+ if (ret)
+ return 0;
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(dp->drm_dev, connector,
+ &exynos_dp_connector_funcs, DRM_MODE_CONNECTOR_eDP);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &exynos_dp_connector_helper_funcs);
+ drm_sysfs_connector_add(connector);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+static void exynos_dp_phy_init(struct exynos_dp_device *dp)
+{
+ if (dp->phy) {
+ phy_power_on(dp->phy);
+ } else if (dp->phy_addr) {
+ u32 reg;
+
+ reg = __raw_readl(dp->phy_addr);
+ reg |= dp->enable_mask;
+ __raw_writel(reg, dp->phy_addr);
+ }
+}
+
+static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
+{
+ if (dp->phy) {
+ phy_power_off(dp->phy);
+ } else if (dp->phy_addr) {
+ u32 reg;
+
+ reg = __raw_readl(dp->phy_addr);
+ reg &= ~(dp->enable_mask);
+ __raw_writel(reg, dp->phy_addr);
+ }
+}
+
+static void exynos_dp_poweron(struct exynos_dp_device *dp)
+{
+ if (dp->dpms_mode == DRM_MODE_DPMS_ON)
+ return;
+
+ clk_prepare_enable(dp->clock);
+ exynos_dp_phy_init(dp);
+ exynos_dp_init_dp(dp);
+ enable_irq(dp->irq);
+}
+
+static void exynos_dp_poweroff(struct exynos_dp_device *dp)
+{
+ if (dp->dpms_mode != DRM_MODE_DPMS_ON)
+ return;
+
+ disable_irq(dp->irq);
+ flush_work(&dp->hotplug_work);
+ exynos_dp_phy_exit(dp);
+ clk_disable_unprepare(dp->clock);
+}
+
+static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
+{
+ struct exynos_dp_device *dp = display->ctx;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ exynos_dp_poweron(dp);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ exynos_dp_poweroff(dp);
+ break;
+ default:
+ break;
+ }
+ dp->dpms_mode = mode;
+}
+
+static struct exynos_drm_display_ops exynos_dp_display_ops = {
+ .create_connector = exynos_dp_create_connector,
+ .dpms = exynos_dp_dpms,
+};
+
+static struct exynos_drm_display exynos_dp_display = {
+ .type = EXYNOS_DISPLAY_TYPE_LCD,
+ .ops = &exynos_dp_display_ops,
+};
+
+static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
+{
+ struct device_node *dp_node = dev->of_node;
+ struct video_info *dp_video_config;
+
+ dp_video_config = devm_kzalloc(dev,
+ sizeof(*dp_video_config), GFP_KERNEL);
+ if (!dp_video_config)
+ return ERR_PTR(-ENOMEM);
+
+ dp_video_config->h_sync_polarity =
+ of_property_read_bool(dp_node, "hsync-active-high");
+
+ dp_video_config->v_sync_polarity =
+ of_property_read_bool(dp_node, "vsync-active-high");
+
+ dp_video_config->interlaced =
+ of_property_read_bool(dp_node, "interlaced");
+
+ if (of_property_read_u32(dp_node, "samsung,color-space",
+ &dp_video_config->color_space)) {
+ dev_err(dev, "failed to get color-space\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dp_node, "samsung,dynamic-range",
+ &dp_video_config->dynamic_range)) {
+ dev_err(dev, "failed to get dynamic-range\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dp_node, "samsung,ycbcr-coeff",
+ &dp_video_config->ycbcr_coeff)) {
+ dev_err(dev, "failed to get ycbcr-coeff\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dp_node, "samsung,color-depth",
+ &dp_video_config->color_depth)) {
+ dev_err(dev, "failed to get color-depth\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dp_node, "samsung,link-rate",
+ &dp_video_config->link_rate)) {
+ dev_err(dev, "failed to get link-rate\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dp_node, "samsung,lane-count",
+ &dp_video_config->lane_count)) {
+ dev_err(dev, "failed to get lane-count\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return dp_video_config;
+}
+
+static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
+{
+ struct device_node *dp_phy_node = of_node_get(dp->dev->of_node);
+ u32 phy_base;
+ int ret = 0;
+
+ dp_phy_node = of_find_node_by_name(dp_phy_node, "dptx-phy");
+ if (!dp_phy_node) {
+ dp->phy = devm_phy_get(dp->dev, "dp");
+ return PTR_ERR_OR_ZERO(dp->phy);
+ }
+
+ if (of_property_read_u32(dp_phy_node, "reg", &phy_base)) {
+ dev_err(dp->dev, "failed to get reg for dptx-phy\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (of_property_read_u32(dp_phy_node, "samsung,enable-mask",
+ &dp->enable_mask)) {
+ dev_err(dp->dev, "failed to get enable-mask for dptx-phy\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dp->phy_addr = ioremap(phy_base, SZ_4);
+ if (!dp->phy_addr) {
+ dev_err(dp->dev, "failed to ioremap dp-phy\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+err:
+ of_node_put(dp_phy_node);
+
+ return ret;
+}
+
+static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
+{
+ int ret;
+
+ ret = of_get_videomode(dp->dev->of_node, &dp->panel.vm,
+ OF_USE_NATIVE_MODE);
+ if (ret) {
+ DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm_dev = data;
+ struct resource *res;
+ struct exynos_dp_device *dp;
+ unsigned int irq_flags;
+
+ int ret = 0;
+
+ dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
+ GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->dev = &pdev->dev;
+ dp->dpms_mode = DRM_MODE_DPMS_OFF;
+
+ dp->video_info = exynos_dp_dt_parse_pdata(&pdev->dev);
+ if (IS_ERR(dp->video_info))
+ return PTR_ERR(dp->video_info);
+
+ ret = exynos_dp_dt_parse_phydata(dp);
+ if (ret)
+ return ret;
+
+ ret = exynos_dp_dt_parse_panel(dp);
+ if (ret)
+ return ret;
+
+ dp->clock = devm_clk_get(&pdev->dev, "dp");
+ if (IS_ERR(dp->clock)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(dp->clock);
+ }
+
+ clk_prepare_enable(dp->clock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dp->reg_base))
+ return PTR_ERR(dp->reg_base);
+
+ dp->hpd_gpio = of_get_named_gpio(dev->of_node, "samsung,hpd-gpio", 0);
+
+ if (gpio_is_valid(dp->hpd_gpio)) {
+ /*
+ * Set up the hotplug GPIO from the device tree as an interrupt.
+ * Simply specifying a different interrupt in the device tree
+ * doesn't work since we handle hotplug rather differently when
+ * using a GPIO. We also need the actual GPIO specifier so
+ * that we can get the current state of the GPIO.
+ */
+ ret = devm_gpio_request_one(&pdev->dev, dp->hpd_gpio, GPIOF_IN,
+ "hpd_gpio");
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get hpd gpio\n");
+ return ret;
+ }
+ dp->irq = gpio_to_irq(dp->hpd_gpio);
+ irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+ } else {
+ dp->hpd_gpio = -ENODEV;
+ dp->irq = platform_get_irq(pdev, 0);
+ irq_flags = 0;
+ }
+
+ if (dp->irq == -ENXIO) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return -ENODEV;
+ }
+
+ INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug);
+
+ exynos_dp_phy_init(dp);
+
+ exynos_dp_init_dp(dp);
+
+ ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler,
+ irq_flags, "exynos-dp", dp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ return ret;
+ }
+ disable_irq(dp->irq);
+
+ dp->drm_dev = drm_dev;
+ exynos_dp_display.ctx = dp;
+
+ platform_set_drvdata(pdev, &exynos_dp_display);
+
+ return exynos_drm_create_enc_conn(drm_dev, &exynos_dp_display);
+}
+
+static void exynos_dp_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct exynos_drm_display *display = dev_get_drvdata(dev);
+ struct exynos_dp_device *dp = display->ctx;
+ struct drm_encoder *encoder = dp->encoder;
+
+ exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
+
+ encoder->funcs->destroy(encoder);
+ drm_connector_cleanup(&dp->connector);
+}
+
+static const struct component_ops exynos_dp_ops = {
+ .bind = exynos_dp_bind,
+ .unbind = exynos_dp_unbind,
+};
+
+static int exynos_dp_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+ exynos_dp_display.type);
+ if (ret)
+ return ret;
+
+ ret = component_add(&pdev->dev, &exynos_dp_ops);
+ if (ret)
+ exynos_drm_component_del(&pdev->dev,
+ EXYNOS_DEVICE_TYPE_CONNECTOR);
+
+ return ret;
+}
+
+static int exynos_dp_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &exynos_dp_ops);
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int exynos_dp_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos_drm_display *display = platform_get_drvdata(pdev);
+
+ exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
+ return 0;
+}
+
+static int exynos_dp_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos_drm_display *display = platform_get_drvdata(pdev);
+
+ exynos_dp_dpms(display, DRM_MODE_DPMS_ON);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops exynos_dp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume)
+};
+
+static const struct of_device_id exynos_dp_match[] = {
+ { .compatible = "samsung,exynos5-dp" },
+ {},
+};
+
+struct platform_driver dp_driver = {
+ .probe = exynos_dp_probe,
+ .remove = exynos_dp_remove,
+ .driver = {
+ .name = "exynos-dp",
+ .owner = THIS_MODULE,
+ .pm = &exynos_dp_pm_ops,
+ .of_match_table = exynos_dp_match,
+ },
+};
+
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DP Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
new file mode 100644
index 00000000000..02cc4f9ab90
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -0,0 +1,279 @@
+/*
+ * Header file for Samsung DP (DisplayPort) interface driver.
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DP_CORE_H
+#define _EXYNOS_DP_CORE_H
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/exynos_drm.h>
+
+#define DP_TIMEOUT_LOOP_COUNT 100
+#define MAX_CR_LOOP 5
+#define MAX_EQ_LOOP 5
+
+enum link_rate_type {
+ LINK_RATE_1_62GBPS = 0x06,
+ LINK_RATE_2_70GBPS = 0x0a
+};
+
+enum link_lane_count_type {
+ LANE_COUNT1 = 1,
+ LANE_COUNT2 = 2,
+ LANE_COUNT4 = 4
+};
+
+enum link_training_state {
+ START,
+ CLOCK_RECOVERY,
+ EQUALIZER_TRAINING,
+ FINISHED,
+ FAILED
+};
+
+enum voltage_swing_level {
+ VOLTAGE_LEVEL_0,
+ VOLTAGE_LEVEL_1,
+ VOLTAGE_LEVEL_2,
+ VOLTAGE_LEVEL_3,
+};
+
+enum pre_emphasis_level {
+ PRE_EMPHASIS_LEVEL_0,
+ PRE_EMPHASIS_LEVEL_1,
+ PRE_EMPHASIS_LEVEL_2,
+ PRE_EMPHASIS_LEVEL_3,
+};
+
+enum pattern_set {
+ PRBS7,
+ D10_2,
+ TRAINING_PTN1,
+ TRAINING_PTN2,
+ DP_NONE
+};
+
+enum color_space {
+ COLOR_RGB,
+ COLOR_YCBCR422,
+ COLOR_YCBCR444
+};
+
+enum color_depth {
+ COLOR_6,
+ COLOR_8,
+ COLOR_10,
+ COLOR_12
+};
+
+enum color_coefficient {
+ COLOR_YCBCR601,
+ COLOR_YCBCR709
+};
+
+enum dynamic_range {
+ VESA,
+ CEA
+};
+
+enum pll_status {
+ PLL_UNLOCKED,
+ PLL_LOCKED
+};
+
+enum clock_recovery_m_value_type {
+ CALCULATED_M,
+ REGISTER_M
+};
+
+enum video_timing_recognition_type {
+ VIDEO_TIMING_FROM_CAPTURE,
+ VIDEO_TIMING_FROM_REGISTER
+};
+
+enum analog_power_block {
+ AUX_BLOCK,
+ CH0_BLOCK,
+ CH1_BLOCK,
+ CH2_BLOCK,
+ CH3_BLOCK,
+ ANALOG_TOTAL,
+ POWER_ALL
+};
+
+enum dp_irq_type {
+ DP_IRQ_TYPE_HP_CABLE_IN,
+ DP_IRQ_TYPE_HP_CABLE_OUT,
+ DP_IRQ_TYPE_HP_CHANGE,
+ DP_IRQ_TYPE_UNKNOWN,
+};
+
+struct video_info {
+ char *name;
+
+ bool h_sync_polarity;
+ bool v_sync_polarity;
+ bool interlaced;
+
+ enum color_space color_space;
+ enum dynamic_range dynamic_range;
+ enum color_coefficient ycbcr_coeff;
+ enum color_depth color_depth;
+
+ enum link_rate_type link_rate;
+ enum link_lane_count_type lane_count;
+};
+
+struct link_train {
+ int eq_loop;
+ int cr_loop[4];
+
+ u8 link_rate;
+ u8 lane_count;
+ u8 training_lane[4];
+
+ enum link_training_state lt_state;
+};
+
+struct exynos_dp_device {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct drm_connector connector;
+ struct drm_encoder *encoder;
+ struct clk *clock;
+ unsigned int irq;
+ void __iomem *reg_base;
+ void __iomem *phy_addr;
+ unsigned int enable_mask;
+
+ struct video_info *video_info;
+ struct link_train link_train;
+ struct work_struct hotplug_work;
+ struct phy *phy;
+ int dpms_mode;
+ int hpd_gpio;
+
+ struct exynos_drm_panel_info panel;
+};
+
+/* exynos_dp_reg.c */
+void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable);
+void exynos_dp_stop_video(struct exynos_dp_device *dp);
+void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable);
+void exynos_dp_init_analog_param(struct exynos_dp_device *dp);
+void exynos_dp_init_interrupt(struct exynos_dp_device *dp);
+void exynos_dp_reset(struct exynos_dp_device *dp);
+void exynos_dp_swreset(struct exynos_dp_device *dp);
+void exynos_dp_config_interrupt(struct exynos_dp_device *dp);
+enum pll_status exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp);
+void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable);
+void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp,
+ enum analog_power_block block,
+ bool enable);
+void exynos_dp_init_analog_func(struct exynos_dp_device *dp);
+void exynos_dp_init_hpd(struct exynos_dp_device *dp);
+enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp);
+void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp);
+void exynos_dp_reset_aux(struct exynos_dp_device *dp);
+void exynos_dp_init_aux(struct exynos_dp_device *dp);
+int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp);
+void exynos_dp_enable_sw_function(struct exynos_dp_device *dp);
+int exynos_dp_start_aux_transaction(struct exynos_dp_device *dp);
+int exynos_dp_write_byte_to_dpcd(struct exynos_dp_device *dp,
+ unsigned int reg_addr,
+ unsigned char data);
+int exynos_dp_read_byte_from_dpcd(struct exynos_dp_device *dp,
+ unsigned int reg_addr,
+ unsigned char *data);
+int exynos_dp_write_bytes_to_dpcd(struct exynos_dp_device *dp,
+ unsigned int reg_addr,
+ unsigned int count,
+ unsigned char data[]);
+int exynos_dp_read_bytes_from_dpcd(struct exynos_dp_device *dp,
+ unsigned int reg_addr,
+ unsigned int count,
+ unsigned char data[]);
+int exynos_dp_select_i2c_device(struct exynos_dp_device *dp,
+ unsigned int device_addr,
+ unsigned int reg_addr);
+int exynos_dp_read_byte_from_i2c(struct exynos_dp_device *dp,
+ unsigned int device_addr,
+ unsigned int reg_addr,
+ unsigned int *data);
+int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp,
+ unsigned int device_addr,
+ unsigned int reg_addr,
+ unsigned int count,
+ unsigned char edid[]);
+void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype);
+void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype);
+void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count);
+void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count);
+void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable);
+void exynos_dp_set_training_pattern(struct exynos_dp_device *dp,
+ enum pattern_set pattern);
+void exynos_dp_set_lane0_pre_emphasis(struct exynos_dp_device *dp, u32 level);
+void exynos_dp_set_lane1_pre_emphasis(struct exynos_dp_device *dp, u32 level);
+void exynos_dp_set_lane2_pre_emphasis(struct exynos_dp_device *dp, u32 level);
+void exynos_dp_set_lane3_pre_emphasis(struct exynos_dp_device *dp, u32 level);
+void exynos_dp_set_lane0_link_training(struct exynos_dp_device *dp,
+ u32 training_lane);
+void exynos_dp_set_lane1_link_training(struct exynos_dp_device *dp,
+ u32 training_lane);
+void exynos_dp_set_lane2_link_training(struct exynos_dp_device *dp,
+ u32 training_lane);
+void exynos_dp_set_lane3_link_training(struct exynos_dp_device *dp,
+ u32 training_lane);
+u32 exynos_dp_get_lane0_link_training(struct exynos_dp_device *dp);
+u32 exynos_dp_get_lane1_link_training(struct exynos_dp_device *dp);
+u32 exynos_dp_get_lane2_link_training(struct exynos_dp_device *dp);
+u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp);
+void exynos_dp_reset_macro(struct exynos_dp_device *dp);
+void exynos_dp_init_video(struct exynos_dp_device *dp);
+
+void exynos_dp_set_video_color_format(struct exynos_dp_device *dp);
+int exynos_dp_is_slave_video_stream_clock_on(struct exynos_dp_device *dp);
+void exynos_dp_set_video_cr_mn(struct exynos_dp_device *dp,
+ enum clock_recovery_m_value_type type,
+ u32 m_value,
+ u32 n_value);
+void exynos_dp_set_video_timing_mode(struct exynos_dp_device *dp, u32 type);
+void exynos_dp_enable_video_master(struct exynos_dp_device *dp, bool enable);
+void exynos_dp_start_video(struct exynos_dp_device *dp);
+int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp);
+void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp);
+void exynos_dp_enable_scrambling(struct exynos_dp_device *dp);
+void exynos_dp_disable_scrambling(struct exynos_dp_device *dp);
+
+/* I2C EDID Chip ID, Slave Address */
+#define I2C_EDID_DEVICE_ADDR 0x50
+#define I2C_E_EDID_DEVICE_ADDR 0x30
+
+#define EDID_BLOCK_LENGTH 0x80
+#define EDID_HEADER_PATTERN 0x00
+#define EDID_EXTENSION_FLAG 0x7e
+#define EDID_CHECKSUM 0x7f
+
+/* DP_MAX_LANE_COUNT */
+#define DPCD_ENHANCED_FRAME_CAP(x) (((x) >> 7) & 0x1)
+#define DPCD_MAX_LANE_COUNT(x) ((x) & 0x1f)
+
+/* DP_LANE_COUNT_SET */
+#define DPCD_LANE_COUNT_SET(x) ((x) & 0x1f)
+
+/* DP_TRAINING_LANE0_SET */
+#define DPCD_PRE_EMPHASIS_SET(x) (((x) & 0x3) << 3)
+#define DPCD_PRE_EMPHASIS_GET(x) (((x) >> 3) & 0x3)
+#define DPCD_VOLTAGE_SWING_SET(x) (((x) & 0x3) << 0)
+#define DPCD_VOLTAGE_SWING_GET(x) (((x) >> 0) & 0x3)
+
+#endif /* _EXYNOS_DP_CORE_H */
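As a usage note, the DPCD_* helpers above pack and unpack fields of standard DPCD bytes. A short sketch of deriving a usable lane count from the sink's MAX_LANE_COUNT byte follows; pick_lane_count() is a hypothetical helper, and only the macros and enums above are from this header.

/* Sketch: clamp the sink's advertised lane count to a supported value. */
static u8 pick_lane_count(u8 dpcd_max_lane_count)
{
	u8 lanes = DPCD_MAX_LANE_COUNT(dpcd_max_lane_count);

	if (lanes >= LANE_COUNT4)
		return LANE_COUNT4;
	if (lanes >= LANE_COUNT2)
		return LANE_COUNT2;
	return LANE_COUNT1;
}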
diff --git a/drivers/gpu/drm/exynos/exynos_dp_reg.c b/drivers/gpu/drm/exynos/exynos_dp_reg.c
new file mode 100644
index 00000000000..c1f87a2a928
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_dp_reg.c
@@ -0,0 +1,1263 @@
+/*
+ * Samsung DP (Display port) register interface driver.
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include "exynos_dp_core.h"
+#include "exynos_dp_reg.h"
+
+#define COMMON_INT_MASK_1 0
+#define COMMON_INT_MASK_2 0
+#define COMMON_INT_MASK_3 0
+#define COMMON_INT_MASK_4 (HOTPLUG_CHG | HPD_LOST | PLUG)
+#define INT_STA_MASK INT_HPD
+
+void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable)
+{
+ u32 reg;
+
+ if (enable) {
+ reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
+ reg |= HDCP_VIDEO_MUTE;
+ writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
+ } else {
+ reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
+ reg &= ~HDCP_VIDEO_MUTE;
+ writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
+ }
+}
+
+void exynos_dp_stop_video(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
+ reg &= ~VIDEO_EN;
+ writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
+}
+
+void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable)
+{
+ u32 reg;
+
+ if (enable)
+ reg = LANE3_MAP_LOGIC_LANE_0 | LANE2_MAP_LOGIC_LANE_1 |
+ LANE1_MAP_LOGIC_LANE_2 | LANE0_MAP_LOGIC_LANE_3;
+ else
+ reg = LANE3_MAP_LOGIC_LANE_3 | LANE2_MAP_LOGIC_LANE_2 |
+ LANE1_MAP_LOGIC_LANE_1 | LANE0_MAP_LOGIC_LANE_0;
+
+ writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP);
+}
+
+void exynos_dp_init_analog_param(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = TX_TERMINAL_CTRL_50_OHM;
+ writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_1);
+
+ reg = SEL_24M | TX_DVDD_BIT_1_0625V;
+ writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_2);
+
+ reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO;
+ writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3);
+
+ reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM |
+ TX_CUR1_2X | TX_CUR_16_MA;
+ writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1);
+
+ reg = CH3_AMP_400_MV | CH2_AMP_400_MV |
+ CH1_AMP_400_MV | CH0_AMP_400_MV;
+ writel(reg, dp->reg_base + EXYNOS_DP_TX_AMP_TUNING_CTL);
+}
+
+void exynos_dp_init_interrupt(struct exynos_dp_device *dp)
+{
+ /* Set interrupt pin assertion polarity as high */
+ writel(INT_POL1 | INT_POL0, dp->reg_base + EXYNOS_DP_INT_CTL);
+
+	/* Clear pending registers */
+ writel(0xff, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1);
+ writel(0x4f, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_2);
+ writel(0xe0, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_3);
+ writel(0xe7, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
+ writel(0x63, dp->reg_base + EXYNOS_DP_INT_STA);
+
+	/* 0: mask, 1: unmask */
+ writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_1);
+ writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_2);
+ writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_3);
+ writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_4);
+ writel(0x00, dp->reg_base + EXYNOS_DP_INT_STA_MASK);
+}
+
+void exynos_dp_reset(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ exynos_dp_stop_video(dp);
+ exynos_dp_enable_video_mute(dp, 0);
+
+ reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N |
+ AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N |
+ HDCP_FUNC_EN_N | SW_FUNC_EN_N;
+ writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1);
+
+ reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N |
+ SERDES_FIFO_FUNC_EN_N |
+ LS_CLK_DOMAIN_FUNC_EN_N;
+ writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
+
+ usleep_range(20, 30);
+
+ exynos_dp_lane_swap(dp, 0);
+
+ writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_1);
+ writel(0x40, dp->reg_base + EXYNOS_DP_SYS_CTL_2);
+ writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_3);
+ writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_4);
+
+ writel(0x0, dp->reg_base + EXYNOS_DP_PKT_SEND_CTL);
+ writel(0x0, dp->reg_base + EXYNOS_DP_HDCP_CTL);
+
+ writel(0x5e, dp->reg_base + EXYNOS_DP_HPD_DEGLITCH_L);
+ writel(0x1a, dp->reg_base + EXYNOS_DP_HPD_DEGLITCH_H);
+
+ writel(0x10, dp->reg_base + EXYNOS_DP_LINK_DEBUG_CTL);
+
+ writel(0x0, dp->reg_base + EXYNOS_DP_PHY_TEST);
+
+ writel(0x0, dp->reg_base + EXYNOS_DP_VIDEO_FIFO_THRD);
+ writel(0x20, dp->reg_base + EXYNOS_DP_AUDIO_MARGIN);
+
+ writel(0x4, dp->reg_base + EXYNOS_DP_M_VID_GEN_FILTER_TH);
+ writel(0x2, dp->reg_base + EXYNOS_DP_M_AUD_GEN_FILTER_TH);
+
+ writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
+}
+
+void exynos_dp_swreset(struct exynos_dp_device *dp)
+{
+ writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
+}
+
+void exynos_dp_config_interrupt(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ /* 0: mask, 1: unmask */
+ reg = COMMON_INT_MASK_1;
+ writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_1);
+
+ reg = COMMON_INT_MASK_2;
+ writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_2);
+
+ reg = COMMON_INT_MASK_3;
+ writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_3);
+
+ reg = COMMON_INT_MASK_4;
+ writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_4);
+
+ reg = INT_STA_MASK;
+ writel(reg, dp->reg_base + EXYNOS_DP_INT_STA_MASK);
+}
+
+enum pll_status exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_DEBUG_CTL);
+ if (reg & PLL_LOCK)
+ return PLL_LOCKED;
+ else
+ return PLL_UNLOCKED;
+}
+
+void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable)
+{
+ u32 reg;
+
+ if (enable) {
+ reg = readl(dp->reg_base + EXYNOS_DP_PLL_CTL);
+ reg |= DP_PLL_PD;
+ writel(reg, dp->reg_base + EXYNOS_DP_PLL_CTL);
+ } else {
+ reg = readl(dp->reg_base + EXYNOS_DP_PLL_CTL);
+ reg &= ~DP_PLL_PD;
+ writel(reg, dp->reg_base + EXYNOS_DP_PLL_CTL);
+ }
+}
+
+void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp,
+ enum analog_power_block block,
+ bool enable)
+{
+ u32 reg;
+
+ switch (block) {
+ case AUX_BLOCK:
+ if (enable) {
+ reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
+ reg |= AUX_PD;
+ writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
+ } else {
+ reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
+ reg &= ~AUX_PD;
+ writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
+ }
+ break;
+ case CH0_BLOCK:
+ if (enable) {
+ reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
+ reg |= CH0_PD;
+ writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
+ } else {
+ reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
+ reg &= ~CH0_PD;
+ writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
+ }
+ break;
+ case CH1_BLOCK:
+ if (enable) {
+ reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
+ reg |= CH1_PD;
+ writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
+ } else {
+ reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
+ reg &= ~CH1_PD;
+ writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
+ }
+ break;
+ case CH2_BLOCK:
+ if (enable) {
+ reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
+ reg |= CH2_PD;
+ writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
+ } else {
+ reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
+ reg &= ~CH2_PD;
+ writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
+ }
+ break;
+ case CH3_BLOCK:
+ if (enable) {
+ reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
+ reg |= CH3_PD;
+ writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
+ } else {
+ reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
+ reg &= ~CH3_PD;
+ writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
+ }
+ break;
+ case ANALOG_TOTAL:
+ if (enable) {
+ reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
+ reg |= DP_PHY_PD;
+ writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
+ } else {
+ reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
+ reg &= ~DP_PHY_PD;
+ writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
+ }
+ break;
+ case POWER_ALL:
+ if (enable) {
+ reg = DP_PHY_PD | AUX_PD | CH3_PD | CH2_PD |
+ CH1_PD | CH0_PD;
+ writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
+ } else {
+ writel(0x00, dp->reg_base + EXYNOS_DP_PHY_PD);
+ }
+ break;
+ default:
+ break;
+ }
+}
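Every case of the switch above performs the same read-modify-write on EXYNOS_DP_PHY_PD with a different bit mask. For comparison, a table-driven equivalent is sketched below; this is purely illustrative (the patch keeps the explicit switch, and note the POWER_ALL paths above write the register directly rather than read-modify-write).

/* Sketch: the per-block switch collapsed into a bit-mask lookup. */
static void set_analog_power_down_compact(struct exynos_dp_device *dp,
					  enum analog_power_block block,
					  bool enable)
{
	static const u32 mask[] = {
		[AUX_BLOCK]	= AUX_PD,
		[CH0_BLOCK]	= CH0_PD,
		[CH1_BLOCK]	= CH1_PD,
		[CH2_BLOCK]	= CH2_PD,
		[CH3_BLOCK]	= CH3_PD,
		[ANALOG_TOTAL]	= DP_PHY_PD,
		[POWER_ALL]	= DP_PHY_PD | AUX_PD | CH3_PD | CH2_PD |
				  CH1_PD | CH0_PD,
	};
	u32 reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);

	if (enable)
		reg |= mask[block];
	else
		reg &= ~mask[block];
	writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
}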
+
+void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
+{
+ u32 reg;
+ int timeout_loop = 0;
+
+ exynos_dp_set_analog_power_down(dp, POWER_ALL, 0);
+
+ reg = PLL_LOCK_CHG;
+ writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1);
+
+ reg = readl(dp->reg_base + EXYNOS_DP_DEBUG_CTL);
+ reg &= ~(F_PLL_LOCK | PLL_LOCK_CTRL);
+ writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL);
+
+ /* Power up PLL */
+ if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+ exynos_dp_set_pll_power_down(dp, 0);
+
+ while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+ timeout_loop++;
+ if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
+ dev_err(dp->dev, "failed to get pll lock status\n");
+ return;
+ }
+ usleep_range(10, 20);
+ }
+ }
+
+ /* Enable Serdes FIFO function and Link symbol clock domain module */
+ reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
+ reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
+ | AUX_FUNC_EN_N);
+ writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
+}
+
+void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ if (gpio_is_valid(dp->hpd_gpio))
+ return;
+
+ reg = HOTPLUG_CHG | HPD_LOST | PLUG;
+ writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
+
+ reg = INT_HPD;
+ writel(reg, dp->reg_base + EXYNOS_DP_INT_STA);
+}
+
+void exynos_dp_init_hpd(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ if (gpio_is_valid(dp->hpd_gpio))
+ return;
+
+ exynos_dp_clear_hotplug_interrupts(dp);
+
+ reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
+ reg &= ~(F_HPD | HPD_CTRL);
+ writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3);
+}
+
+enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ if (gpio_is_valid(dp->hpd_gpio)) {
+ reg = gpio_get_value(dp->hpd_gpio);
+ if (reg)
+ return DP_IRQ_TYPE_HP_CABLE_IN;
+ else
+ return DP_IRQ_TYPE_HP_CABLE_OUT;
+ } else {
+ /* Parse hotplug interrupt status register */
+ reg = readl(dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
+
+ if (reg & PLUG)
+ return DP_IRQ_TYPE_HP_CABLE_IN;
+
+ if (reg & HPD_LOST)
+ return DP_IRQ_TYPE_HP_CABLE_OUT;
+
+ if (reg & HOTPLUG_CHG)
+ return DP_IRQ_TYPE_HP_CHANGE;
+
+ return DP_IRQ_TYPE_UNKNOWN;
+ }
+}
+
+void exynos_dp_reset_aux(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ /* Disable AUX channel module */
+ reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
+ reg |= AUX_FUNC_EN_N;
+ writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
+}
+
+void exynos_dp_init_aux(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+	/* Clear interrupts related to AUX channel */
+ reg = RPLY_RECEIV | AUX_ERR;
+ writel(reg, dp->reg_base + EXYNOS_DP_INT_STA);
+
+ exynos_dp_reset_aux(dp);
+
+ /* Disable AUX transaction H/W retry */
+	reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) | AUX_HW_RETRY_COUNT_SEL(0) |
+ AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_HW_RETRY_CTL);
+
+	/* Receive AUX channel DEFER commands up to DEFER_COUNT * 64 times */
+ reg = DEFER_CTRL_EN | DEFER_COUNT(1);
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_DEFER_CTL);
+
+ /* Enable AUX channel module */
+ reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
+ reg &= ~AUX_FUNC_EN_N;
+ writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
+}
+
+int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ if (gpio_is_valid(dp->hpd_gpio)) {
+ if (gpio_get_value(dp->hpd_gpio))
+ return 0;
+ } else {
+ reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
+ if (reg & HPD_STATUS)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+void exynos_dp_enable_sw_function(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_1);
+ reg &= ~SW_FUNC_EN_N;
+ writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1);
+}
+
+int exynos_dp_start_aux_transaction(struct exynos_dp_device *dp)
+{
+ int reg;
+ int retval = 0;
+ int timeout_loop = 0;
+
+ /* Enable AUX CH operation */
+ reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);
+ reg |= AUX_EN;
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);
+
+ /* Is AUX CH command reply received? */
+ reg = readl(dp->reg_base + EXYNOS_DP_INT_STA);
+ while (!(reg & RPLY_RECEIV)) {
+ timeout_loop++;
+ if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
+ dev_err(dp->dev, "AUX CH command reply failed!\n");
+ return -ETIMEDOUT;
+ }
+ reg = readl(dp->reg_base + EXYNOS_DP_INT_STA);
+ usleep_range(10, 11);
+ }
+
+ /* Clear interrupt source for AUX CH command reply */
+ writel(RPLY_RECEIV, dp->reg_base + EXYNOS_DP_INT_STA);
+
+ /* Clear interrupt source for AUX CH access error */
+ reg = readl(dp->reg_base + EXYNOS_DP_INT_STA);
+ if (reg & AUX_ERR) {
+ writel(AUX_ERR, dp->reg_base + EXYNOS_DP_INT_STA);
+ return -EREMOTEIO;
+ }
+
+ /* Check AUX CH error access status */
+ reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_STA);
+ if ((reg & AUX_STATUS_MASK) != 0) {
+		dev_err(dp->dev, "AUX CH error happened: %d\n",
+ reg & AUX_STATUS_MASK);
+ return -EREMOTEIO;
+ }
+
+ return retval;
+}
+
+int exynos_dp_write_byte_to_dpcd(struct exynos_dp_device *dp,
+ unsigned int reg_addr,
+ unsigned char data)
+{
+ u32 reg;
+ int i;
+ int retval;
+
+ for (i = 0; i < 3; i++) {
+ /* Clear AUX CH data buffer */
+ reg = BUF_CLR;
+ writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
+
+ /* Select DPCD device address */
+ reg = AUX_ADDR_7_0(reg_addr);
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
+ reg = AUX_ADDR_15_8(reg_addr);
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
+ reg = AUX_ADDR_19_16(reg_addr);
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);
+
+ /* Write data buffer */
+ reg = (unsigned int)data;
+ writel(reg, dp->reg_base + EXYNOS_DP_BUF_DATA_0);
+
+ /*
+ * Set DisplayPort transaction and write 1 byte
+ * If bit 3 is 1, DisplayPort transaction.
+		 * If bit 3 is 0, I2C transaction.
+ */
+ reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
+
+ /* Start AUX transaction */
+ retval = exynos_dp_start_aux_transaction(dp);
+ if (retval == 0)
+ break;
+ else
+			dev_dbg(dp->dev, "%s: AUX transaction failed!\n",
+ __func__);
+ }
+
+ return retval;
+}
+
+int exynos_dp_read_byte_from_dpcd(struct exynos_dp_device *dp,
+ unsigned int reg_addr,
+ unsigned char *data)
+{
+ u32 reg;
+ int i;
+ int retval;
+
+ for (i = 0; i < 3; i++) {
+ /* Clear AUX CH data buffer */
+ reg = BUF_CLR;
+ writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
+
+ /* Select DPCD device address */
+ reg = AUX_ADDR_7_0(reg_addr);
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
+ reg = AUX_ADDR_15_8(reg_addr);
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
+ reg = AUX_ADDR_19_16(reg_addr);
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);
+
+ /*
+ * Set DisplayPort transaction and read 1 byte
+ * If bit 3 is 1, DisplayPort transaction.
+		 * If bit 3 is 0, I2C transaction.
+ */
+ reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
+
+ /* Start AUX transaction */
+ retval = exynos_dp_start_aux_transaction(dp);
+ if (retval == 0)
+ break;
+ else
+			dev_dbg(dp->dev, "%s: AUX transaction failed!\n",
+ __func__);
+ }
+
+ /* Read data buffer */
+ reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0);
+ *data = (unsigned char)(reg & 0xff);
+
+ return retval;
+}
+
+int exynos_dp_write_bytes_to_dpcd(struct exynos_dp_device *dp,
+ unsigned int reg_addr,
+ unsigned int count,
+ unsigned char data[])
+{
+ u32 reg;
+ unsigned int start_offset;
+ unsigned int cur_data_count;
+ unsigned int cur_data_idx;
+ int i;
+ int retval = 0;
+
+ /* Clear AUX CH data buffer */
+ reg = BUF_CLR;
+ writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
+
+ start_offset = 0;
+ while (start_offset < count) {
+		/* Buffer size of AUX CH is 16 * 4 bytes */
+ if ((count - start_offset) > 16)
+ cur_data_count = 16;
+ else
+ cur_data_count = count - start_offset;
+
+ for (i = 0; i < 3; i++) {
+ /* Select DPCD device address */
+ reg = AUX_ADDR_7_0(reg_addr + start_offset);
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
+ reg = AUX_ADDR_15_8(reg_addr + start_offset);
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
+ reg = AUX_ADDR_19_16(reg_addr + start_offset);
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);
+
+ for (cur_data_idx = 0; cur_data_idx < cur_data_count;
+ cur_data_idx++) {
+ reg = data[start_offset + cur_data_idx];
+ writel(reg, dp->reg_base + EXYNOS_DP_BUF_DATA_0
+ + 4 * cur_data_idx);
+ }
+
+ /*
+ * Set DisplayPort transaction and write
+ * If bit 3 is 1, DisplayPort transaction.
+			 * If bit 3 is 0, I2C transaction.
+ */
+ reg = AUX_LENGTH(cur_data_count) |
+ AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
+
+ /* Start AUX transaction */
+ retval = exynos_dp_start_aux_transaction(dp);
+ if (retval == 0)
+ break;
+ else
+				dev_dbg(dp->dev, "%s: AUX transaction failed!\n",
+ __func__);
+ }
+
+ start_offset += cur_data_count;
+ }
+
+ return retval;
+}
+
+int exynos_dp_read_bytes_from_dpcd(struct exynos_dp_device *dp,
+ unsigned int reg_addr,
+ unsigned int count,
+ unsigned char data[])
+{
+ u32 reg;
+ unsigned int start_offset;
+ unsigned int cur_data_count;
+ unsigned int cur_data_idx;
+ int i;
+ int retval = 0;
+
+ /* Clear AUX CH data buffer */
+ reg = BUF_CLR;
+ writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
+
+ start_offset = 0;
+ while (start_offset < count) {
+		/* Buffer size of AUX CH is 16 * 4 bytes */
+ if ((count - start_offset) > 16)
+ cur_data_count = 16;
+ else
+ cur_data_count = count - start_offset;
+
+ /* AUX CH Request Transaction process */
+ for (i = 0; i < 3; i++) {
+ /* Select DPCD device address */
+ reg = AUX_ADDR_7_0(reg_addr + start_offset);
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
+ reg = AUX_ADDR_15_8(reg_addr + start_offset);
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
+ reg = AUX_ADDR_19_16(reg_addr + start_offset);
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);
+
+ /*
+ * Set DisplayPort transaction and read
+ * If bit 3 is 1, DisplayPort transaction.
+			 * If bit 3 is 0, I2C transaction.
+ */
+ reg = AUX_LENGTH(cur_data_count) |
+ AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
+
+ /* Start AUX transaction */
+ retval = exynos_dp_start_aux_transaction(dp);
+ if (retval == 0)
+ break;
+ else
+				dev_dbg(dp->dev, "%s: AUX transaction failed!\n",
+ __func__);
+ }
+
+ for (cur_data_idx = 0; cur_data_idx < cur_data_count;
+ cur_data_idx++) {
+ reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0
+ + 4 * cur_data_idx);
+ data[start_offset + cur_data_idx] =
+ (unsigned char)reg;
+ }
+
+ start_offset += cur_data_count;
+ }
+
+ return retval;
+}
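A typical caller of the bulk DPCD read above is link-status polling during training. A sketch of such a caller follows; get_link_status() is a hypothetical wrapper, and DP_LANE0_1_STATUS (0x202) is the standard DPCD address from the DisplayPort specification rather than a constant defined in this driver.

/* Sketch: fetch the six link-status bytes starting at DPCD 0x202. */
#define DP_LANE0_1_STATUS	0x202	/* standard DPCD address */

static int get_link_status(struct exynos_dp_device *dp, u8 link_status[6])
{
	return exynos_dp_read_bytes_from_dpcd(dp, DP_LANE0_1_STATUS, 6,
					      link_status);
}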
+
+int exynos_dp_select_i2c_device(struct exynos_dp_device *dp,
+ unsigned int device_addr,
+ unsigned int reg_addr)
+{
+ u32 reg;
+ int retval;
+
+ /* Set EDID device address */
+ reg = device_addr;
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
+ writel(0x0, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
+ writel(0x0, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);
+
+ /* Set offset from base address of EDID device */
+ writel(reg_addr, dp->reg_base + EXYNOS_DP_BUF_DATA_0);
+
+ /*
+ * Set I2C transaction and write address
+ * If bit 3 is 1, DisplayPort transaction.
+	 * If bit 3 is 0, I2C transaction.
+ */
+ reg = AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_MOT |
+ AUX_TX_COMM_WRITE;
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
+
+ /* Start AUX transaction */
+ retval = exynos_dp_start_aux_transaction(dp);
+ if (retval != 0)
+		dev_dbg(dp->dev, "%s: AUX transaction failed!\n", __func__);
+
+ return retval;
+}
+
+int exynos_dp_read_byte_from_i2c(struct exynos_dp_device *dp,
+ unsigned int device_addr,
+ unsigned int reg_addr,
+ unsigned int *data)
+{
+ u32 reg;
+ int i;
+ int retval;
+
+ for (i = 0; i < 3; i++) {
+ /* Clear AUX CH data buffer */
+ reg = BUF_CLR;
+ writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
+
+ /* Select EDID device */
+ retval = exynos_dp_select_i2c_device(dp, device_addr, reg_addr);
+ if (retval != 0)
+ continue;
+
+ /*
+ * Set I2C transaction and read data
+ * If bit 3 is 1, DisplayPort transaction.
+		 * If bit 3 is 0, I2C transaction.
+ */
+ reg = AUX_TX_COMM_I2C_TRANSACTION |
+ AUX_TX_COMM_READ;
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
+
+ /* Start AUX transaction */
+ retval = exynos_dp_start_aux_transaction(dp);
+ if (retval == 0)
+ break;
+ else
+			dev_dbg(dp->dev, "%s: AUX transaction failed!\n",
+ __func__);
+ }
+
+ /* Read data */
+ if (retval == 0)
+ *data = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0);
+
+ return retval;
+}
+
+int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp,
+ unsigned int device_addr,
+ unsigned int reg_addr,
+ unsigned int count,
+ unsigned char edid[])
+{
+ u32 reg;
+ unsigned int i, j;
+ unsigned int cur_data_idx;
+ unsigned int defer = 0;
+ int retval = 0;
+
+ for (i = 0; i < count; i += 16) {
+ for (j = 0; j < 3; j++) {
+ /* Clear AUX CH data buffer */
+ reg = BUF_CLR;
+ writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
+
+ /* Set normal AUX CH command */
+ reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);
+ reg &= ~ADDR_ONLY;
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);
+
+ /*
+			 * If the Rx sent a defer, the Tx resends only the
+			 * read request without sending the address again.
+ */
+ if (!defer)
+ retval = exynos_dp_select_i2c_device(dp,
+ device_addr, reg_addr + i);
+ else
+ defer = 0;
+
+ if (retval == 0) {
+ /*
+ * Set I2C transaction and write data
+ * If bit 3 is 1, DisplayPort transaction.
+				 * If bit 3 is 0, I2C transaction.
+ */
+ reg = AUX_LENGTH(16) |
+ AUX_TX_COMM_I2C_TRANSACTION |
+ AUX_TX_COMM_READ;
+ writel(reg, dp->reg_base +
+ EXYNOS_DP_AUX_CH_CTL_1);
+
+ /* Start AUX transaction */
+ retval = exynos_dp_start_aux_transaction(dp);
+ if (retval == 0)
+ break;
+ else
+ dev_dbg(dp->dev,
+					"%s: AUX transaction failed!\n",
+ __func__);
+ }
+ /* Check if Rx sends defer */
+ reg = readl(dp->reg_base + EXYNOS_DP_AUX_RX_COMM);
+ if (reg == AUX_RX_COMM_AUX_DEFER ||
+ reg == AUX_RX_COMM_I2C_DEFER) {
+				dev_err(dp->dev, "Defer: %d\n", reg);
+ defer = 1;
+ }
+ }
+
+ for (cur_data_idx = 0; cur_data_idx < 16; cur_data_idx++) {
+ reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0
+ + 4 * cur_data_idx);
+ edid[i + cur_data_idx] = (unsigned char)reg;
+ }
+ }
+
+ return retval;
+}
+
+void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype)
+{
+ u32 reg;
+
+ reg = bwtype;
+ if ((bwtype == LINK_RATE_2_70GBPS) || (bwtype == LINK_RATE_1_62GBPS))
+ writel(reg, dp->reg_base + EXYNOS_DP_LINK_BW_SET);
+}
+
+void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_LINK_BW_SET);
+ *bwtype = reg;
+}
+
+void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count)
+{
+ u32 reg;
+
+ reg = count;
+ writel(reg, dp->reg_base + EXYNOS_DP_LANE_COUNT_SET);
+}
+
+void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_LANE_COUNT_SET);
+ *count = reg;
+}
+
+void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable)
+{
+ u32 reg;
+
+ if (enable) {
+ reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4);
+ reg |= ENHANCED;
+ writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4);
+ } else {
+ reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4);
+ reg &= ~ENHANCED;
+ writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4);
+ }
+}
+
+void exynos_dp_set_training_pattern(struct exynos_dp_device *dp,
+ enum pattern_set pattern)
+{
+ u32 reg;
+
+ switch (pattern) {
+ case PRBS7:
+ reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_PRBS7;
+ writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
+ break;
+ case D10_2:
+ reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_D10_2;
+ writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
+ break;
+ case TRAINING_PTN1:
+ reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN1;
+ writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
+ break;
+ case TRAINING_PTN2:
+ reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN2;
+ writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
+ break;
+ case DP_NONE:
+ reg = SCRAMBLING_ENABLE |
+ LINK_QUAL_PATTERN_SET_DISABLE |
+ SW_TRAINING_PATTERN_SET_NORMAL;
+ writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
+ break;
+ default:
+ break;
+ }
+}
+
+void exynos_dp_set_lane0_pre_emphasis(struct exynos_dp_device *dp, u32 level)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL);
+ reg &= ~PRE_EMPHASIS_SET_MASK;
+ reg |= level << PRE_EMPHASIS_SET_SHIFT;
+ writel(reg, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL);
+}
+
+void exynos_dp_set_lane1_pre_emphasis(struct exynos_dp_device *dp, u32 level)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL);
+ reg &= ~PRE_EMPHASIS_SET_MASK;
+ reg |= level << PRE_EMPHASIS_SET_SHIFT;
+ writel(reg, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL);
+}
+
+void exynos_dp_set_lane2_pre_emphasis(struct exynos_dp_device *dp, u32 level)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL);
+ reg &= ~PRE_EMPHASIS_SET_MASK;
+ reg |= level << PRE_EMPHASIS_SET_SHIFT;
+ writel(reg, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL);
+}
+
+void exynos_dp_set_lane3_pre_emphasis(struct exynos_dp_device *dp, u32 level)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL);
+ reg &= ~PRE_EMPHASIS_SET_MASK;
+ reg |= level << PRE_EMPHASIS_SET_SHIFT;
+ writel(reg, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL);
+}
+
+void exynos_dp_set_lane0_link_training(struct exynos_dp_device *dp,
+ u32 training_lane)
+{
+ u32 reg;
+
+ reg = training_lane;
+ writel(reg, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL);
+}
+
+void exynos_dp_set_lane1_link_training(struct exynos_dp_device *dp,
+ u32 training_lane)
+{
+ u32 reg;
+
+ reg = training_lane;
+ writel(reg, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL);
+}
+
+void exynos_dp_set_lane2_link_training(struct exynos_dp_device *dp,
+ u32 training_lane)
+{
+ u32 reg;
+
+ reg = training_lane;
+ writel(reg, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL);
+}
+
+void exynos_dp_set_lane3_link_training(struct exynos_dp_device *dp,
+ u32 training_lane)
+{
+ u32 reg;
+
+ reg = training_lane;
+ writel(reg, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL);
+}
+
+u32 exynos_dp_get_lane0_link_training(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL);
+ return reg;
+}
+
+u32 exynos_dp_get_lane1_link_training(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL);
+ return reg;
+}
+
+u32 exynos_dp_get_lane2_link_training(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL);
+ return reg;
+}
+
+u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL);
+ return reg;
+}
+
+void exynos_dp_reset_macro(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_PHY_TEST);
+ reg |= MACRO_RST;
+ writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST);
+
+ /* 10 us is the minimum reset time. */
+ usleep_range(10, 20);
+
+ reg &= ~MACRO_RST;
+ writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST);
+}
+
+void exynos_dp_init_video(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = VSYNC_DET | VID_FORMAT_CHG | VID_CLK_CHG;
+ writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1);
+
+ reg = 0x0;
+ writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_1);
+
+ reg = CHA_CRI(4) | CHA_CTRL;
+ writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_2);
+
+ reg = 0x0;
+ writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3);
+
+ reg = VID_HRES_TH(2) | VID_VRES_TH(0);
+ writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_8);
+}
+
+void exynos_dp_set_video_color_format(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ /* Configure the input color depth, color space, dynamic range */
+ reg = (dp->video_info->dynamic_range << IN_D_RANGE_SHIFT) |
+ (dp->video_info->color_depth << IN_BPC_SHIFT) |
+ (dp->video_info->color_space << IN_COLOR_F_SHIFT);
+ writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_2);
+
+ /* Set Input Color YCbCr Coefficients to ITU601 or ITU709 */
+ reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_3);
+ reg &= ~IN_YC_COEFFI_MASK;
+ if (dp->video_info->ycbcr_coeff)
+ reg |= IN_YC_COEFFI_ITU709;
+ else
+ reg |= IN_YC_COEFFI_ITU601;
+ writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_3);
+}
+
+int exynos_dp_is_slave_video_stream_clock_on(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_1);
+ writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_1);
+
+ reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_1);
+
+ if (!(reg & DET_STA)) {
+ dev_dbg(dp->dev, "Input stream clock not detected.\n");
+ return -EINVAL;
+ }
+
+ reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_2);
+ writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_2);
+
+ reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_2);
+	dev_dbg(dp->dev, "waiting for SYS_CTL_2\n");
+
+ if (reg & CHA_STA) {
+ dev_dbg(dp->dev, "Input stream clk is changing\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void exynos_dp_set_video_cr_mn(struct exynos_dp_device *dp,
+ enum clock_recovery_m_value_type type,
+ u32 m_value,
+ u32 n_value)
+{
+ u32 reg;
+
+ if (type == REGISTER_M) {
+ reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4);
+ reg |= FIX_M_VID;
+ writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4);
+ reg = m_value & 0xff;
+ writel(reg, dp->reg_base + EXYNOS_DP_M_VID_0);
+ reg = (m_value >> 8) & 0xff;
+ writel(reg, dp->reg_base + EXYNOS_DP_M_VID_1);
+ reg = (m_value >> 16) & 0xff;
+ writel(reg, dp->reg_base + EXYNOS_DP_M_VID_2);
+
+ reg = n_value & 0xff;
+ writel(reg, dp->reg_base + EXYNOS_DP_N_VID_0);
+ reg = (n_value >> 8) & 0xff;
+ writel(reg, dp->reg_base + EXYNOS_DP_N_VID_1);
+ reg = (n_value >> 16) & 0xff;
+ writel(reg, dp->reg_base + EXYNOS_DP_N_VID_2);
+ } else {
+ reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4);
+ reg &= ~FIX_M_VID;
+ writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4);
+
+ writel(0x00, dp->reg_base + EXYNOS_DP_N_VID_0);
+ writel(0x80, dp->reg_base + EXYNOS_DP_N_VID_1);
+ writel(0x00, dp->reg_base + EXYNOS_DP_N_VID_2);
+ }
+}
+
+void exynos_dp_set_video_timing_mode(struct exynos_dp_device *dp, u32 type)
+{
+ u32 reg;
+
+ if (type == VIDEO_TIMING_FROM_CAPTURE) {
+ reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
+ reg &= ~FORMAT_SEL;
+ writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
+ } else {
+ reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
+ reg |= FORMAT_SEL;
+ writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
+ }
+}
+
+void exynos_dp_enable_video_master(struct exynos_dp_device *dp, bool enable)
+{
+ u32 reg;
+
+ if (enable) {
+ reg = readl(dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
+ reg &= ~VIDEO_MODE_MASK;
+ reg |= VIDEO_MASTER_MODE_EN | VIDEO_MODE_MASTER_MODE;
+ writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
+ } else {
+ reg = readl(dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
+ reg &= ~VIDEO_MODE_MASK;
+ reg |= VIDEO_MODE_SLAVE_MODE;
+ writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
+ }
+}
+
+void exynos_dp_start_video(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
+ reg |= VIDEO_EN;
+ writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
+}
+
+int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
+ writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3);
+
+ reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
+ if (!(reg & STRM_VALID)) {
+ dev_dbg(dp->dev, "Input video stream is not detected.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_1);
+	reg &= ~(MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N);
+ reg |= MASTER_VID_FUNC_EN_N;
+ writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1);
+
+ reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
+ reg &= ~INTERACE_SCAN_CFG;
+ reg |= (dp->video_info->interlaced << 2);
+ writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
+
+ reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
+ reg &= ~VSYNC_POLARITY_CFG;
+ reg |= (dp->video_info->v_sync_polarity << 1);
+ writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
+
+ reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
+ reg &= ~HSYNC_POLARITY_CFG;
+ reg |= (dp->video_info->h_sync_polarity << 0);
+ writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
+
+ reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE;
+ writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
+}
+
+void exynos_dp_enable_scrambling(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
+ reg &= ~SCRAMBLING_DISABLE;
+ writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
+}
+
+void exynos_dp_disable_scrambling(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
+ reg |= SCRAMBLING_DISABLE;
+ writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
+}
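Taken together, the AUX/I2C helpers above are enough to fetch and validate one EDID block. A sketch of such a caller follows, assuming an initialized struct exynos_dp_device; read_edid_block0() is hypothetical (the driver's real EDID path lives in the core file), and error handling is reduced to the bare minimum.

/* Sketch: read and checksum the first 128-byte EDID block. */
static int read_edid_block0(struct exynos_dp_device *dp, unsigned char *edid)
{
	u8 sum = 0;
	int i, ret;

	ret = exynos_dp_read_bytes_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
					    0, EDID_BLOCK_LENGTH, edid);
	if (ret)
		return ret;

	/* a valid EDID block sums to zero modulo 256 */
	for (i = 0; i < EDID_BLOCK_LENGTH; i++)
		sum += edid[i];

	return sum ? -EINVAL : 0;
}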
diff --git a/drivers/gpu/drm/exynos/exynos_dp_reg.h b/drivers/gpu/drm/exynos/exynos_dp_reg.h
new file mode 100644
index 00000000000..2e9bd0e0b9f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_dp_reg.h
@@ -0,0 +1,366 @@
+/*
+ * Register definition file for Samsung DP driver
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _EXYNOS_DP_REG_H
+#define _EXYNOS_DP_REG_H
+
+#define EXYNOS_DP_TX_SW_RESET 0x14
+#define EXYNOS_DP_FUNC_EN_1 0x18
+#define EXYNOS_DP_FUNC_EN_2 0x1C
+#define EXYNOS_DP_VIDEO_CTL_1 0x20
+#define EXYNOS_DP_VIDEO_CTL_2 0x24
+#define EXYNOS_DP_VIDEO_CTL_3 0x28
+
+#define EXYNOS_DP_VIDEO_CTL_8 0x3C
+#define EXYNOS_DP_VIDEO_CTL_10 0x44
+
+#define EXYNOS_DP_LANE_MAP 0x35C
+
+#define EXYNOS_DP_ANALOG_CTL_1 0x370
+#define EXYNOS_DP_ANALOG_CTL_2 0x374
+#define EXYNOS_DP_ANALOG_CTL_3 0x378
+#define EXYNOS_DP_PLL_FILTER_CTL_1 0x37C
+#define EXYNOS_DP_TX_AMP_TUNING_CTL 0x380
+
+#define EXYNOS_DP_AUX_HW_RETRY_CTL 0x390
+
+#define EXYNOS_DP_COMMON_INT_STA_1 0x3C4
+#define EXYNOS_DP_COMMON_INT_STA_2 0x3C8
+#define EXYNOS_DP_COMMON_INT_STA_3 0x3CC
+#define EXYNOS_DP_COMMON_INT_STA_4 0x3D0
+#define EXYNOS_DP_INT_STA 0x3DC
+#define EXYNOS_DP_COMMON_INT_MASK_1 0x3E0
+#define EXYNOS_DP_COMMON_INT_MASK_2 0x3E4
+#define EXYNOS_DP_COMMON_INT_MASK_3 0x3E8
+#define EXYNOS_DP_COMMON_INT_MASK_4 0x3EC
+#define EXYNOS_DP_INT_STA_MASK 0x3F8
+#define EXYNOS_DP_INT_CTL 0x3FC
+
+#define EXYNOS_DP_SYS_CTL_1 0x600
+#define EXYNOS_DP_SYS_CTL_2 0x604
+#define EXYNOS_DP_SYS_CTL_3 0x608
+#define EXYNOS_DP_SYS_CTL_4 0x60C
+
+#define EXYNOS_DP_PKT_SEND_CTL 0x640
+#define EXYNOS_DP_HDCP_CTL 0x648
+
+#define EXYNOS_DP_LINK_BW_SET 0x680
+#define EXYNOS_DP_LANE_COUNT_SET 0x684
+#define EXYNOS_DP_TRAINING_PTN_SET 0x688
+#define EXYNOS_DP_LN0_LINK_TRAINING_CTL 0x68C
+#define EXYNOS_DP_LN1_LINK_TRAINING_CTL 0x690
+#define EXYNOS_DP_LN2_LINK_TRAINING_CTL 0x694
+#define EXYNOS_DP_LN3_LINK_TRAINING_CTL 0x698
+
+#define EXYNOS_DP_DEBUG_CTL 0x6C0
+#define EXYNOS_DP_HPD_DEGLITCH_L 0x6C4
+#define EXYNOS_DP_HPD_DEGLITCH_H 0x6C8
+#define EXYNOS_DP_LINK_DEBUG_CTL 0x6E0
+
+#define EXYNOS_DP_M_VID_0 0x700
+#define EXYNOS_DP_M_VID_1 0x704
+#define EXYNOS_DP_M_VID_2 0x708
+#define EXYNOS_DP_N_VID_0 0x70C
+#define EXYNOS_DP_N_VID_1 0x710
+#define EXYNOS_DP_N_VID_2 0x714
+
+#define EXYNOS_DP_PLL_CTL 0x71C
+#define EXYNOS_DP_PHY_PD 0x720
+#define EXYNOS_DP_PHY_TEST 0x724
+
+#define EXYNOS_DP_VIDEO_FIFO_THRD 0x730
+#define EXYNOS_DP_AUDIO_MARGIN 0x73C
+
+#define EXYNOS_DP_M_VID_GEN_FILTER_TH 0x764
+#define EXYNOS_DP_M_AUD_GEN_FILTER_TH 0x778
+#define EXYNOS_DP_AUX_CH_STA 0x780
+#define EXYNOS_DP_AUX_CH_DEFER_CTL 0x788
+#define EXYNOS_DP_AUX_RX_COMM 0x78C
+#define EXYNOS_DP_BUFFER_DATA_CTL 0x790
+#define EXYNOS_DP_AUX_CH_CTL_1 0x794
+#define EXYNOS_DP_AUX_ADDR_7_0 0x798
+#define EXYNOS_DP_AUX_ADDR_15_8 0x79C
+#define EXYNOS_DP_AUX_ADDR_19_16 0x7A0
+#define EXYNOS_DP_AUX_CH_CTL_2 0x7A4
+
+#define EXYNOS_DP_BUF_DATA_0 0x7C0
+
+#define EXYNOS_DP_SOC_GENERAL_CTL 0x800
+
+/* EXYNOS_DP_TX_SW_RESET */
+#define RESET_DP_TX (0x1 << 0)
+
+/* EXYNOS_DP_FUNC_EN_1 */
+#define MASTER_VID_FUNC_EN_N (0x1 << 7)
+#define SLAVE_VID_FUNC_EN_N (0x1 << 5)
+#define AUD_FIFO_FUNC_EN_N (0x1 << 4)
+#define AUD_FUNC_EN_N (0x1 << 3)
+#define HDCP_FUNC_EN_N (0x1 << 2)
+#define CRC_FUNC_EN_N (0x1 << 1)
+#define SW_FUNC_EN_N (0x1 << 0)
+
+/* EXYNOS_DP_FUNC_EN_2 */
+#define SSC_FUNC_EN_N (0x1 << 7)
+#define AUX_FUNC_EN_N (0x1 << 2)
+#define SERDES_FIFO_FUNC_EN_N (0x1 << 1)
+#define LS_CLK_DOMAIN_FUNC_EN_N (0x1 << 0)
+
+/* EXYNOS_DP_VIDEO_CTL_1 */
+#define VIDEO_EN (0x1 << 7)
+#define HDCP_VIDEO_MUTE (0x1 << 6)
+
+/* EXYNOS_DP_VIDEO_CTL_2 */
+#define IN_D_RANGE_MASK (0x1 << 7)
+#define IN_D_RANGE_SHIFT (7)
+#define IN_D_RANGE_CEA (0x1 << 7)
+#define IN_D_RANGE_VESA (0x0 << 7)
+#define IN_BPC_MASK (0x7 << 4)
+#define IN_BPC_SHIFT (4)
+#define IN_BPC_12_BITS (0x3 << 4)
+#define IN_BPC_10_BITS (0x2 << 4)
+#define IN_BPC_8_BITS (0x1 << 4)
+#define IN_BPC_6_BITS (0x0 << 4)
+#define IN_COLOR_F_MASK (0x3 << 0)
+#define IN_COLOR_F_SHIFT (0)
+#define IN_COLOR_F_YCBCR444 (0x2 << 0)
+#define IN_COLOR_F_YCBCR422 (0x1 << 0)
+#define IN_COLOR_F_RGB (0x0 << 0)
+
+/* EXYNOS_DP_VIDEO_CTL_3 */
+#define IN_YC_COEFFI_MASK (0x1 << 7)
+#define IN_YC_COEFFI_SHIFT (7)
+#define IN_YC_COEFFI_ITU709 (0x1 << 7)
+#define IN_YC_COEFFI_ITU601 (0x0 << 7)
+#define VID_CHK_UPDATE_TYPE_MASK (0x1 << 4)
+#define VID_CHK_UPDATE_TYPE_SHIFT (4)
+#define VID_CHK_UPDATE_TYPE_1 (0x1 << 4)
+#define VID_CHK_UPDATE_TYPE_0 (0x0 << 4)
+
+/* EXYNOS_DP_VIDEO_CTL_8 */
+#define VID_HRES_TH(x) (((x) & 0xf) << 4)
+#define VID_VRES_TH(x) (((x) & 0xf) << 0)
+
+/* EXYNOS_DP_VIDEO_CTL_10 */
+#define FORMAT_SEL (0x1 << 4)
+#define INTERACE_SCAN_CFG (0x1 << 2)
+#define VSYNC_POLARITY_CFG (0x1 << 1)
+#define HSYNC_POLARITY_CFG (0x1 << 0)
+
+/* EXYNOS_DP_LANE_MAP */
+#define LANE3_MAP_LOGIC_LANE_0 (0x0 << 6)
+#define LANE3_MAP_LOGIC_LANE_1 (0x1 << 6)
+#define LANE3_MAP_LOGIC_LANE_2 (0x2 << 6)
+#define LANE3_MAP_LOGIC_LANE_3 (0x3 << 6)
+#define LANE2_MAP_LOGIC_LANE_0 (0x0 << 4)
+#define LANE2_MAP_LOGIC_LANE_1 (0x1 << 4)
+#define LANE2_MAP_LOGIC_LANE_2 (0x2 << 4)
+#define LANE2_MAP_LOGIC_LANE_3 (0x3 << 4)
+#define LANE1_MAP_LOGIC_LANE_0 (0x0 << 2)
+#define LANE1_MAP_LOGIC_LANE_1 (0x1 << 2)
+#define LANE1_MAP_LOGIC_LANE_2 (0x2 << 2)
+#define LANE1_MAP_LOGIC_LANE_3 (0x3 << 2)
+#define LANE0_MAP_LOGIC_LANE_0 (0x0 << 0)
+#define LANE0_MAP_LOGIC_LANE_1 (0x1 << 0)
+#define LANE0_MAP_LOGIC_LANE_2 (0x2 << 0)
+#define LANE0_MAP_LOGIC_LANE_3 (0x3 << 0)
+
+/* EXYNOS_DP_ANALOG_CTL_1 */
+#define TX_TERMINAL_CTRL_50_OHM (0x1 << 4)
+
+/* EXYNOS_DP_ANALOG_CTL_2 */
+#define SEL_24M (0x1 << 3)
+#define TX_DVDD_BIT_1_0625V (0x4 << 0)
+
+/* EXYNOS_DP_ANALOG_CTL_3 */
+#define DRIVE_DVDD_BIT_1_0625V (0x4 << 5)
+#define VCO_BIT_600_MICRO (0x5 << 0)
+
+/* EXYNOS_DP_PLL_FILTER_CTL_1 */
+#define PD_RING_OSC (0x1 << 6)
+#define AUX_TERMINAL_CTRL_50_OHM (0x2 << 4)
+#define TX_CUR1_2X (0x1 << 2)
+#define TX_CUR_16_MA (0x3 << 0)
+
+/* EXYNOS_DP_TX_AMP_TUNING_CTL */
+#define CH3_AMP_400_MV (0x0 << 24)
+#define CH2_AMP_400_MV (0x0 << 16)
+#define CH1_AMP_400_MV (0x0 << 8)
+#define CH0_AMP_400_MV (0x0 << 0)
+
+/* EXYNOS_DP_AUX_HW_RETRY_CTL */
+#define AUX_BIT_PERIOD_EXPECTED_DELAY(x) (((x) & 0x7) << 8)
+#define AUX_HW_RETRY_INTERVAL_MASK (0x3 << 3)
+#define AUX_HW_RETRY_INTERVAL_600_MICROSECONDS (0x0 << 3)
+#define AUX_HW_RETRY_INTERVAL_800_MICROSECONDS (0x1 << 3)
+#define AUX_HW_RETRY_INTERVAL_1000_MICROSECONDS (0x2 << 3)
+#define AUX_HW_RETRY_INTERVAL_1800_MICROSECONDS (0x3 << 3)
+#define AUX_HW_RETRY_COUNT_SEL(x) (((x) & 0x7) << 0)
+
+/* EXYNOS_DP_COMMON_INT_STA_1 */
+#define VSYNC_DET (0x1 << 7)
+#define PLL_LOCK_CHG (0x1 << 6)
+#define SPDIF_ERR (0x1 << 5)
+#define SPDIF_UNSTBL (0x1 << 4)
+#define VID_FORMAT_CHG (0x1 << 3)
+#define AUD_CLK_CHG (0x1 << 2)
+#define VID_CLK_CHG (0x1 << 1)
+#define SW_INT (0x1 << 0)
+
+/* EXYNOS_DP_COMMON_INT_STA_2 */
+#define ENC_EN_CHG (0x1 << 6)
+#define HW_BKSV_RDY (0x1 << 3)
+#define HW_SHA_DONE (0x1 << 2)
+#define HW_AUTH_STATE_CHG (0x1 << 1)
+#define HW_AUTH_DONE (0x1 << 0)
+
+/* EXYNOS_DP_COMMON_INT_STA_3 */
+#define AFIFO_UNDER (0x1 << 7)
+#define AFIFO_OVER (0x1 << 6)
+#define R0_CHK_FLAG (0x1 << 5)
+
+/* EXYNOS_DP_COMMON_INT_STA_4 */
+#define PSR_ACTIVE (0x1 << 7)
+#define PSR_INACTIVE (0x1 << 6)
+#define SPDIF_BI_PHASE_ERR (0x1 << 5)
+#define HOTPLUG_CHG (0x1 << 2)
+#define HPD_LOST (0x1 << 1)
+#define PLUG (0x1 << 0)
+
+/* EXYNOS_DP_INT_STA */
+#define INT_HPD (0x1 << 6)
+#define HW_TRAINING_FINISH (0x1 << 5)
+#define RPLY_RECEIV (0x1 << 1)
+#define AUX_ERR (0x1 << 0)
+
+/* EXYNOS_DP_INT_CTL */
+#define SOFT_INT_CTRL (0x1 << 2)
+#define INT_POL1 (0x1 << 1)
+#define INT_POL0 (0x1 << 0)
+
+/* EXYNOS_DP_SYS_CTL_1 */
+#define DET_STA (0x1 << 2)
+#define FORCE_DET (0x1 << 1)
+#define DET_CTRL (0x1 << 0)
+
+/* EXYNOS_DP_SYS_CTL_2 */
+#define CHA_CRI(x) (((x) & 0xf) << 4)
+#define CHA_STA (0x1 << 2)
+#define FORCE_CHA (0x1 << 1)
+#define CHA_CTRL (0x1 << 0)
+
+/* EXYNOS_DP_SYS_CTL_3 */
+#define HPD_STATUS (0x1 << 6)
+#define F_HPD (0x1 << 5)
+#define HPD_CTRL (0x1 << 4)
+#define HDCP_RDY (0x1 << 3)
+#define STRM_VALID (0x1 << 2)
+#define F_VALID (0x1 << 1)
+#define VALID_CTRL (0x1 << 0)
+
+/* EXYNOS_DP_SYS_CTL_4 */
+#define FIX_M_AUD (0x1 << 4)
+#define ENHANCED (0x1 << 3)
+#define FIX_M_VID (0x1 << 2)
+#define M_VID_UPDATE_CTRL (0x3 << 0)
+
+/* EXYNOS_DP_TRAINING_PTN_SET */
+#define SCRAMBLER_TYPE (0x1 << 9)
+#define HW_LINK_TRAINING_PATTERN (0x1 << 8)
+#define SCRAMBLING_DISABLE (0x1 << 5)
+#define SCRAMBLING_ENABLE (0x0 << 5)
+#define LINK_QUAL_PATTERN_SET_MASK (0x3 << 2)
+#define LINK_QUAL_PATTERN_SET_PRBS7 (0x3 << 2)
+#define LINK_QUAL_PATTERN_SET_D10_2 (0x1 << 2)
+#define LINK_QUAL_PATTERN_SET_DISABLE (0x0 << 2)
+#define SW_TRAINING_PATTERN_SET_MASK (0x3 << 0)
+#define SW_TRAINING_PATTERN_SET_PTN2 (0x2 << 0)
+#define SW_TRAINING_PATTERN_SET_PTN1 (0x1 << 0)
+#define SW_TRAINING_PATTERN_SET_NORMAL (0x0 << 0)
+
+/* EXYNOS_DP_LN0_LINK_TRAINING_CTL */
+#define PRE_EMPHASIS_SET_MASK (0x3 << 3)
+#define PRE_EMPHASIS_SET_SHIFT (3)
+
+/* EXYNOS_DP_DEBUG_CTL */
+#define PLL_LOCK (0x1 << 4)
+#define F_PLL_LOCK (0x1 << 3)
+#define PLL_LOCK_CTRL (0x1 << 2)
+#define PN_INV (0x1 << 0)
+
+/* EXYNOS_DP_PLL_CTL */
+#define DP_PLL_PD (0x1 << 7)
+#define DP_PLL_RESET (0x1 << 6)
+#define DP_PLL_LOOP_BIT_DEFAULT (0x1 << 4)
+#define DP_PLL_REF_BIT_1_1250V (0x5 << 0)
+#define DP_PLL_REF_BIT_1_2500V (0x7 << 0)
+
+/* EXYNOS_DP_PHY_PD */
+#define DP_PHY_PD (0x1 << 5)
+#define AUX_PD (0x1 << 4)
+#define CH3_PD (0x1 << 3)
+#define CH2_PD (0x1 << 2)
+#define CH1_PD (0x1 << 1)
+#define CH0_PD (0x1 << 0)
+
+/* EXYNOS_DP_PHY_TEST */
+#define MACRO_RST (0x1 << 5)
+#define CH1_TEST (0x1 << 1)
+#define CH0_TEST (0x1 << 0)
+
+/* EXYNOS_DP_AUX_CH_STA */
+#define AUX_BUSY (0x1 << 4)
+#define AUX_STATUS_MASK (0xf << 0)
+
+/* EXYNOS_DP_AUX_CH_DEFER_CTL */
+#define DEFER_CTRL_EN (0x1 << 7)
+#define DEFER_COUNT(x) (((x) & 0x7f) << 0)
+
+/* EXYNOS_DP_AUX_RX_COMM */
+#define AUX_RX_COMM_I2C_DEFER (0x2 << 2)
+#define AUX_RX_COMM_AUX_DEFER (0x2 << 0)
+
+/* EXYNOS_DP_BUFFER_DATA_CTL */
+#define BUF_CLR (0x1 << 7)
+#define BUF_DATA_COUNT(x) (((x) & 0x1f) << 0)
+
+/* EXYNOS_DP_AUX_CH_CTL_1 */
+#define AUX_LENGTH(x) (((x - 1) & 0xf) << 4)
+#define AUX_TX_COMM_MASK (0xf << 0)
+#define AUX_TX_COMM_DP_TRANSACTION (0x1 << 3)
+#define AUX_TX_COMM_I2C_TRANSACTION (0x0 << 3)
+#define AUX_TX_COMM_MOT (0x1 << 2)
+#define AUX_TX_COMM_WRITE (0x0 << 0)
+#define AUX_TX_COMM_READ (0x1 << 0)
+
+/* EXYNOS_DP_AUX_ADDR_7_0 */
+#define AUX_ADDR_7_0(x) (((x) >> 0) & 0xff)
+
+/* EXYNOS_DP_AUX_ADDR_15_8 */
+#define AUX_ADDR_15_8(x) (((x) >> 8) & 0xff)
+
+/* EXYNOS_DP_AUX_ADDR_19_16 */
+#define AUX_ADDR_19_16(x) (((x) >> 16) & 0x0f)
+
+/* EXYNOS_DP_AUX_CH_CTL_2 */
+#define ADDR_ONLY (0x1 << 1)
+#define AUX_EN (0x1 << 0)
+
+/* EXYNOS_DP_SOC_GENERAL_CTL */
+#define AUDIO_MODE_SPDIF_MODE (0x1 << 8)
+#define AUDIO_MODE_MASTER_MODE (0x0 << 8)
+#define MASTER_VIDEO_INTERLACE_EN (0x1 << 4)
+#define VIDEO_MASTER_CLK_SEL (0x1 << 2)
+#define VIDEO_MASTER_MODE_EN (0x1 << 1)
+#define VIDEO_MODE_MASK (0x1 << 0)
+#define VIDEO_MODE_SLAVE_MODE (0x1 << 0)
+#define VIDEO_MODE_MASTER_MODE (0x0 << 0)
+
+#endif /* _EXYNOS_DP_REG_H */
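One encoding detail worth noting: AUX_LENGTH() stores length minus one in bits 7:4, so the 4-bit field covers transfers of 1 to 16 bytes, which matches the 16-byte chunking in exynos_dp_reg.c. A small standalone check follows; the macro is duplicated so the snippet compiles on its own in userspace.

#include <assert.h>

#define AUX_LENGTH(x)	(((x - 1) & 0xf) << 4)	/* copied for illustration */

int main(void)
{
	assert(AUX_LENGTH(1) == 0x00);	/* one byte encodes as 0x0 */
	assert(AUX_LENGTH(16) == 0xf0);	/* 16 bytes fill the 4-bit field */
	return 0;
}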
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
new file mode 100644
index 00000000000..9c8088462c2
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -0,0 +1,186 @@
+/* exynos_drm_buf.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_buf.h"
+#include "exynos_drm_iommu.h"
+
+static int lowlevel_buffer_allocate(struct drm_device *dev,
+ unsigned int flags, struct exynos_drm_gem_buf *buf)
+{
+ int ret = 0;
+ enum dma_attr attr;
+ unsigned int nr_pages;
+
+ if (buf->dma_addr) {
+ DRM_DEBUG_KMS("already allocated.\n");
+ return 0;
+ }
+
+ init_dma_attrs(&buf->dma_attrs);
+
+	/*
+	 * If EXYNOS_BO_CONTIG, a fully physically contiguous memory
+	 * region is allocated; otherwise the allocation is made as
+	 * physically contiguous as possible.
+	 */
+ if (!(flags & EXYNOS_BO_NONCONTIG))
+ dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
+
+	/*
+	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combined
+	 * mapping; otherwise use a cacheable mapping.
+	 */
+ if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
+ attr = DMA_ATTR_WRITE_COMBINE;
+ else
+ attr = DMA_ATTR_NON_CONSISTENT;
+
+ dma_set_attr(attr, &buf->dma_attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
+
+ nr_pages = buf->size >> PAGE_SHIFT;
+
+ if (!is_drm_iommu_supported(dev)) {
+ dma_addr_t start_addr;
+ unsigned int i = 0;
+
+ buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
+ if (!buf->pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ return -ENOMEM;
+ }
+
+ buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev,
+ buf->size,
+ &buf->dma_addr, GFP_KERNEL,
+ &buf->dma_attrs);
+ if (!buf->kvaddr) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ start_addr = buf->dma_addr;
+ while (i < nr_pages) {
+ buf->pages[i] = phys_to_page(start_addr);
+ start_addr += PAGE_SIZE;
+ i++;
+ }
+ } else {
+
+ buf->pages = dma_alloc_attrs(dev->dev, buf->size,
+ &buf->dma_addr, GFP_KERNEL,
+ &buf->dma_attrs);
+ if (!buf->pages) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ return -ENOMEM;
+ }
+ }
+
+ buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
+ if (IS_ERR(buf->sgt)) {
+ DRM_ERROR("failed to get sg table.\n");
+ ret = PTR_ERR(buf->sgt);
+ goto err_free_attrs;
+ }
+
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+ (unsigned long)buf->dma_addr,
+ buf->size);
+
+ return ret;
+
+err_free_attrs:
+ dma_free_attrs(dev->dev, buf->size, buf->pages,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+ buf->dma_addr = (dma_addr_t)NULL;
+err_free:
+ if (!is_drm_iommu_supported(dev))
+ drm_free_large(buf->pages);
+
+ return ret;
+}
+
+static void lowlevel_buffer_deallocate(struct drm_device *dev,
+ unsigned int flags, struct exynos_drm_gem_buf *buf)
+{
+ if (!buf->dma_addr) {
+ DRM_DEBUG_KMS("dma_addr is invalid.\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+ (unsigned long)buf->dma_addr,
+ buf->size);
+
+ sg_free_table(buf->sgt);
+
+ kfree(buf->sgt);
+ buf->sgt = NULL;
+
+ if (!is_drm_iommu_supported(dev)) {
+ dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+ drm_free_large(buf->pages);
+ } else
+ dma_free_attrs(dev->dev, buf->size, buf->pages,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+
+ buf->dma_addr = (dma_addr_t)NULL;
+}
+
+struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
+ unsigned int size)
+{
+ struct exynos_drm_gem_buf *buffer;
+
+ DRM_DEBUG_KMS("desired size = 0x%x\n", size);
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return NULL;
+
+ buffer->size = size;
+ return buffer;
+}
+
+void exynos_drm_fini_buf(struct drm_device *dev,
+ struct exynos_drm_gem_buf *buffer)
+{
+ kfree(buffer);
+ buffer = NULL;
+}
+
+int exynos_drm_alloc_buf(struct drm_device *dev,
+ struct exynos_drm_gem_buf *buf, unsigned int flags)
+{
+
+ /*
+ * allocate memory region and set the memory information
+ * to vaddr and dma_addr of a buffer object.
+ */
+ if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void exynos_drm_free_buf(struct drm_device *dev,
+ unsigned int flags, struct exynos_drm_gem_buf *buffer)
+{
+
+ lowlevel_buffer_deallocate(dev, flags, buffer);
+}
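+
+/*
+ * Usage sketch (not part of the original patch): the expected call
+ * sequence for the helpers above, as driven by the GEM layer.
+ * example_create_buffer() is hypothetical.
+ */
+static int example_create_buffer(struct drm_device *dev, unsigned long size)
+{
+	struct exynos_drm_gem_buf *buf;
+	int ret;
+
+	buf = exynos_drm_init_buf(dev, PAGE_ALIGN(size));
+	if (!buf)
+		return -ENOMEM;
+
+	ret = exynos_drm_alloc_buf(dev, buf,
+				   EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC);
+	if (ret < 0) {
+		exynos_drm_fini_buf(dev, buf);
+		return ret;
+	}
+
+	/* ... use buf->dma_addr and buf->sgt ... */
+
+	exynos_drm_free_buf(dev, EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC, buf);
+	exynos_drm_fini_buf(dev, buf);
+	return 0;
+}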
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
new file mode 100644
index 00000000000..a6412f19673
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -0,0 +1,33 @@
+/* exynos_drm_buf.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_BUF_H_
+#define _EXYNOS_DRM_BUF_H_
+
+/* create and initialize buffer object. */
+struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
+ unsigned int size);
+
+/* destroy buffer object. */
+void exynos_drm_fini_buf(struct drm_device *dev,
+ struct exynos_drm_gem_buf *buffer);
+
+/* allocate physical memory region and setup sgt. */
+int exynos_drm_alloc_buf(struct drm_device *dev,
+ struct exynos_drm_gem_buf *buf,
+ unsigned int flags);
+
+/* release physical memory region, and sgt. */
+void exynos_drm_free_buf(struct drm_device *dev,
+ unsigned int flags,
+ struct exynos_drm_gem_buf *buffer);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
new file mode 100644
index 00000000000..9a16dbe121d
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+#include "exynos_drm_connector.h"
+
+#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
+ drm_connector)
+
+struct exynos_drm_connector {
+ struct drm_connector drm_connector;
+ uint32_t encoder_id;
+ struct exynos_drm_display *display;
+};
+
+static int exynos_drm_connector_get_modes(struct drm_connector *connector)
+{
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_display *display = exynos_connector->display;
+ struct edid *edid = NULL;
+ unsigned int count = 0;
+ int ret;
+
+ /*
+	 * If the get_edid() callback exists, it is called to fetch EDID
+	 * data from the HDMI side over the I2C interface; otherwise the
+	 * timing is taken from the FIMD driver (display controller).
+	 *
+	 * Note: for an LCD panel, count is always 1 on success because
+	 * the panel exposes only one mode.
+ */
+ if (display->ops->get_edid) {
+ edid = display->ops->get_edid(display, connector);
+ if (IS_ERR_OR_NULL(edid)) {
+ ret = PTR_ERR(edid);
+ edid = NULL;
+ DRM_ERROR("Panel operation get_edid failed %d\n", ret);
+ goto out;
+ }
+
+ count = drm_add_edid_modes(connector, edid);
+ if (!count) {
+ DRM_ERROR("Add edid modes failed %d\n", count);
+ goto out;
+ }
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ } else {
+ struct exynos_drm_panel_info *panel;
+ struct drm_display_mode *mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ DRM_ERROR("failed to create a new display mode.\n");
+ return 0;
+ }
+
+ if (display->ops->get_panel)
+ panel = display->ops->get_panel(display);
+ else {
+ drm_mode_destroy(connector->dev, mode);
+ return 0;
+ }
+
+ drm_display_mode_from_videomode(&panel->vm, mode);
+ mode->width_mm = panel->width_mm;
+ mode->height_mm = panel->height_mm;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ count = 1;
+ }
+
+out:
+ kfree(edid);
+ return count;
+}
+
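+/*
+ * Illustrative sketch (not part of the original patch): a display driver
+ * could back the non-EDID path above with a static panel description such
+ * as the one below. All "example_*" names and the timing values are
+ * made-up placeholders; the shape matches how get_panel() is used in
+ * exynos_drm_connector_get_modes().
+ */
+static struct exynos_drm_panel_info example_panel = {
+	.vm = {
+		.pixelclock = 47500000,
+		.hactive = 1024,
+		.hfront_porch = 64,
+		.hback_porch = 16,
+		.hsync_len = 48,
+		.vactive = 600,
+		.vfront_porch = 64,
+		.vback_porch = 64,
+		.vsync_len = 3,
+	},
+	.width_mm = 154,
+	.height_mm = 90,
+};
+
+static struct exynos_drm_panel_info *example_get_panel(
+				struct exynos_drm_display *display)
+{
+	return &example_panel;
+}
+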
+static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_display *display = exynos_connector->display;
+ int ret = MODE_BAD;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (display->ops->check_mode)
+ if (!display->ops->check_mode(display, mode))
+ ret = MODE_OK;
+
+ return ret;
+}
+
+static struct drm_encoder *exynos_drm_best_encoder(
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
+ exynos_connector->encoder_id);
+ return NULL;
+ }
+
+ encoder = obj_to_encoder(obj);
+
+ return encoder;
+}
+
+static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
+ .get_modes = exynos_drm_connector_get_modes,
+ .mode_valid = exynos_drm_connector_mode_valid,
+ .best_encoder = exynos_drm_best_encoder,
+};
+
+static int exynos_drm_connector_fill_modes(struct drm_connector *connector,
+ unsigned int max_width, unsigned int max_height)
+{
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_display *display = exynos_connector->display;
+ unsigned int width, height;
+
+ width = max_width;
+ height = max_height;
+
+ /*
+	 * If a specific driver wants to find the desired mode using its
+	 * maximum resolution, get the max width and height from that driver.
+ */
+ if (display->ops->get_max_resol)
+ display->ops->get_max_resol(display, &width, &height);
+
+ return drm_helper_probe_single_connector_modes(connector, width,
+ height);
+}
+
+/* get detection status of display device. */
+static enum drm_connector_status
+exynos_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_display *display = exynos_connector->display;
+ enum drm_connector_status status = connector_status_disconnected;
+
+ if (display->ops->is_connected) {
+ if (display->ops->is_connected(display))
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+ }
+
+ return status;
+}
+
+static void exynos_drm_connector_destroy(struct drm_connector *connector)
+{
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(exynos_connector);
+}
+
+static struct drm_connector_funcs exynos_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = exynos_drm_connector_fill_modes,
+ .detect = exynos_drm_connector_detect,
+ .destroy = exynos_drm_connector_destroy,
+};
+
+struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ struct exynos_drm_connector *exynos_connector;
+ struct exynos_drm_display *display = exynos_drm_get_display(encoder);
+ struct drm_connector *connector;
+ int type;
+ int err;
+
+ exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
+ if (!exynos_connector)
+ return NULL;
+
+ connector = &exynos_connector->drm_connector;
+
+ switch (display->type) {
+ case EXYNOS_DISPLAY_TYPE_HDMI:
+ type = DRM_MODE_CONNECTOR_HDMIA;
+ connector->interlace_allowed = true;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ case EXYNOS_DISPLAY_TYPE_VIDI:
+ type = DRM_MODE_CONNECTOR_VIRTUAL;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ default:
+ type = DRM_MODE_CONNECTOR_Unknown;
+ break;
+ }
+
+ drm_connector_init(dev, connector, &exynos_connector_funcs, type);
+ drm_connector_helper_add(connector, &exynos_connector_helper_funcs);
+
+ err = drm_sysfs_connector_add(connector);
+ if (err)
+ goto err_connector;
+
+ exynos_connector->encoder_id = encoder->base.id;
+ exynos_connector->display = display;
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ connector->encoder = encoder;
+
+ err = drm_mode_connector_attach_encoder(connector, encoder);
+ if (err) {
+		DRM_ERROR("failed to attach a connector to an encoder\n");
+ goto err_sysfs;
+ }
+
+ DRM_DEBUG_KMS("connector has been created\n");
+
+ return connector;
+
+err_sysfs:
+ drm_sysfs_connector_remove(connector);
+err_connector:
+ drm_connector_cleanup(connector);
+ kfree(exynos_connector);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h
new file mode 100644
index 00000000000..4eb20d78379
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_CONNECTOR_H_
+#define _EXYNOS_DRM_CONNECTOR_H_
+
+struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
+ struct drm_encoder *encoder);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
new file mode 100644
index 00000000000..4c9f972eaa0
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -0,0 +1,161 @@
+/* exynos_drm_core.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_encoder.h"
+#include "exynos_drm_fbdev.h"
+
+static LIST_HEAD(exynos_drm_subdrv_list);
+
+int exynos_drm_create_enc_conn(struct drm_device *dev,
+ struct exynos_drm_display *display)
+{
+ struct drm_encoder *encoder;
+ int ret;
+ unsigned long possible_crtcs = 0;
+
+ ret = exynos_drm_crtc_get_pipe_from_type(dev, display->type);
+ if (ret < 0)
+ return ret;
+
+ possible_crtcs |= 1 << ret;
+
+	/* create and initialize an encoder for this sub driver. */
+ encoder = exynos_drm_encoder_create(dev, display, possible_crtcs);
+ if (!encoder) {
+ DRM_ERROR("failed to create encoder\n");
+ return -EFAULT;
+ }
+
+ display->encoder = encoder;
+
+ ret = display->ops->create_connector(display, encoder);
+ if (ret) {
+ DRM_ERROR("failed to create connector ret = %d\n", ret);
+ goto err_destroy_encoder;
+ }
+
+ return 0;
+
+err_destroy_encoder:
+ encoder->funcs->destroy(encoder);
+ return ret;
+}
+
+int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
+{
+ if (!subdrv)
+ return -EINVAL;
+
+ list_add_tail(&subdrv->list, &exynos_drm_subdrv_list);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
+
+int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
+{
+ if (!subdrv)
+ return -EINVAL;
+
+ list_del(&subdrv->list);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
+
+int exynos_drm_device_subdrv_probe(struct drm_device *dev)
+{
+ struct exynos_drm_subdrv *subdrv, *n;
+ int err;
+
+ if (!dev)
+ return -EINVAL;
+
+ list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
+ if (subdrv->probe) {
+ subdrv->drm_dev = dev;
+
+ /*
+			 * This probe callback is called by the sub driver
+			 * after all of its resources, such as clocks, irq
+			 * and register map, have been set up.
+ */
+ err = subdrv->probe(dev, subdrv->dev);
+ if (err) {
+ DRM_DEBUG("exynos drm subdrv probe failed.\n");
+ list_del(&subdrv->list);
+ continue;
+ }
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe);
+
+int exynos_drm_device_subdrv_remove(struct drm_device *dev)
+{
+ struct exynos_drm_subdrv *subdrv;
+
+ if (!dev) {
+ WARN(1, "Unexpected drm device unregister!\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
+ if (subdrv->remove)
+ subdrv->remove(dev, subdrv->dev);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove);
+
+int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct exynos_drm_subdrv *subdrv;
+ int ret;
+
+ list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
+ if (subdrv->open) {
+ ret = subdrv->open(dev, subdrv->dev, file);
+ if (ret)
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
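+	/*
+	 * Walk back over the sub drivers that were already opened; using
+	 * &subdrv->list as the head starts the reverse walk from the
+	 * entry that failed.
+	 */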
+ list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
+ if (subdrv->close)
+ subdrv->close(dev, subdrv->dev, file);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open);
+
+void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
+{
+ struct exynos_drm_subdrv *subdrv;
+
+ list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
+ if (subdrv->close)
+ subdrv->close(dev, subdrv->dev, file);
+ }
+}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
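+
+/*
+ * Illustrative sketch (not part of the original patch): a minimal sub
+ * driver registering itself with the core. All "example_*" names are
+ * hypothetical; the callbacks match the signatures used above.
+ */
+static int example_subdrv_probe(struct drm_device *drm_dev,
+				struct device *dev)
+{
+	/* clocks, irq and register map are already set up at this point */
+	return 0;
+}
+
+static struct exynos_drm_subdrv example_subdrv = {
+	.probe = example_subdrv_probe,
+};
+
+static void example_register(struct device *dev)
+{
+	example_subdrv.dev = dev;
+	exynos_drm_subdrv_register(&example_subdrv);
+}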
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
new file mode 100644
index 00000000000..95c9435d026
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -0,0 +1,510 @@
+/* exynos_drm_crtc.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+#include "exynos_drm_plane.h"
+
+#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
+ drm_crtc)
+
+enum exynos_crtc_mode {
+ CRTC_MODE_NORMAL, /* normal mode */
+ CRTC_MODE_BLANK, /* The private plane of crtc is blank */
+};
+
+/*
+ * Exynos specific crtc structure.
+ *
+ * @drm_crtc: crtc object.
+ * @plane: pointer to the private plane object for this crtc
+ * @manager: the manager associated with this crtc
+ * @pipe: crtc index assigned at load() when a new crtc object is created.
+ *	The crtc object is stored in the private->crtc array so that the
+ *	crtc corresponding to this pipe can be looked up when an interrupt
+ *	occurs; the pipe is needed because the drm framework doesn't
+ *	support multiple irqs yet, and it tells us which crtc the current
+ *	hardware interrupt belongs to.
+ * @dpms: store the crtc dpms value
+ * @mode: store the crtc mode value
+ * @pending_flip_queue: wait queue for completion of a pending page flip
+ * @pending_flip: whether a page flip is pending on this crtc
+ */
+struct exynos_drm_crtc {
+ struct drm_crtc drm_crtc;
+ struct drm_plane *plane;
+ struct exynos_drm_manager *manager;
+ unsigned int pipe;
+ unsigned int dpms;
+ enum exynos_crtc_mode mode;
+ wait_queue_head_t pending_flip_queue;
+ atomic_t pending_flip;
+};
+
+static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct exynos_drm_manager *manager = exynos_crtc->manager;
+
+ DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+
+ if (exynos_crtc->dpms == mode) {
+		DRM_DEBUG_KMS("desired dpms mode is the same as the previous one.\n");
+ return;
+ }
+
+ if (mode > DRM_MODE_DPMS_ON) {
+ /* wait for the completion of page flip. */
+ wait_event(exynos_crtc->pending_flip_queue,
+ atomic_read(&exynos_crtc->pending_flip) == 0);
+ drm_vblank_off(crtc->dev, exynos_crtc->pipe);
+ }
+
+ if (manager->ops->dpms)
+ manager->ops->dpms(manager, mode);
+
+ exynos_crtc->dpms = mode;
+}
+
+static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
+{
+ /* drm framework doesn't check NULL. */
+}
+
+static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct exynos_drm_manager *manager = exynos_crtc->manager;
+
+ exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+
+ exynos_plane_commit(exynos_crtc->plane);
+
+ if (manager->ops->commit)
+ manager->ops->commit(manager);
+
+ exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON);
+}
+
+static bool
+exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct exynos_drm_manager *manager = exynos_crtc->manager;
+
+ if (manager->ops->mode_fixup)
+ return manager->ops->mode_fixup(manager, mode, adjusted_mode);
+
+ return true;
+}
+
+static int
+exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct exynos_drm_manager *manager = exynos_crtc->manager;
+ struct drm_plane *plane = exynos_crtc->plane;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
+ int ret;
+
+ /*
+	 * copy the mode data adjusted by mode_fixup() into crtc->mode
+	 * so that the hardware can be set to the proper mode.
+ */
+ memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
+
+ crtc_w = crtc->primary->fb->width - x;
+ crtc_h = crtc->primary->fb->height - y;
+
+ if (manager->ops->mode_set)
+ manager->ops->mode_set(manager, &crtc->mode);
+
+ ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h,
+ x, y, crtc_w, crtc_h);
+ if (ret)
+ return ret;
+
+ plane->crtc = crtc;
+ plane->fb = crtc->primary->fb;
+ drm_framebuffer_reference(plane->fb);
+
+ return 0;
+}
+
+static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct drm_plane *plane = exynos_crtc->plane;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
+ int ret;
+
+	/* when a framebuffer change is requested, the crtc's dpms should be on */
+	if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
+		DRM_ERROR("failed framebuffer change request.\n");
+ return -EPERM;
+ }
+
+ crtc_w = crtc->primary->fb->width - x;
+ crtc_h = crtc->primary->fb->height - y;
+
+ ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h,
+ x, y, crtc_w, crtc_h);
+ if (ret)
+ return ret;
+
+ exynos_drm_crtc_commit(crtc);
+
+ return 0;
+}
+
+static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return exynos_drm_crtc_mode_set_commit(crtc, x, y, old_fb);
+}
+
+static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_plane *plane;
+ int ret;
+
+ exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ drm_for_each_legacy_plane(plane, &crtc->dev->mode_config.plane_list) {
+ if (plane->crtc != crtc)
+ continue;
+
+ ret = plane->funcs->disable_plane(plane);
+ if (ret)
+ DRM_ERROR("Failed to disable plane %d\n", ret);
+ }
+}
+
+static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
+ .dpms = exynos_drm_crtc_dpms,
+ .prepare = exynos_drm_crtc_prepare,
+ .commit = exynos_drm_crtc_commit,
+ .mode_fixup = exynos_drm_crtc_mode_fixup,
+ .mode_set = exynos_drm_crtc_mode_set,
+ .mode_set_base = exynos_drm_crtc_mode_set_base,
+ .disable = exynos_drm_crtc_disable,
+};
+
+static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct drm_device *dev = crtc->dev;
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ int ret = -EINVAL;
+
+	/* when a page flip is requested, the crtc's dpms should be on */
+ if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
+ DRM_ERROR("failed page flip request.\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (event) {
+ /*
+		 * the pipe from userspace is always 0, so set the pipe
+		 * number of the current owner in the event.
+ */
+ event->pipe = exynos_crtc->pipe;
+
+ ret = drm_vblank_get(dev, exynos_crtc->pipe);
+ if (ret) {
+ DRM_DEBUG("failed to acquire vblank counter\n");
+
+ goto out;
+ }
+
+ spin_lock_irq(&dev->event_lock);
+ list_add_tail(&event->base.link,
+ &dev_priv->pageflip_event_list);
+ atomic_set(&exynos_crtc->pending_flip, 1);
+ spin_unlock_irq(&dev->event_lock);
+
+ crtc->primary->fb = fb;
+ ret = exynos_drm_crtc_mode_set_commit(crtc, crtc->x, crtc->y,
+ NULL);
+ if (ret) {
+ crtc->primary->fb = old_fb;
+
+ spin_lock_irq(&dev->event_lock);
+ drm_vblank_put(dev, exynos_crtc->pipe);
+ list_del(&event->base.link);
+ spin_unlock_irq(&dev->event_lock);
+
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
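+/*
+ * Usage sketch (not part of the original patch): userspace reaches the
+ * handler above through libdrm, e.g.:
+ *
+ *	drmModePageFlip(fd, crtc_id, fb_id,
+ *			DRM_MODE_PAGE_FLIP_EVENT, user_data);
+ *
+ * and then reads the completion event from the DRM fd once
+ * exynos_drm_crtc_finish_pageflip() has sent it.
+ */
+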
+static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct exynos_drm_private *private = crtc->dev->dev_private;
+
+ private->crtc[exynos_crtc->pipe] = NULL;
+
+ drm_crtc_cleanup(crtc);
+ kfree(exynos_crtc);
+}
+
+static int exynos_drm_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = crtc->dev;
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ if (property == dev_priv->crtc_mode_property) {
+ enum exynos_crtc_mode mode = val;
+
+ if (mode == exynos_crtc->mode)
+ return 0;
+
+ exynos_crtc->mode = mode;
+
+ switch (mode) {
+ case CRTC_MODE_NORMAL:
+ exynos_drm_crtc_commit(crtc);
+ break;
+ case CRTC_MODE_BLANK:
+ exynos_plane_dpms(exynos_crtc->plane,
+ DRM_MODE_DPMS_OFF);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct drm_crtc_funcs exynos_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = exynos_drm_crtc_page_flip,
+ .destroy = exynos_drm_crtc_destroy,
+ .set_property = exynos_drm_crtc_set_property,
+};
+
+static const struct drm_prop_enum_list mode_names[] = {
+ { CRTC_MODE_NORMAL, "normal" },
+ { CRTC_MODE_BLANK, "blank" },
+};
+
+static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+
+ prop = dev_priv->crtc_mode_property;
+ if (!prop) {
+ prop = drm_property_create_enum(dev, 0, "mode", mode_names,
+ ARRAY_SIZE(mode_names));
+ if (!prop)
+ return;
+
+ dev_priv->crtc_mode_property = prop;
+ }
+
+ drm_object_attach_property(&crtc->base, prop, 0);
+}
+
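+/*
+ * Usage sketch (not part of the original patch): userspace can switch the
+ * "mode" property attached below to "blank" through libdrm, e.g.:
+ *
+ *	drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
+ *				 mode_prop_id, 1 /" "blank" "/);
+ *
+ * where mode_prop_id is discovered via drmModeObjectGetProperties().
+ */
+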
+int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
+{
+ struct exynos_drm_crtc *exynos_crtc;
+ struct exynos_drm_private *private = manager->drm_dev->dev_private;
+ struct drm_crtc *crtc;
+
+ exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
+ if (!exynos_crtc)
+ return -ENOMEM;
+
+ init_waitqueue_head(&exynos_crtc->pending_flip_queue);
+ atomic_set(&exynos_crtc->pending_flip, 0);
+
+ exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
+ exynos_crtc->manager = manager;
+ exynos_crtc->pipe = manager->pipe;
+ exynos_crtc->plane = exynos_plane_init(manager->drm_dev,
+ 1 << manager->pipe, true);
+ if (!exynos_crtc->plane) {
+ kfree(exynos_crtc);
+ return -ENOMEM;
+ }
+
+ manager->crtc = &exynos_crtc->drm_crtc;
+ crtc = &exynos_crtc->drm_crtc;
+
+ private->crtc[manager->pipe] = crtc;
+
+ drm_crtc_init(manager->drm_dev, crtc, &exynos_crtc_funcs);
+ drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
+
+ exynos_drm_crtc_attach_mode_property(crtc);
+
+ return 0;
+}
+
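+/*
+ * Usage sketch (not part of the original patch): a display controller
+ * driver calls exynos_drm_crtc_create() once its manager is initialized,
+ * e.g.:
+ *
+ *	ctx->manager.drm_dev = drm_dev;
+ *	ctx->manager.pipe = ctx->pipe;
+ *	ret = exynos_drm_crtc_create(&ctx->manager);
+ *
+ * where ctx is the driver's private context (hypothetical).
+ */
+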
+int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_crtc *exynos_crtc =
+ to_exynos_crtc(private->crtc[pipe]);
+ struct exynos_drm_manager *manager = exynos_crtc->manager;
+
+ if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+ return -EPERM;
+
+ if (manager->ops->enable_vblank)
+ manager->ops->enable_vblank(manager);
+
+ return 0;
+}
+
+void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_crtc *exynos_crtc =
+ to_exynos_crtc(private->crtc[pipe]);
+ struct exynos_drm_manager *manager = exynos_crtc->manager;
+
+ if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+ return;
+
+ if (manager->ops->disable_vblank)
+ manager->ops->disable_vblank(manager);
+}
+
+void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
+{
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+ struct drm_pending_vblank_event *e, *t;
+ struct drm_crtc *drm_crtc = dev_priv->crtc[pipe];
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
+ base.link) {
+		/* ignore the event if its pipe doesn't match this crtc's. */
+ if (pipe != e->pipe)
+ continue;
+
+ list_del(&e->base.link);
+ drm_send_vblank_event(dev, -1, e);
+ drm_vblank_put(dev, pipe);
+ atomic_set(&exynos_crtc->pending_flip, 0);
+ wake_up(&exynos_crtc->pending_flip_queue);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
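+/*
+ * Illustrative sketch (not part of the original patch): a display
+ * controller's vblank interrupt handler would typically drive the helper
+ * above like this. "example_vblank_ctx" and "example_vblank_irq" are
+ * hypothetical, and <linux/interrupt.h> is assumed to be included.
+ */
+struct example_vblank_ctx {
+	struct drm_device *drm_dev;
+	int pipe;
+};
+
+static irqreturn_t example_vblank_irq(int irq, void *dev_id)
+{
+	struct example_vblank_ctx *ctx = dev_id;
+
+	/* the device specific interrupt source is acknowledged here */
+
+	drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+	exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+
+	return IRQ_HANDLED;
+}
+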
+void exynos_drm_crtc_plane_mode_set(struct drm_crtc *crtc,
+ struct exynos_drm_overlay *overlay)
+{
+ struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+
+ if (manager->ops->win_mode_set)
+ manager->ops->win_mode_set(manager, overlay);
+}
+
+void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos)
+{
+ struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+
+ if (manager->ops->win_commit)
+ manager->ops->win_commit(manager, zpos);
+}
+
+void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos)
+{
+ struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+
+ if (manager->ops->win_enable)
+ manager->ops->win_enable(manager, zpos);
+}
+
+void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos)
+{
+ struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+
+ if (manager->ops->win_disable)
+ manager->ops->win_disable(manager, zpos);
+}
+
+void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
+{
+ struct exynos_drm_manager *manager;
+ struct drm_device *dev = fb->dev;
+ struct drm_crtc *crtc;
+
+ /*
+	 * make sure that the overlay data have been applied to the real
+	 * hardware for all encoders.
+ */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ manager = to_exynos_crtc(crtc)->manager;
+
+ /*
+ * wait for vblank interrupt
+		 * - this makes sure that the overlay data have been
+		 * applied to the real hardware.
+ */
+ if (manager->ops->wait_for_vblank)
+ manager->ops->wait_for_vblank(manager);
+ }
+}
+
+int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
+ unsigned int out_type)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
+ struct exynos_drm_crtc *exynos_crtc;
+
+ exynos_crtc = to_exynos_crtc(crtc);
+ if (exynos_crtc->manager->type == out_type)
+ return exynos_crtc->manager->pipe;
+ }
+
+ return -EPERM;
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
new file mode 100644
index 00000000000..9f74b10a8a0
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -0,0 +1,39 @@
+/* exynos_drm_crtc.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_CRTC_H_
+#define _EXYNOS_DRM_CRTC_H_
+
+struct drm_device;
+struct drm_crtc;
+struct exynos_drm_manager;
+struct exynos_drm_overlay;
+
+int exynos_drm_crtc_create(struct exynos_drm_manager *manager);
+int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
+void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
+void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
+void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
+
+void exynos_drm_crtc_plane_mode_set(struct drm_crtc *crtc,
+ struct exynos_drm_overlay *overlay);
+void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos);
+void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos);
+void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos);
+
+/* This function gets the pipe value of the crtc device matching out_type. */
+int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
+ unsigned int out_type);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
new file mode 100644
index 00000000000..2a3ad24276f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -0,0 +1,285 @@
+/* exynos_drm_dmabuf.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "exynos_drm_dmabuf.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+
+#include <linux/dma-buf.h>
+
+struct exynos_drm_dmabuf_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+ bool is_mapped;
+};
+
+static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
+{
+ return to_exynos_gem_obj(buf->priv);
+}
+
+static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
+ struct device *dev,
+ struct dma_buf_attachment *attach)
+{
+ struct exynos_drm_dmabuf_attachment *exynos_attach;
+
+ exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
+ if (!exynos_attach)
+ return -ENOMEM;
+
+ exynos_attach->dir = DMA_NONE;
+ attach->priv = exynos_attach;
+
+ return 0;
+}
+
+static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
+ struct sg_table *sgt;
+
+ if (!exynos_attach)
+ return;
+
+ sgt = &exynos_attach->sgt;
+
+ if (exynos_attach->dir != DMA_NONE)
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+ exynos_attach->dir);
+
+ sg_free_table(sgt);
+ kfree(exynos_attach);
+ attach->priv = NULL;
+}
+
+static struct sg_table *
+ exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
+ struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
+ struct drm_device *dev = gem_obj->base.dev;
+ struct exynos_drm_gem_buf *buf;
+ struct scatterlist *rd, *wr;
+ struct sg_table *sgt = NULL;
+ unsigned int i;
+ int nents, ret;
+
+ /* just return current sgt if already requested. */
+ if (exynos_attach->dir == dir && exynos_attach->is_mapped)
+ return &exynos_attach->sgt;
+
+ buf = gem_obj->buffer;
+ if (!buf) {
+ DRM_ERROR("buffer is null.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sgt = &exynos_attach->sgt;
+
+ ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
+ if (ret) {
+ DRM_ERROR("failed to alloc sgt.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ rd = buf->sgt->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }
+
+ if (dir != DMA_NONE) {
+ nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with iommu.\n");
+ sg_free_table(sgt);
+ sgt = ERR_PTR(-EIO);
+ goto err_unlock;
+ }
+ }
+
+ exynos_attach->is_mapped = true;
+ exynos_attach->dir = dir;
+ attach->priv = exynos_attach;
+
+ DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
+
+err_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return sgt;
+}
+
+static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ /* Nothing to do. */
+}
+
+static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ /* TODO */
+
+ return NULL;
+}
+
+static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num,
+ void *addr)
+{
+ /* TODO */
+}
+
+static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ /* TODO */
+
+ return NULL;
+}
+
+static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+ /* TODO */
+}
+
+static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+ struct vm_area_struct *vma)
+{
+ return -ENOTTY;
+}
+
+static struct dma_buf_ops exynos_dmabuf_ops = {
+ .attach = exynos_gem_attach_dma_buf,
+ .detach = exynos_gem_detach_dma_buf,
+ .map_dma_buf = exynos_gem_map_dma_buf,
+ .unmap_dma_buf = exynos_gem_unmap_dma_buf,
+ .kmap = exynos_gem_dmabuf_kmap,
+ .kmap_atomic = exynos_gem_dmabuf_kmap_atomic,
+ .kunmap = exynos_gem_dmabuf_kunmap,
+ .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
+ .mmap = exynos_gem_dmabuf_mmap,
+ .release = drm_gem_dmabuf_release,
+};
+
+struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
+ struct drm_gem_object *obj, int flags)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ return dma_buf_export(obj, &exynos_dmabuf_ops,
+ exynos_gem_obj->base.size, flags);
+}
+
+struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct scatterlist *sgl;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_buf *buffer;
+ int ret;
+
+	/* is this one of our own objects? */
+ if (dma_buf->ops == &exynos_dmabuf_ops) {
+ struct drm_gem_object *obj;
+
+ obj = dma_buf->priv;
+
+ /* is it from our device? */
+ if (obj->dev == drm_dev) {
+ /*
+			 * Importing a dmabuf exported from our own gem
+			 * increases the refcount on the gem itself instead
+			 * of the f_count of the dmabuf.
+ */
+ drm_gem_object_reference(obj);
+ return obj;
+ }
+ }
+
+ attach = dma_buf_attach(dma_buf, drm_dev->dev);
+ if (IS_ERR(attach))
+ return ERR_PTR(-EINVAL);
+
+ get_dma_buf(dma_buf);
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err_buf_detach;
+ }
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto err_unmap_attach;
+ }
+
+ exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
+ if (!exynos_gem_obj) {
+ ret = -ENOMEM;
+ goto err_free_buffer;
+ }
+
+ sgl = sgt->sgl;
+
+ buffer->size = dma_buf->size;
+ buffer->dma_addr = sg_dma_address(sgl);
+
+ if (sgt->nents == 1) {
+		/* the memory is always physically contiguous if sgt->nents is 1. */
+ exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
+ } else {
+ /*
+		 * this case could be either CONTIG or NONCONTIG, but for
+		 * now assume NONCONTIG.
+		 * TODO: find a way for the exporter to notify the importer
+		 * of its buffer type.
+ */
+ exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
+ }
+
+ exynos_gem_obj->buffer = buffer;
+ buffer->sgt = sgt;
+ exynos_gem_obj->base.import_attach = attach;
+
+ DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
+ buffer->size);
+
+ return &exynos_gem_obj->base;
+
+err_free_buffer:
+ kfree(buffer);
+ buffer = NULL;
+err_unmap_attach:
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+err_buf_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+
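+/*
+ * Usage sketch (not part of the original patch): userspace exercises the
+ * export/import paths above through libdrm, e.g.:
+ *
+ *	int prime_fd;
+ *	uint32_t handle;
+ *
+ *	drmPrimeHandleToFD(fd, gem_handle, DRM_CLOEXEC, &prime_fd);
+ *	drmPrimeFDToHandle(other_fd, prime_fd, &handle);
+ *
+ * Importing back into the exporting device itself takes the fast path at
+ * the top of exynos_dmabuf_prime_import() and just bumps the gem refcount.
+ */
+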
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
new file mode 100644
index 00000000000..49acfafb4fd
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
@@ -0,0 +1,25 @@
+/* exynos_drm_dmabuf.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_DMABUF_H_
+#define _EXYNOS_DRM_DMABUF_H_
+
+#ifdef CONFIG_DRM_EXYNOS_DMABUF
+struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
+ struct drm_gem_object *obj, int flags);
+
+struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
+ struct dma_buf *dma_buf);
+#else
+#define exynos_dmabuf_prime_export NULL
+#define exynos_dmabuf_prime_import NULL
+#endif
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
new file mode 100644
index 00000000000..9e530f205ad
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -0,0 +1,347 @@
+/*
+ * Exynos DRM Parallel output support.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ *
+ * Contacts: Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include <linux/regulator/consumer.h>
+
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+#include "exynos_drm_drv.h"
+
+struct exynos_dpi {
+ struct device *dev;
+ struct device_node *panel_node;
+
+ struct drm_panel *panel;
+ struct drm_connector connector;
+ struct drm_encoder *encoder;
+
+ struct videomode *vm;
+ int dpms_mode;
+};
+
+#define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector)
+
+static enum drm_connector_status
+exynos_dpi_detect(struct drm_connector *connector, bool force)
+{
+ struct exynos_dpi *ctx = connector_to_dpi(connector);
+
+ if (ctx->panel && !ctx->panel->connector)
+ drm_panel_attach(ctx->panel, &ctx->connector);
+
+ return connector_status_connected;
+}
+
+static void exynos_dpi_connector_destroy(struct drm_connector *connector)
+{
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+}
+
+static struct drm_connector_funcs exynos_dpi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = exynos_dpi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = exynos_dpi_connector_destroy,
+};
+
+static int exynos_dpi_get_modes(struct drm_connector *connector)
+{
+ struct exynos_dpi *ctx = connector_to_dpi(connector);
+
+	/* FIMD timings take precedence over panel modes */
+ if (ctx->vm) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ DRM_ERROR("failed to create a new display mode\n");
+ return 0;
+ }
+ drm_display_mode_from_videomode(ctx->vm, mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+
+ if (ctx->panel)
+ return ctx->panel->funcs->get_modes(ctx->panel);
+
+ return 0;
+}
+
+static struct drm_encoder *
+exynos_dpi_best_encoder(struct drm_connector *connector)
+{
+ struct exynos_dpi *ctx = connector_to_dpi(connector);
+
+ return ctx->encoder;
+}
+
+static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
+ .get_modes = exynos_dpi_get_modes,
+ .best_encoder = exynos_dpi_best_encoder,
+};
+
+static int exynos_dpi_create_connector(struct exynos_drm_display *display,
+ struct drm_encoder *encoder)
+{
+ struct exynos_dpi *ctx = display->ctx;
+ struct drm_connector *connector = &ctx->connector;
+ int ret;
+
+ ctx->encoder = encoder;
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &exynos_dpi_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
+ if (ret) {
+ DRM_ERROR("failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs);
+ drm_sysfs_connector_add(connector);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+static void exynos_dpi_poweron(struct exynos_dpi *ctx)
+{
+ if (ctx->panel)
+ drm_panel_enable(ctx->panel);
+}
+
+static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
+{
+ if (ctx->panel)
+ drm_panel_disable(ctx->panel);
+}
+
+static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
+{
+ struct exynos_dpi *ctx = display->ctx;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (ctx->dpms_mode != DRM_MODE_DPMS_ON)
+ exynos_dpi_poweron(ctx);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (ctx->dpms_mode == DRM_MODE_DPMS_ON)
+ exynos_dpi_poweroff(ctx);
+ break;
+ default:
+ break;
+ }
+ ctx->dpms_mode = mode;
+}
+
+static struct exynos_drm_display_ops exynos_dpi_display_ops = {
+ .create_connector = exynos_dpi_create_connector,
+ .dpms = exynos_dpi_dpms
+};
+
+static struct exynos_drm_display exynos_dpi_display = {
+ .type = EXYNOS_DISPLAY_TYPE_LCD,
+ .ops = &exynos_dpi_display_ops,
+};
+
+/* the of_* functions below will be removed once the of_graph patches are merged */
+static struct device_node *
+of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg)
+{
+ struct device_node *np;
+
+ for_each_child_of_node(parent, np) {
+ u32 r;
+
+ if (!np->name || of_node_cmp(np->name, name))
+ continue;
+
+ if (of_property_read_u32(np, "reg", &r) < 0)
+ r = 0;
+
+ if (reg == r)
+ break;
+ }
+
+ return np;
+}
+
+static struct device_node *of_graph_get_port_by_reg(struct device_node *parent,
+ u32 reg)
+{
+ struct device_node *ports, *port;
+
+ ports = of_get_child_by_name(parent, "ports");
+ if (ports)
+ parent = ports;
+
+ port = of_get_child_by_name_reg(parent, "port", reg);
+
+ of_node_put(ports);
+
+ return port;
+}
+
+static struct device_node *
+of_graph_get_endpoint_by_reg(struct device_node *port, u32 reg)
+{
+ return of_get_child_by_name_reg(port, "endpoint", reg);
+}
+
+static struct device_node *
+of_graph_get_remote_port_parent(const struct device_node *node)
+{
+ struct device_node *np;
+ unsigned int depth;
+
+ np = of_parse_phandle(node, "remote-endpoint", 0);
+
+	/* Walk 3 levels up only if there is a 'ports' node. */
+ for (depth = 3; depth && np; depth--) {
+ np = of_get_next_parent(np);
+ if (depth == 2 && of_node_cmp(np->name, "ports"))
+ break;
+ }
+ return np;
+}
+
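+/*
+ * Sketch (not part of the original patch): the of_* helpers above walk a
+ * device tree graph of the following shape; node and label names are
+ * illustrative.
+ *
+ *	fimd: fimd@11c00000 {
+ *		ports {
+ *			port@3 {
+ *				reg = <3>;
+ *				fimd_ep: endpoint {
+ *					remote-endpoint = <&panel_ep>;
+ *				};
+ *			};
+ *		};
+ *	};
+ */
+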
+enum {
+ FIMD_PORT_IN0,
+ FIMD_PORT_IN1,
+ FIMD_PORT_IN2,
+ FIMD_PORT_RGB,
+ FIMD_PORT_WRB,
+};
+
+static struct device_node *exynos_dpi_of_find_panel_node(struct device *dev)
+{
+ struct device_node *np, *ep;
+
+ np = of_graph_get_port_by_reg(dev->of_node, FIMD_PORT_RGB);
+ if (!np)
+ return NULL;
+
+ ep = of_graph_get_endpoint_by_reg(np, 0);
+ of_node_put(np);
+ if (!ep)
+ return NULL;
+
+ np = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+
+ return np;
+}
+
+static int exynos_dpi_parse_dt(struct exynos_dpi *ctx)
+{
+ struct device *dev = ctx->dev;
+ struct device_node *dn = dev->of_node;
+ struct device_node *np;
+
+ ctx->panel_node = exynos_dpi_of_find_panel_node(dev);
+
+ np = of_get_child_by_name(dn, "display-timings");
+ if (np) {
+ struct videomode *vm;
+ int ret;
+
+ of_node_put(np);
+
+ vm = devm_kzalloc(dev, sizeof(*ctx->vm), GFP_KERNEL);
+ if (!vm)
+ return -ENOMEM;
+
+ ret = of_get_videomode(dn, vm, 0);
+ if (ret < 0) {
+ devm_kfree(dev, vm);
+ return ret;
+ }
+
+ ctx->vm = vm;
+
+ return 0;
+ }
+
+ if (!ctx->panel_node)
+ return -EINVAL;
+
+ return 0;
+}
+
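+/*
+ * Sketch (not part of the original patch): a "display-timings" node that
+ * the parser above accepts; the values are placeholders.
+ *
+ *	display-timings {
+ *		timing0 {
+ *			clock-frequency = <47500000>;
+ *			hactive = <1024>;
+ *			vactive = <600>;
+ *			hfront-porch = <64>;
+ *			hback-porch = <16>;
+ *			hsync-len = <48>;
+ *			vfront-porch = <64>;
+ *			vback-porch = <64>;
+ *			vsync-len = <3>;
+ *		};
+ *	};
+ */
+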
+struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
+{
+ struct exynos_dpi *ctx;
+ int ret;
+
+ ret = exynos_drm_component_add(dev,
+ EXYNOS_DEVICE_TYPE_CONNECTOR,
+ exynos_dpi_display.type);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ goto err_del_component;
+
+ ctx->dev = dev;
+ exynos_dpi_display.ctx = ctx;
+ ctx->dpms_mode = DRM_MODE_DPMS_OFF;
+
+ ret = exynos_dpi_parse_dt(ctx);
+ if (ret < 0) {
+ devm_kfree(dev, ctx);
+ goto err_del_component;
+ }
+
+ if (ctx->panel_node) {
+ ctx->panel = of_drm_find_panel(ctx->panel_node);
+ if (!ctx->panel) {
+ exynos_drm_component_del(dev,
+ EXYNOS_DEVICE_TYPE_CONNECTOR);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+ }
+
+ return &exynos_dpi_display;
+
+err_del_component:
+ exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+
+ return NULL;
+}
+
+int exynos_dpi_remove(struct device *dev)
+{
+ struct drm_encoder *encoder = exynos_dpi_display.encoder;
+ struct exynos_dpi *ctx = exynos_dpi_display.ctx;
+
+ exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF);
+ encoder->funcs->destroy(encoder);
+ drm_connector_cleanup(&ctx->connector);
+
+ exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
new file mode 100644
index 00000000000..ab7d182063c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -0,0 +1,795 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/pm_runtime.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include <linux/anon_inodes.h>
+#include <linux/component.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_encoder.h"
+#include "exynos_drm_fbdev.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_plane.h"
+#include "exynos_drm_vidi.h"
+#include "exynos_drm_dmabuf.h"
+#include "exynos_drm_g2d.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
+
+#define DRIVER_NAME "exynos"
+#define DRIVER_DESC "Samsung SoC DRM"
+#define DRIVER_DATE "20110530"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+#define VBLANK_OFF_DELAY 50000
+
+static struct platform_device *exynos_drm_pdev;
+
+static DEFINE_MUTEX(drm_component_lock);
+static LIST_HEAD(drm_component_list);
+
+struct component_dev {
+ struct list_head list;
+ struct device *crtc_dev;
+ struct device *conn_dev;
+ enum exynos_drm_output_type out_type;
+ unsigned int dev_type_flag;
+};
+
+static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct exynos_drm_private *private;
+ int ret;
+ int nr;
+
+ private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
+ if (!private)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&private->pageflip_event_list);
+ dev_set_drvdata(dev->dev, dev);
+ dev->dev_private = (void *)private;
+
+ /*
+	 * Create a mapping to manage the iommu table and store a pointer
+	 * to the iommu mapping structure in iommu_mapping of the private
+	 * data. This iommu_mapping can also be used to check whether the
+	 * iommu is supported or not.
+ */
+ ret = drm_create_iommu_mapping(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to create iommu mapping.\n");
+ goto err_free_private;
+ }
+
+ drm_mode_config_init(dev);
+
+ exynos_drm_mode_config_init(dev);
+
+ for (nr = 0; nr < MAX_PLANE; nr++) {
+ struct drm_plane *plane;
+ unsigned long possible_crtcs = (1 << MAX_CRTC) - 1;
+
+		plane = exynos_plane_init(dev, possible_crtcs, false);
+		if (!plane) {
+			ret = -ENOMEM;
+			goto err_mode_config_cleanup;
+		}
+ }
+
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(dev);
+
+ ret = drm_vblank_init(dev, MAX_CRTC);
+ if (ret)
+ goto err_mode_config_cleanup;
+
+ /* setup possible_clones. */
+ exynos_drm_encoder_setup(dev);
+
+ drm_vblank_offdelay = VBLANK_OFF_DELAY;
+
+ platform_set_drvdata(dev->platformdev, dev);
+
+ /* Try to bind all sub drivers. */
+ ret = component_bind_all(dev->dev, dev);
+ if (ret)
+ goto err_cleanup_vblank;
+
+ /* Probe non kms sub drivers and virtual display driver. */
+ ret = exynos_drm_device_subdrv_probe(dev);
+ if (ret)
+ goto err_unbind_all;
+
+ /* force connectors detection */
+ drm_helper_hpd_irq_event(dev);
+
+ return 0;
+
+err_unbind_all:
+ component_unbind_all(dev->dev, dev);
+err_cleanup_vblank:
+ drm_vblank_cleanup(dev);
+err_mode_config_cleanup:
+ drm_mode_config_cleanup(dev);
+ drm_release_iommu_mapping(dev);
+err_free_private:
+ kfree(private);
+
+ return ret;
+}
+
+static int exynos_drm_unload(struct drm_device *dev)
+{
+ exynos_drm_device_subdrv_remove(dev);
+
+ exynos_drm_fbdev_fini(dev);
+ drm_vblank_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+
+ drm_release_iommu_mapping(dev);
+ kfree(dev->dev_private);
+
+ component_unbind_all(dev->dev, dev);
+ dev->dev_private = NULL;
+
+ return 0;
+}
+
+static const struct file_operations exynos_drm_gem_fops = {
+ .mmap = exynos_drm_gem_mmap_buffer,
+};
+
+static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
+{
+ struct drm_connector *connector;
+
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ int old_dpms = connector->dpms;
+
+ if (connector->funcs->dpms)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+
+ /* Set the old mode back to the connector for resume */
+ connector->dpms = old_dpms;
+ }
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
+static int exynos_drm_resume(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->funcs->dpms)
+ connector->funcs->dpms(connector, connector->dpms);
+ }
+ drm_modeset_unlock_all(dev);
+
+ drm_helper_resume_force_mode(dev);
+
+ return 0;
+}
+
+static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv;
+ struct file *anon_filp;
+ int ret;
+
+ file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+
+ file->driver_priv = file_priv;
+
+ ret = exynos_drm_subdrv_open(dev, file);
+ if (ret)
+ goto err_file_priv_free;
+
+ anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
+ NULL, 0);
+ if (IS_ERR(anon_filp)) {
+ ret = PTR_ERR(anon_filp);
+ goto err_subdrv_close;
+ }
+
+ anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
+ file_priv->anon_filp = anon_filp;
+
+ return ret;
+
+err_subdrv_close:
+ exynos_drm_subdrv_close(dev, file);
+
+err_file_priv_free:
+ kfree(file_priv);
+ file->driver_priv = NULL;
+ return ret;
+}
+
+static void exynos_drm_preclose(struct drm_device *dev,
+ struct drm_file *file)
+{
+ exynos_drm_subdrv_close(dev, file);
+}
+
+static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_exynos_file_private *file_priv;
+ struct drm_pending_vblank_event *v, *vt;
+ struct drm_pending_event *e, *et;
+ unsigned long flags;
+
+ if (!file->driver_priv)
+ return;
+
+	/* Release all events not handled by the page flip handler. */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_for_each_entry_safe(v, vt, &private->pageflip_event_list,
+ base.link) {
+ if (v->base.file_priv == file) {
+ list_del(&v->base.link);
+ drm_vblank_put(dev, v->pipe);
+ v->base.destroy(&v->base);
+ }
+ }
+
+ /* Release all events handled by page flip handler but not freed. */
+ list_for_each_entry_safe(e, et, &file->event_list, link) {
+ list_del(&e->link);
+ e->destroy(e);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ file_priv = file->driver_priv;
+ if (file_priv->anon_filp)
+ fput(file_priv->anon_filp);
+
+ kfree(file->driver_priv);
+ file->driver_priv = NULL;
+}
+
+static void exynos_drm_lastclose(struct drm_device *dev)
+{
+ exynos_drm_fbdev_restore_mode(dev);
+}
+
+static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
+ .fault = exynos_drm_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_ioctl_desc exynos_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
+ exynos_drm_gem_map_offset_ioctl, DRM_UNLOCKED |
+ DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
+ exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
+ exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
+ vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER,
+ exynos_g2d_get_ver_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST,
+ exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
+ exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
+ exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
+ exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
+ exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
+ exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
+};
+
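+/*
+ * Usage sketch (not part of the original patch): userspace reaches the
+ * table above through the generic DRM ioctl path, e.g. creating a GEM
+ * buffer:
+ *
+ *	struct drm_exynos_gem_create req = {
+ *		.size = length,
+ *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
+ *	};
+ *
+ *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
+ *		handle = req.handle;
+ */
+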
+static const struct file_operations exynos_drm_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = exynos_drm_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .release = drm_release,
+};
+
+static struct drm_driver exynos_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .load = exynos_drm_load,
+ .unload = exynos_drm_unload,
+ .suspend = exynos_drm_suspend,
+ .resume = exynos_drm_resume,
+ .open = exynos_drm_open,
+ .preclose = exynos_drm_preclose,
+ .lastclose = exynos_drm_lastclose,
+ .postclose = exynos_drm_postclose,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = exynos_drm_crtc_enable_vblank,
+ .disable_vblank = exynos_drm_crtc_disable_vblank,
+ .gem_free_object = exynos_drm_gem_free_object,
+ .gem_vm_ops = &exynos_drm_gem_vm_ops,
+ .dumb_create = exynos_drm_gem_dumb_create,
+ .dumb_map_offset = exynos_drm_gem_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = exynos_dmabuf_prime_export,
+ .gem_prime_import = exynos_dmabuf_prime_import,
+ .ioctls = exynos_ioctls,
+ .num_ioctls = ARRAY_SIZE(exynos_ioctls),
+ .fops = &exynos_drm_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int exynos_drm_sys_suspend(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ pm_message_t message;
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ message.event = PM_EVENT_SUSPEND;
+ return exynos_drm_suspend(drm_dev, message);
+}
+
+static int exynos_drm_sys_resume(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return exynos_drm_resume(drm_dev);
+}
+#endif
+
+static const struct dev_pm_ops exynos_drm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_sys_suspend, exynos_drm_sys_resume)
+};
+
+int exynos_drm_component_add(struct device *dev,
+ enum exynos_drm_device_type dev_type,
+ enum exynos_drm_output_type out_type)
+{
+ struct component_dev *cdev;
+
+ if (dev_type != EXYNOS_DEVICE_TYPE_CRTC &&
+ dev_type != EXYNOS_DEVICE_TYPE_CONNECTOR) {
+ DRM_ERROR("invalid device type.\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&drm_component_lock);
+
+ /*
+	 * Check whether there is already a component that holds the two
+	 * device objects, one for the crtc and one for the
+	 * encoder/connector. This ensures that both the crtc and the
+	 * encoder/connector drivers are ready before the exynos drm core
+	 * binds them.
+ */
+ list_for_each_entry(cdev, &drm_component_list, list) {
+ if (cdev->out_type == out_type) {
+			/*
+			 * If both crtc and encoder/connector device objects
+			 * have already been added, just return.
+			 */
+ if (cdev->dev_type_flag == (EXYNOS_DEVICE_TYPE_CRTC |
+ EXYNOS_DEVICE_TYPE_CONNECTOR)) {
+ mutex_unlock(&drm_component_lock);
+ return 0;
+ }
+
+ if (dev_type == EXYNOS_DEVICE_TYPE_CRTC) {
+ cdev->crtc_dev = dev;
+ cdev->dev_type_flag |= dev_type;
+ }
+
+ if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR) {
+ cdev->conn_dev = dev;
+ cdev->dev_type_flag |= dev_type;
+ }
+
+ mutex_unlock(&drm_component_lock);
+ return 0;
+ }
+ }
+
+ mutex_unlock(&drm_component_lock);
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ if (dev_type == EXYNOS_DEVICE_TYPE_CRTC)
+ cdev->crtc_dev = dev;
+ if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR)
+ cdev->conn_dev = dev;
+
+ cdev->out_type = out_type;
+ cdev->dev_type_flag = dev_type;
+
+ mutex_lock(&drm_component_lock);
+ list_add_tail(&cdev->list, &drm_component_list);
+ mutex_unlock(&drm_component_lock);
+
+ return 0;
+}
+
+void exynos_drm_component_del(struct device *dev,
+ enum exynos_drm_device_type dev_type)
+{
+ struct component_dev *cdev, *next;
+
+ mutex_lock(&drm_component_lock);
+
+ list_for_each_entry_safe(cdev, next, &drm_component_list, list) {
+ if (dev_type == EXYNOS_DEVICE_TYPE_CRTC) {
+ if (cdev->crtc_dev == dev) {
+ cdev->crtc_dev = NULL;
+ cdev->dev_type_flag &= ~dev_type;
+ }
+ }
+
+ if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR) {
+ if (cdev->conn_dev == dev) {
+ cdev->conn_dev = NULL;
+ cdev->dev_type_flag &= ~dev_type;
+ }
+ }
+
+		/*
+		 * Release the cdev object only when both the crtc and the
+		 * encoder/connector device objects are NULL.
+		 */
+ if (!cdev->crtc_dev && !cdev->conn_dev) {
+ list_del(&cdev->list);
+ kfree(cdev);
+ }
+ }
+
+ mutex_unlock(&drm_component_lock);
+}
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev == (struct device *)data;
+}
+
+static int exynos_drm_add_components(struct device *dev, struct master *m)
+{
+ struct component_dev *cdev;
+ unsigned int attach_cnt = 0;
+
+ mutex_lock(&drm_component_lock);
+
+ list_for_each_entry(cdev, &drm_component_list, list) {
+ int ret;
+
+		/*
+		 * Add components to the master only when both crtc and
+		 * encoder/connector device objects exist.
+		 */
+ if (!cdev->crtc_dev || !cdev->conn_dev)
+ continue;
+
+ attach_cnt++;
+
+ mutex_unlock(&drm_component_lock);
+
+		/*
+		 * The fimd and dpi modules share the same device object,
+		 * so add only the crtc device object in this case.
+		 *
+		 * TODO: once the dpi module follows the driver model, the
+		 * code below can be removed.
+		 */
+ if (cdev->crtc_dev == cdev->conn_dev) {
+ ret = component_master_add_child(m, compare_of,
+ cdev->crtc_dev);
+ if (ret < 0)
+ return ret;
+
+ goto out_lock;
+ }
+
+		/*
+		 * Do not change the call order below.
+		 * The crtc device must be added to the master first because
+		 * the connector/encoder needs the pipe number of the crtc
+		 * when it is created.
+		 */
+ ret = component_master_add_child(m, compare_of, cdev->crtc_dev);
+ ret |= component_master_add_child(m, compare_of,
+ cdev->conn_dev);
+ if (ret < 0)
+ return ret;
+
+out_lock:
+ mutex_lock(&drm_component_lock);
+ }
+
+ mutex_unlock(&drm_component_lock);
+
+ return attach_cnt ? 0 : -ENODEV;
+}
+
+static int exynos_drm_bind(struct device *dev)
+{
+ return drm_platform_init(&exynos_drm_driver, to_platform_device(dev));
+}
+
+static void exynos_drm_unbind(struct device *dev)
+{
+ drm_put_dev(dev_get_drvdata(dev));
+}
+
+static const struct component_master_ops exynos_drm_ops = {
+ .add_components = exynos_drm_add_components,
+ .bind = exynos_drm_bind,
+ .unbind = exynos_drm_unbind,
+};
+
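+/*
+ * Register every enabled sub-device platform driver, then try to bring
+ * up the component master. A failing component_master_add() is not
+ * treated as fatal here; binding is simply retried as the remaining
+ * sub drivers are probed.
+ */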
+static int exynos_drm_platform_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
+
+#ifdef CONFIG_DRM_EXYNOS_FIMD
+ ret = platform_driver_register(&fimd_driver);
+ if (ret < 0)
+ return ret;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_DP
+ ret = platform_driver_register(&dp_driver);
+ if (ret < 0)
+ goto err_unregister_fimd_drv;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_DSI
+ ret = platform_driver_register(&dsi_driver);
+ if (ret < 0)
+ goto err_unregister_dp_drv;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_HDMI
+ ret = platform_driver_register(&mixer_driver);
+ if (ret < 0)
+ goto err_unregister_dsi_drv;
+ ret = platform_driver_register(&hdmi_driver);
+ if (ret < 0)
+ goto err_unregister_mixer_drv;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_G2D
+ ret = platform_driver_register(&g2d_driver);
+ if (ret < 0)
+ goto err_unregister_hdmi_drv;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ ret = platform_driver_register(&fimc_driver);
+ if (ret < 0)
+ goto err_unregister_g2d_drv;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ ret = platform_driver_register(&rotator_driver);
+ if (ret < 0)
+ goto err_unregister_fimc_drv;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ ret = platform_driver_register(&gsc_driver);
+ if (ret < 0)
+ goto err_unregister_rotator_drv;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ ret = platform_driver_register(&ipp_driver);
+ if (ret < 0)
+ goto err_unregister_gsc_drv;
+
+ ret = exynos_platform_device_ipp_register();
+ if (ret < 0)
+ goto err_unregister_ipp_drv;
+#endif
+
+ ret = component_master_add(&pdev->dev, &exynos_drm_ops);
+ if (ret < 0)
+		DRM_DEBUG_KMS("bind will be retried after the remaining sub drivers are probed.\n");
+
+ return 0;
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+err_unregister_ipp_drv:
+ platform_driver_unregister(&ipp_driver);
+err_unregister_gsc_drv:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ platform_driver_unregister(&gsc_driver);
+err_unregister_rotator_drv:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ platform_driver_unregister(&rotator_driver);
+err_unregister_fimc_drv:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ platform_driver_unregister(&fimc_driver);
+err_unregister_g2d_drv:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_G2D
+ platform_driver_unregister(&g2d_driver);
+err_unregister_hdmi_drv:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_HDMI
+ platform_driver_unregister(&hdmi_driver);
+err_unregister_mixer_drv:
+ platform_driver_unregister(&mixer_driver);
+err_unregister_dsi_drv:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_DSI
+ platform_driver_unregister(&dsi_driver);
+err_unregister_dp_drv:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_DP
+ platform_driver_unregister(&dp_driver);
+err_unregister_fimd_drv:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMD
+ platform_driver_unregister(&fimd_driver);
+#endif
+ return ret;
+}
+
+static int exynos_drm_platform_remove(struct platform_device *pdev)
+{
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ exynos_platform_device_ipp_unregister();
+ platform_driver_unregister(&ipp_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ platform_driver_unregister(&gsc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ platform_driver_unregister(&rotator_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ platform_driver_unregister(&fimc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_G2D
+ platform_driver_unregister(&g2d_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_HDMI
+ platform_driver_unregister(&mixer_driver);
+ platform_driver_unregister(&hdmi_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMD
+ platform_driver_unregister(&fimd_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_DSI
+ platform_driver_unregister(&dsi_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_DP
+ platform_driver_unregister(&dp_driver);
+#endif
+ component_master_del(&pdev->dev, &exynos_drm_ops);
+ return 0;
+}
+
+static struct platform_driver exynos_drm_platform_driver = {
+ .probe = exynos_drm_platform_probe,
+ .remove = exynos_drm_platform_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "exynos-drm",
+ .pm = &exynos_drm_pm_ops,
+ },
+};
+
+static int exynos_drm_init(void)
+{
+ int ret;
+
+ exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
+ NULL, 0);
+ if (IS_ERR(exynos_drm_pdev))
+ return PTR_ERR(exynos_drm_pdev);
+
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+ ret = exynos_drm_probe_vidi();
+ if (ret < 0)
+ goto err_unregister_pd;
+#endif
+
+ ret = platform_driver_register(&exynos_drm_platform_driver);
+ if (ret)
+ goto err_remove_vidi;
+
+ return 0;
+
+err_remove_vidi:
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+ exynos_drm_remove_vidi();
+
+err_unregister_pd:
+#endif
+ platform_device_unregister(exynos_drm_pdev);
+
+ return ret;
+}
+
+static void exynos_drm_exit(void)
+{
+ platform_driver_unregister(&exynos_drm_platform_driver);
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+ exynos_drm_remove_vidi();
+#endif
+ platform_device_unregister(exynos_drm_pdev);
+}
+
+module_init(exynos_drm_init);
+module_exit(exynos_drm_exit);
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
new file mode 100644
index 00000000000..06cde450627
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -0,0 +1,383 @@
+/* exynos_drm_drv.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_DRV_H_
+#define _EXYNOS_DRM_DRV_H_
+
+#include <linux/module.h>
+
+#define MAX_CRTC 3
+#define MAX_PLANE 5
+#define MAX_FB_BUFFER 4
+#define DEFAULT_ZPOS -1
+
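+/*
+ * Busy-wait until COND becomes true or MS milliseconds elapse; evaluates
+ * to 0 on success and -ETIMEDOUT on timeout. The loop never sleeps, so
+ * keep the timeout short, e.g. wait_for(readl(reg) & BIT(0), 10).
+ */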
+#define _wait_for(COND, MS) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+ while (!(COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ } \
+ ret__; \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS)
+
+struct drm_device;
+struct exynos_drm_overlay;
+struct drm_connector;
+
+extern unsigned int drm_vblank_offdelay;
+
+/* This enumerates device type. */
+enum exynos_drm_device_type {
+ EXYNOS_DEVICE_TYPE_NONE,
+ EXYNOS_DEVICE_TYPE_CRTC,
+ EXYNOS_DEVICE_TYPE_CONNECTOR,
+};
+
+/* This enumerates display type. */
+enum exynos_drm_output_type {
+ EXYNOS_DISPLAY_TYPE_NONE,
+ /* RGB or CPU Interface. */
+ EXYNOS_DISPLAY_TYPE_LCD,
+ /* HDMI Interface. */
+ EXYNOS_DISPLAY_TYPE_HDMI,
+ /* Virtual Display Interface. */
+ EXYNOS_DISPLAY_TYPE_VIDI,
+};
+
+/*
+ * Exynos drm common overlay structure.
+ *
+ * @fb_x: offset x on a framebuffer to be displayed.
+ * - the unit is screen coordinates.
+ * @fb_y: offset y on a framebuffer to be displayed.
+ * - the unit is screen coordinates.
+ * @fb_width: width of a framebuffer.
+ * @fb_height: height of a framebuffer.
+ * @src_width: width of a partial image to be displayed from framebuffer.
+ * @src_height: height of a partial image to be displayed from framebuffer.
+ * @crtc_x: offset x on hardware screen.
+ * @crtc_y: offset y on hardware screen.
+ * @crtc_width: window width to be displayed (hardware screen).
+ * @crtc_height: window height to be displayed (hardware screen).
+ * @mode_width: width of screen mode.
+ * @mode_height: height of screen mode.
+ * @refresh: refresh rate.
+ * @scan_flag: interlaced or progressive scan
+ *	(one of the DRM_MODE_FLAG_* values).
+ * @bpp: pixel size (in bits).
+ * @pitch: number of bytes per line of the framebuffer.
+ * @pixel_format: fourcc pixel format of this overlay
+ * @dma_addr: array of bus (DMA) addresses of the memory regions
+ *	allocated for the overlay.
+ * @zpos: order of the overlay layer (z position).
+ * @default_win: a window to be enabled.
+ * @color_key: color key on or off.
+ * @index_color: index color used when the color key feature is enabled.
+ * @local_path: for the lcd type, local path mode on or off.
+ * @transparency: transparency on or off.
+ * @activated: activated or not.
+ *
+ * This structure is common to Exynos SoCs; its contents are copied into
+ * the hardware-specific overlay info.
+ */
+struct exynos_drm_overlay {
+ unsigned int fb_x;
+ unsigned int fb_y;
+ unsigned int fb_width;
+ unsigned int fb_height;
+ unsigned int src_width;
+ unsigned int src_height;
+ unsigned int crtc_x;
+ unsigned int crtc_y;
+ unsigned int crtc_width;
+ unsigned int crtc_height;
+ unsigned int mode_width;
+ unsigned int mode_height;
+ unsigned int refresh;
+ unsigned int scan_flag;
+ unsigned int bpp;
+ unsigned int pitch;
+ uint32_t pixel_format;
+ dma_addr_t dma_addr[MAX_FB_BUFFER];
+ int zpos;
+
+ bool default_win;
+ bool color_key;
+ unsigned int index_color;
+ bool local_path;
+ bool transparency;
+ bool activated;
+};
+
+/*
+ * Exynos DRM Display Structure.
+ * - this structure is common to analog tv, digital tv and lcd panel.
+ *
+ * @create_connector: create and initialize a connector for the given encoder.
+ * @remove: cleans up the display for removal.
+ * @mode_fixup: fix up mode data against the hw-specific display mode.
+ * @mode_set: convert drm_display_mode to a hw-specific display mode;
+ *	called by encoder->mode_set().
+ * @check_mode: check if the mode is valid or not.
+ * @dpms: display device on or off.
+ * @commit: apply changes to hw.
+ */
+struct exynos_drm_display;
+struct exynos_drm_display_ops {
+ int (*create_connector)(struct exynos_drm_display *display,
+ struct drm_encoder *encoder);
+ void (*remove)(struct exynos_drm_display *display);
+ void (*mode_fixup)(struct exynos_drm_display *display,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*mode_set)(struct exynos_drm_display *display,
+ struct drm_display_mode *mode);
+ int (*check_mode)(struct exynos_drm_display *display,
+ struct drm_display_mode *mode);
+ void (*dpms)(struct exynos_drm_display *display, int mode);
+ void (*commit)(struct exynos_drm_display *display);
+};
+
+/*
+ * Exynos drm display structure, maps 1:1 with an encoder/connector
+ *
+ * @list: the list entry for this manager
+ * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
+ * @encoder: encoder object this display maps to
+ * @connector: connector object this display maps to
+ * @ops: pointer to callbacks for exynos drm specific functionality
+ * @ctx: A pointer to the display's implementation specific context
+ */
+struct exynos_drm_display {
+ struct list_head list;
+ enum exynos_drm_output_type type;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct exynos_drm_display_ops *ops;
+ void *ctx;
+};
+
+/*
+ * Exynos drm manager ops
+ *
+ * @dpms: control device power.
+ * @mode_fixup: fix mode data before applying it
+ * @mode_set: set the given mode to the manager
+ * @commit: set current hw specific display mode to hw.
+ * @enable_vblank: specific driver callback for enabling vblank interrupt.
+ * @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @wait_for_vblank: wait for vblank interrupt to make sure that
+ * hardware overlay is updated.
+ * @win_mode_set: copy drm overlay info to hw specific overlay info.
+ * @win_commit: apply hardware specific overlay data to registers.
+ * @win_enable: enable hardware specific overlay.
+ * @win_disable: disable hardware specific overlay.
+ */
+struct exynos_drm_manager;
+struct exynos_drm_manager_ops {
+ void (*dpms)(struct exynos_drm_manager *mgr, int mode);
+ bool (*mode_fixup)(struct exynos_drm_manager *mgr,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*mode_set)(struct exynos_drm_manager *mgr,
+ const struct drm_display_mode *mode);
+ void (*commit)(struct exynos_drm_manager *mgr);
+ int (*enable_vblank)(struct exynos_drm_manager *mgr);
+ void (*disable_vblank)(struct exynos_drm_manager *mgr);
+ void (*wait_for_vblank)(struct exynos_drm_manager *mgr);
+ void (*win_mode_set)(struct exynos_drm_manager *mgr,
+ struct exynos_drm_overlay *overlay);
+ void (*win_commit)(struct exynos_drm_manager *mgr, int zpos);
+ void (*win_enable)(struct exynos_drm_manager *mgr, int zpos);
+ void (*win_disable)(struct exynos_drm_manager *mgr, int zpos);
+};
+
+/*
+ * Exynos drm common manager structure, maps 1:1 with a crtc
+ *
+ * @list: the list entry for this manager
+ * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
+ * @drm_dev: pointer to the drm device
+ * @crtc: crtc object.
+ * @pipe: the pipe number for this crtc/manager
+ * @ops: pointer to callbacks for exynos drm specific functionality
+ * @ctx: A pointer to the manager's implementation specific context
+ */
+struct exynos_drm_manager {
+ struct list_head list;
+ enum exynos_drm_output_type type;
+ struct drm_device *drm_dev;
+ struct drm_crtc *crtc;
+ int pipe;
+ struct exynos_drm_manager_ops *ops;
+ void *ctx;
+};
+
+struct exynos_drm_g2d_private {
+ struct device *dev;
+ struct list_head inuse_cmdlist;
+ struct list_head event_list;
+ struct list_head userptr_list;
+};
+
+struct exynos_drm_ipp_private {
+ struct device *dev;
+ struct list_head event_list;
+};
+
+struct drm_exynos_file_private {
+ struct exynos_drm_g2d_private *g2d_priv;
+ struct exynos_drm_ipp_private *ipp_priv;
+ struct file *anon_filp;
+};
+
+/*
+ * Exynos drm private structure.
+ *
+ * @da_start: start address of the device address space.
+ *	With an iommu, the device address space starts at this address;
+ *	otherwise the default is used.
+ * @da_space_size: size of the device address space.
+ *	If 0, the default value is used.
+ * @pipe: number of crtcs created so far; used to assign pipe numbers.
+ */
+struct exynos_drm_private {
+ struct drm_fb_helper *fb_helper;
+
+ /* list head for new event to be added. */
+ struct list_head pageflip_event_list;
+
+ /*
+ * created crtc object would be contained at this array and
+ * this array is used to be aware of which crtc did it request vblank.
+ */
+ struct drm_crtc *crtc[MAX_CRTC];
+ struct drm_property *plane_zpos_property;
+ struct drm_property *crtc_mode_property;
+
+ unsigned long da_start;
+ unsigned long da_space_size;
+
+ unsigned int pipe;
+};
+
+/*
+ * Exynos drm sub driver structure.
+ *
+ * @list: list entry used to register the sub driver with the exynos drm
+ *	driver.
+ * @dev: pointer to the device object of the subdrv device driver.
+ * @drm_dev: pointer to the drm_device; set when the sub driver calls
+ *	exynos_drm_subdrv_register().
+ * @probe: called by the exynos drm driver after the subdrv has been
+ *	registered to it.
+ * @remove: releases the resources created by the probe callback.
+ * @open: called on drm device file open.
+ * @close: called on drm device file close.
+ */
+struct exynos_drm_subdrv {
+ struct list_head list;
+ struct device *dev;
+ struct drm_device *drm_dev;
+
+ int (*probe)(struct drm_device *drm_dev, struct device *dev);
+ void (*remove)(struct drm_device *drm_dev, struct device *dev);
+ int (*open)(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file);
+ void (*close)(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file);
+};
+
+/* This function is called by non-KMS drivers such as g2d and ipp. */
+int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv);
+
+/* This function removes the subdrv from the exynos drm driver's list. */
+int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
+
+int exynos_drm_device_subdrv_probe(struct drm_device *dev);
+int exynos_drm_device_subdrv_remove(struct drm_device *dev);
+int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
+void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
+
+/*
+ * This function registers the exynos drm hdmi platform device. It ensures
+ * only one instance of the device is created.
+ */
+int exynos_platform_device_hdmi_register(void);
+
+/*
+ * This function unregisters the exynos drm hdmi platform device if it exists.
+ */
+void exynos_platform_device_hdmi_unregister(void);
+
+/*
+ * This function registers the exynos drm ipp platform device.
+ */
+int exynos_platform_device_ipp_register(void);
+
+/*
+ * This function unregisters the exynos drm ipp platform device if it exists.
+ */
+void exynos_platform_device_ipp_unregister(void);
+
+#ifdef CONFIG_DRM_EXYNOS_DPI
+struct exynos_drm_display *exynos_dpi_probe(struct device *dev);
+int exynos_dpi_remove(struct device *dev);
+#else
+static inline struct exynos_drm_display *
+exynos_dpi_probe(struct device *dev) { return NULL; }
+static inline int exynos_dpi_remove(struct device *dev) { return 0; }
+#endif
+
+/*
+ * This function registers the exynos drm vidi platform device/driver.
+ */
+int exynos_drm_probe_vidi(void);
+
+/*
+ * This function unregisters the exynos drm vidi platform device/driver.
+ */
+void exynos_drm_remove_vidi(void);
+
+/* This function creates an encoder and a connector, and initializes them. */
+int exynos_drm_create_enc_conn(struct drm_device *dev,
+ struct exynos_drm_display *display);
+
+int exynos_drm_component_add(struct device *dev,
+ enum exynos_drm_device_type dev_type,
+ enum exynos_drm_output_type out_type);
+
+void exynos_drm_component_del(struct device *dev,
+ enum exynos_drm_device_type dev_type);
+
+extern struct platform_driver fimd_driver;
+extern struct platform_driver dp_driver;
+extern struct platform_driver dsi_driver;
+extern struct platform_driver mixer_driver;
+extern struct platform_driver hdmi_driver;
+extern struct platform_driver exynos_drm_common_hdmi_driver;
+extern struct platform_driver vidi_driver;
+extern struct platform_driver g2d_driver;
+extern struct platform_driver fimc_driver;
+extern struct platform_driver rotator_driver;
+extern struct platform_driver gsc_driver;
+extern struct platform_driver ipp_driver;
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
new file mode 100644
index 00000000000..6302aa64f6c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -0,0 +1,1546 @@
+/*
+ * Samsung SoC MIPI DSI Master driver.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ *
+ * Contacts: Tomasz Figa <t.figa@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/component.h>
+
+#include <video/mipi_display.h>
+#include <video/videomode.h>
+
+#include "exynos_drm_drv.h"
+
+/* returns true iff both arguments logically differ */
+#define NEQV(a, b) (!(a) ^ !(b))
+
+#define DSIM_STATUS_REG 0x0 /* Status register */
+#define DSIM_SWRST_REG 0x4 /* Software reset register */
+#define DSIM_CLKCTRL_REG 0x8 /* Clock control register */
+#define DSIM_TIMEOUT_REG 0xc /* Time out register */
+#define DSIM_CONFIG_REG 0x10 /* Configuration register */
+#define DSIM_ESCMODE_REG 0x14 /* Escape mode register */
+
+/* Main display image resolution register */
+#define DSIM_MDRESOL_REG 0x18
+#define DSIM_MVPORCH_REG 0x1c /* Main display Vporch register */
+#define DSIM_MHPORCH_REG 0x20 /* Main display Hporch register */
+#define DSIM_MSYNC_REG 0x24 /* Main display sync area register */
+
+/* Sub display image resolution register */
+#define DSIM_SDRESOL_REG 0x28
+#define DSIM_INTSRC_REG 0x2c /* Interrupt source register */
+#define DSIM_INTMSK_REG 0x30 /* Interrupt mask register */
+#define DSIM_PKTHDR_REG 0x34 /* Packet Header FIFO register */
+#define DSIM_PAYLOAD_REG 0x38 /* Payload FIFO register */
+#define DSIM_RXFIFO_REG 0x3c /* Read FIFO register */
+#define DSIM_FIFOTHLD_REG 0x40 /* FIFO threshold level register */
+#define DSIM_FIFOCTRL_REG 0x44 /* FIFO status and control register */
+
+/* FIFO memory AC characteristic register (0x48, not used by this driver) */
+#define DSIM_PLLCTRL_REG 0x4c /* PLL control register */
+#define DSIM_PLLTMR_REG 0x50 /* PLL timer register */
+#define DSIM_PHYACCHR_REG 0x54 /* D-PHY AC characteristic register */
+#define DSIM_PHYACCHR1_REG 0x58 /* D-PHY AC characteristic register1 */
+
+/* DSIM_STATUS */
+#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
+#define DSIM_STOP_STATE_CLK (1 << 8)
+#define DSIM_TX_READY_HS_CLK (1 << 10)
+#define DSIM_PLL_STABLE (1 << 31)
+
+/* DSIM_SWRST */
+#define DSIM_FUNCRST (1 << 16)
+#define DSIM_SWRST (1 << 0)
+
+/* DSIM_TIMEOUT */
+#define DSIM_LPDR_TIMEOUT(x) ((x) << 0)
+#define DSIM_BTA_TIMEOUT(x) ((x) << 16)
+
+/* DSIM_CLKCTRL */
+#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
+#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
+#define DSIM_LANE_ESC_CLK_EN_CLK (1 << 19)
+#define DSIM_LANE_ESC_CLK_EN_DATA(x) (((x) & 0xf) << 20)
+#define DSIM_LANE_ESC_CLK_EN_DATA_MASK (0xf << 20)
+#define DSIM_BYTE_CLKEN (1 << 24)
+#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
+#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
+#define DSIM_PLL_BYPASS (1 << 27)
+#define DSIM_ESC_CLKEN (1 << 28)
+#define DSIM_TX_REQUEST_HSCLK (1 << 31)
+
+/* DSIM_CONFIG */
+#define DSIM_LANE_EN_CLK (1 << 0)
+#define DSIM_LANE_EN(x) (((x) & 0xf) << 1)
+#define DSIM_NUM_OF_DATA_LANE(x) (((x) & 0x3) << 5)
+#define DSIM_SUB_PIX_FORMAT(x) (((x) & 0x7) << 8)
+#define DSIM_MAIN_PIX_FORMAT_MASK (0x7 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB888 (0x7 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB666 (0x6 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB666_P (0x5 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB565 (0x4 << 12)
+#define DSIM_SUB_VC(x)			(((x) & 0x3) << 16)
+#define DSIM_MAIN_VC(x)			(((x) & 0x3) << 18)
+#define DSIM_HSA_MODE (1 << 20)
+#define DSIM_HBP_MODE (1 << 21)
+#define DSIM_HFP_MODE (1 << 22)
+#define DSIM_HSE_MODE (1 << 23)
+#define DSIM_AUTO_MODE (1 << 24)
+#define DSIM_VIDEO_MODE (1 << 25)
+#define DSIM_BURST_MODE (1 << 26)
+#define DSIM_SYNC_INFORM (1 << 27)
+#define DSIM_EOT_DISABLE (1 << 28)
+#define DSIM_MFLUSH_VS (1 << 29)
+
+/* DSIM_ESCMODE */
+#define DSIM_TX_TRIGGER_RST (1 << 4)
+#define DSIM_TX_LPDT_LP (1 << 6)
+#define DSIM_CMD_LPDT_LP (1 << 7)
+#define DSIM_FORCE_BTA (1 << 16)
+#define DSIM_FORCE_STOP_STATE (1 << 20)
+#define DSIM_STOP_STATE_CNT(x) (((x) & 0x7ff) << 21)
+#define DSIM_STOP_STATE_CNT_MASK (0x7ff << 21)
+
+/* DSIM_MDRESOL */
+#define DSIM_MAIN_STAND_BY (1 << 31)
+#define DSIM_MAIN_VRESOL(x) (((x) & 0x7ff) << 16)
+#define DSIM_MAIN_HRESOL(x)		(((x) & 0x7ff) << 0)
+
+/* DSIM_MVPORCH */
+#define DSIM_CMD_ALLOW(x) ((x) << 28)
+#define DSIM_STABLE_VFP(x) ((x) << 16)
+#define DSIM_MAIN_VBP(x) ((x) << 0)
+#define DSIM_CMD_ALLOW_MASK (0xf << 28)
+#define DSIM_STABLE_VFP_MASK (0x7ff << 16)
+#define DSIM_MAIN_VBP_MASK (0x7ff << 0)
+
+/* DSIM_MHPORCH */
+#define DSIM_MAIN_HFP(x) ((x) << 16)
+#define DSIM_MAIN_HBP(x) ((x) << 0)
+#define DSIM_MAIN_HFP_MASK ((0xffff) << 16)
+#define DSIM_MAIN_HBP_MASK ((0xffff) << 0)
+
+/* DSIM_MSYNC */
+#define DSIM_MAIN_VSA(x) ((x) << 22)
+#define DSIM_MAIN_HSA(x) ((x) << 0)
+#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22)
+#define DSIM_MAIN_HSA_MASK ((0xffff) << 0)
+
+/* DSIM_SDRESOL */
+#define DSIM_SUB_STANDY(x) ((x) << 31)
+#define DSIM_SUB_VRESOL(x) ((x) << 16)
+#define DSIM_SUB_HRESOL(x) ((x) << 0)
+#define DSIM_SUB_STANDY_MASK ((0x1) << 31)
+#define DSIM_SUB_VRESOL_MASK ((0x7ff) << 16)
+#define DSIM_SUB_HRESOL_MASK ((0x7ff) << 0)
+
+/* DSIM_INTSRC */
+#define DSIM_INT_PLL_STABLE (1 << 31)
+#define DSIM_INT_SW_RST_RELEASE (1 << 30)
+#define DSIM_INT_SFR_FIFO_EMPTY (1 << 29)
+#define DSIM_INT_BTA (1 << 25)
+#define DSIM_INT_FRAME_DONE (1 << 24)
+#define DSIM_INT_RX_TIMEOUT (1 << 21)
+#define DSIM_INT_BTA_TIMEOUT (1 << 20)
+#define DSIM_INT_RX_DONE (1 << 18)
+#define DSIM_INT_RX_TE (1 << 17)
+#define DSIM_INT_RX_ACK (1 << 16)
+#define DSIM_INT_RX_ECC_ERR (1 << 15)
+#define DSIM_INT_RX_CRC_ERR (1 << 14)
+
+/* DSIM_FIFOCTRL */
+#define DSIM_RX_DATA_FULL (1 << 25)
+#define DSIM_RX_DATA_EMPTY (1 << 24)
+#define DSIM_SFR_HEADER_FULL (1 << 23)
+#define DSIM_SFR_HEADER_EMPTY (1 << 22)
+#define DSIM_SFR_PAYLOAD_FULL (1 << 21)
+#define DSIM_SFR_PAYLOAD_EMPTY (1 << 20)
+#define DSIM_I80_HEADER_FULL (1 << 19)
+#define DSIM_I80_HEADER_EMPTY (1 << 18)
+#define DSIM_I80_PAYLOAD_FULL (1 << 17)
+#define DSIM_I80_PAYLOAD_EMPTY (1 << 16)
+#define DSIM_SD_HEADER_FULL (1 << 15)
+#define DSIM_SD_HEADER_EMPTY (1 << 14)
+#define DSIM_SD_PAYLOAD_FULL (1 << 13)
+#define DSIM_SD_PAYLOAD_EMPTY (1 << 12)
+#define DSIM_MD_HEADER_FULL (1 << 11)
+#define DSIM_MD_HEADER_EMPTY (1 << 10)
+#define DSIM_MD_PAYLOAD_FULL (1 << 9)
+#define DSIM_MD_PAYLOAD_EMPTY (1 << 8)
+#define DSIM_RX_FIFO (1 << 4)
+#define DSIM_SFR_FIFO (1 << 3)
+#define DSIM_I80_FIFO (1 << 2)
+#define DSIM_SD_FIFO (1 << 1)
+#define DSIM_MD_FIFO (1 << 0)
+
+/* DSIM_PHYACCHR */
+#define DSIM_AFC_EN (1 << 14)
+#define DSIM_AFC_CTL(x) (((x) & 0x7) << 5)
+
+/* DSIM_PLLCTRL */
+#define DSIM_FREQ_BAND(x) ((x) << 24)
+#define DSIM_PLL_EN (1 << 23)
+#define DSIM_PLL_P(x) ((x) << 13)
+#define DSIM_PLL_M(x) ((x) << 4)
+#define DSIM_PLL_S(x) ((x) << 1)
+
+#define DSI_MAX_BUS_WIDTH 4
+#define DSI_NUM_VIRTUAL_CHANNELS 4
+#define DSI_TX_FIFO_SIZE 2048
+#define DSI_RX_FIFO_SIZE 256
+#define DSI_XFER_TIMEOUT_MS 100
+#define DSI_RX_FIFO_EMPTY 0x30800002
+
+enum exynos_dsi_transfer_type {
+ EXYNOS_DSI_TX,
+ EXYNOS_DSI_RX,
+};
+
+struct exynos_dsi_transfer {
+ struct list_head list;
+ struct completion completed;
+ int result;
+ u8 data_id;
+ u8 data[2];
+ u16 flags;
+
+ const u8 *tx_payload;
+ u16 tx_len;
+ u16 tx_done;
+
+ u8 *rx_payload;
+ u16 rx_len;
+ u16 rx_done;
+};
+
+#define DSIM_STATE_ENABLED BIT(0)
+#define DSIM_STATE_INITIALIZED BIT(1)
+#define DSIM_STATE_CMD_LPM BIT(2)
+
+struct exynos_dsi {
+ struct mipi_dsi_host dsi_host;
+ struct drm_connector connector;
+ struct drm_encoder *encoder;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+ struct device *dev;
+
+ void __iomem *reg_base;
+ struct phy *phy;
+ struct clk *pll_clk;
+ struct clk *bus_clk;
+ struct regulator_bulk_data supplies[2];
+ int irq;
+
+ u32 pll_clk_rate;
+ u32 burst_clk_rate;
+ u32 esc_clk_rate;
+ u32 lanes;
+ u32 mode_flags;
+ u32 format;
+ struct videomode vm;
+
+ int state;
+ struct drm_property *brightness;
+ struct completion completed;
+
+ spinlock_t transfer_lock; /* protects transfer_list */
+ struct list_head transfer_list;
+};
+
+#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
+#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
+
+static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
+{
+ if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
+ return;
+
+ dev_err(dsi->dev, "timeout waiting for reset\n");
+}
+
+static void exynos_dsi_reset(struct exynos_dsi *dsi)
+{
+ reinit_completion(&dsi->completed);
+ writel(DSIM_SWRST, dsi->reg_base + DSIM_SWRST_REG);
+}
+
+#ifndef MHZ
+#define MHZ (1000*1000)
+#endif
+
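+/*
+ * Brute-force search for the PLL P/M/S dividers: the output frequency is
+ * fout = fin * M / (P << S), with M limited to 41..125 and the VCO
+ * frequency fin * M / P kept within 500..1000 MHz. Among all valid
+ * combinations the one with the smallest deviation from the requested
+ * frequency wins. Returns the achievable frequency, or 0 if none fits.
+ */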
+static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
+ unsigned long fin, unsigned long fout, u8 *p, u16 *m, u8 *s)
+{
+ unsigned long best_freq = 0;
+ u32 min_delta = 0xffffffff;
+ u8 p_min, p_max;
+ u8 _p, uninitialized_var(best_p);
+ u16 _m, uninitialized_var(best_m);
+ u8 _s, uninitialized_var(best_s);
+
+ p_min = DIV_ROUND_UP(fin, (12 * MHZ));
+ p_max = fin / (6 * MHZ);
+
+ for (_p = p_min; _p <= p_max; ++_p) {
+ for (_s = 0; _s <= 5; ++_s) {
+ u64 tmp;
+ u32 delta;
+
+ tmp = (u64)fout * (_p << _s);
+ do_div(tmp, fin);
+ _m = tmp;
+ if (_m < 41 || _m > 125)
+ continue;
+
+ tmp = (u64)_m * fin;
+ do_div(tmp, _p);
+ if (tmp < 500 * MHZ || tmp > 1000 * MHZ)
+ continue;
+
+ tmp = (u64)_m * fin;
+ do_div(tmp, _p << _s);
+
+ delta = abs(fout - tmp);
+ if (delta < min_delta) {
+ best_p = _p;
+ best_m = _m;
+ best_s = _s;
+ min_delta = delta;
+ best_freq = tmp;
+ }
+ }
+ }
+
+ if (best_freq) {
+ *p = best_p;
+ *m = best_m;
+ *s = best_s;
+ }
+
+ return best_freq;
+}
+
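+/*
+ * Program the DSI PLL: find P/M/S for the requested frequency, select
+ * the matching frequency band, then poll DSIM_STATUS_REG until the
+ * hardware reports DSIM_PLL_STABLE.
+ */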
+static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
+ unsigned long freq)
+{
+ static const unsigned long freq_bands[] = {
+ 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
+ 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
+ 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
+ 770 * MHZ, 870 * MHZ, 950 * MHZ,
+ };
+ unsigned long fin, fout;
+ int timeout, band;
+ u8 p, s;
+ u16 m;
+ u32 reg;
+
+ clk_set_rate(dsi->pll_clk, dsi->pll_clk_rate);
+
+ fin = clk_get_rate(dsi->pll_clk);
+ if (!fin) {
+ dev_err(dsi->dev, "failed to get PLL clock frequency\n");
+ return 0;
+ }
+
+ dev_dbg(dsi->dev, "PLL input frequency: %lu\n", fin);
+
+ fout = exynos_dsi_pll_find_pms(dsi, fin, freq, &p, &m, &s);
+	if (!fout) {
+		dev_err(dsi->dev,
+			"failed to find PLL PMS for requested frequency\n");
+		return 0;
+	}
+
+ for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
+ if (fout < freq_bands[band])
+ break;
+
+ dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d), band %d\n", fout,
+ p, m, s, band);
+
+ writel(500, dsi->reg_base + DSIM_PLLTMR_REG);
+
+ reg = DSIM_FREQ_BAND(band) | DSIM_PLL_EN
+ | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
+ writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG);
+
+ timeout = 1000;
+ do {
+		if (timeout-- == 0) {
+			dev_err(dsi->dev, "PLL failed to stabilize\n");
+			return 0;
+		}
+ reg = readl(dsi->reg_base + DSIM_STATUS_REG);
+ } while ((reg & DSIM_PLL_STABLE) == 0);
+
+ return fout;
+}
+
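+/*
+ * Derive the byte clock (HS clock / 8) and the escape clock from the
+ * PLL output; the escape clock prescaler is increased until the escape
+ * clock no longer exceeds 20 MHz.
+ */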
+static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
+{
+ unsigned long hs_clk, byte_clk, esc_clk;
+ unsigned long esc_div;
+ u32 reg;
+
+ hs_clk = exynos_dsi_set_pll(dsi, dsi->burst_clk_rate);
+ if (!hs_clk) {
+ dev_err(dsi->dev, "failed to configure DSI PLL\n");
+ return -EFAULT;
+ }
+
+ byte_clk = hs_clk / 8;
+ esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate);
+ esc_clk = byte_clk / esc_div;
+
+ if (esc_clk > 20 * MHZ) {
+ ++esc_div;
+ esc_clk = byte_clk / esc_div;
+ }
+
+ dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n",
+ hs_clk, byte_clk, esc_clk);
+
+ reg = readl(dsi->reg_base + DSIM_CLKCTRL_REG);
+ reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
+ | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
+ | DSIM_BYTE_CLK_SRC_MASK);
+ reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN
+ | DSIM_ESC_PRESCALER(esc_div)
+ | DSIM_LANE_ESC_CLK_EN_CLK
+ | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
+ | DSIM_BYTE_CLK_SRC(0)
+ | DSIM_TX_REQUEST_HSCLK;
+ writel(reg, dsi->reg_base + DSIM_CLKCTRL_REG);
+
+ return 0;
+}
+
+static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
+{
+ u32 reg;
+
+ reg = readl(dsi->reg_base + DSIM_CLKCTRL_REG);
+ reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
+ | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
+ writel(reg, dsi->reg_base + DSIM_CLKCTRL_REG);
+
+ reg = readl(dsi->reg_base + DSIM_PLLCTRL_REG);
+ reg &= ~DSIM_PLL_EN;
+ writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG);
+}
+
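+/*
+ * Bring up the DSI link: flush the FIFOs, translate the mode flags and
+ * pixel format into DSIM_CONFIG_REG bits, enable the clock and data
+ * lanes, and wait for all lanes to reach stop state before programming
+ * the stop-state counter and the BTA/LP-DR timeouts.
+ */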
+static int exynos_dsi_init_link(struct exynos_dsi *dsi)
+{
+ int timeout;
+ u32 reg;
+ u32 lanes_mask;
+
+ /* Initialize FIFO pointers */
+ reg = readl(dsi->reg_base + DSIM_FIFOCTRL_REG);
+ reg &= ~0x1f;
+ writel(reg, dsi->reg_base + DSIM_FIFOCTRL_REG);
+
+ usleep_range(9000, 11000);
+
+ reg |= 0x1f;
+ writel(reg, dsi->reg_base + DSIM_FIFOCTRL_REG);
+
+ usleep_range(9000, 11000);
+
+ /* DSI configuration */
+ reg = 0;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ reg |= DSIM_VIDEO_MODE;
+
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
+ reg |= DSIM_MFLUSH_VS;
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
+ reg |= DSIM_EOT_DISABLE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ reg |= DSIM_SYNC_INFORM;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ reg |= DSIM_BURST_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_AUTO_VERT)
+ reg |= DSIM_AUTO_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE)
+ reg |= DSIM_HSE_MODE;
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HFP))
+ reg |= DSIM_HFP_MODE;
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HBP))
+ reg |= DSIM_HBP_MODE;
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSA))
+ reg |= DSIM_HSA_MODE;
+ }
+
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB888:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB888;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB666;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB666_P;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB565;
+ break;
+ default:
+ dev_err(dsi->dev, "invalid pixel format\n");
+ return -EINVAL;
+ }
+
+ reg |= DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1);
+
+ writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
+
+ reg |= DSIM_LANE_EN_CLK;
+ writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
+
+ lanes_mask = BIT(dsi->lanes) - 1;
+ reg |= DSIM_LANE_EN(lanes_mask);
+ writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
+
+	/* Check that the clock and data lanes are in stop state */
+ timeout = 100;
+ do {
+ if (timeout-- == 0) {
+ dev_err(dsi->dev, "waiting for bus lanes timed out\n");
+ return -EFAULT;
+ }
+
+ reg = readl(dsi->reg_base + DSIM_STATUS_REG);
+ if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
+ != DSIM_STOP_STATE_DAT(lanes_mask))
+ continue;
+ } while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK)));
+
+ reg = readl(dsi->reg_base + DSIM_ESCMODE_REG);
+ reg &= ~DSIM_STOP_STATE_CNT_MASK;
+ reg |= DSIM_STOP_STATE_CNT(0xf);
+ writel(reg, dsi->reg_base + DSIM_ESCMODE_REG);
+
+ reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
+ writel(reg, dsi->reg_base + DSIM_TIMEOUT_REG);
+
+ return 0;
+}
+
+static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi)
+{
+ struct videomode *vm = &dsi->vm;
+ u32 reg;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ reg = DSIM_CMD_ALLOW(0xf)
+ | DSIM_STABLE_VFP(vm->vfront_porch)
+ | DSIM_MAIN_VBP(vm->vback_porch);
+ writel(reg, dsi->reg_base + DSIM_MVPORCH_REG);
+
+ reg = DSIM_MAIN_HFP(vm->hfront_porch)
+ | DSIM_MAIN_HBP(vm->hback_porch);
+ writel(reg, dsi->reg_base + DSIM_MHPORCH_REG);
+
+ reg = DSIM_MAIN_VSA(vm->vsync_len)
+ | DSIM_MAIN_HSA(vm->hsync_len);
+ writel(reg, dsi->reg_base + DSIM_MSYNC_REG);
+ }
+
+ reg = DSIM_MAIN_HRESOL(vm->hactive) | DSIM_MAIN_VRESOL(vm->vactive);
+ writel(reg, dsi->reg_base + DSIM_MDRESOL_REG);
+
+ dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive);
+}
+
+static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable)
+{
+ u32 reg;
+
+ reg = readl(dsi->reg_base + DSIM_MDRESOL_REG);
+ if (enable)
+ reg |= DSIM_MAIN_STAND_BY;
+ else
+ reg &= ~DSIM_MAIN_STAND_BY;
+ writel(reg, dsi->reg_base + DSIM_MDRESOL_REG);
+}
+
+static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi)
+{
+ int timeout = 2000;
+
+ do {
+ u32 reg = readl(dsi->reg_base + DSIM_FIFOCTRL_REG);
+
+ if (!(reg & DSIM_SFR_HEADER_FULL))
+ return 0;
+
+ if (!cond_resched())
+ usleep_range(950, 1050);
+ } while (--timeout);
+
+ return -ETIMEDOUT;
+}
+
+static void exynos_dsi_set_cmd_lpm(struct exynos_dsi *dsi, bool lpm)
+{
+ u32 v = readl(dsi->reg_base + DSIM_ESCMODE_REG);
+
+ if (lpm)
+ v |= DSIM_CMD_LPDT_LP;
+ else
+ v &= ~DSIM_CMD_LPDT_LP;
+
+ writel(v, dsi->reg_base + DSIM_ESCMODE_REG);
+}
+
+static void exynos_dsi_force_bta(struct exynos_dsi *dsi)
+{
+ u32 v = readl(dsi->reg_base + DSIM_ESCMODE_REG);
+
+ v |= DSIM_FORCE_BTA;
+ writel(v, dsi->reg_base + DSIM_ESCMODE_REG);
+}
+
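+/*
+ * Push up to one TX FIFO's worth of payload, four little-endian bytes
+ * per DSIM_PAYLOAD_REG write. On the first chunk the packet header is
+ * written last, which is what actually starts the transmission; the
+ * LP/HS command mode is switched beforehand if it differs from the
+ * current state, and a BTA is forced when the message requests an ACK.
+ */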
+static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
+ struct exynos_dsi_transfer *xfer)
+{
+ struct device *dev = dsi->dev;
+ const u8 *payload = xfer->tx_payload + xfer->tx_done;
+ u16 length = xfer->tx_len - xfer->tx_done;
+ bool first = !xfer->tx_done;
+ u32 reg;
+
+ dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n",
+ xfer, xfer->tx_len, xfer->tx_done, xfer->rx_len, xfer->rx_done);
+
+ if (length > DSI_TX_FIFO_SIZE)
+ length = DSI_TX_FIFO_SIZE;
+
+ xfer->tx_done += length;
+
+ /* Send payload */
+ while (length >= 4) {
+ reg = (payload[3] << 24) | (payload[2] << 16)
+ | (payload[1] << 8) | payload[0];
+ writel(reg, dsi->reg_base + DSIM_PAYLOAD_REG);
+ payload += 4;
+ length -= 4;
+ }
+
+ reg = 0;
+ switch (length) {
+ case 3:
+ reg |= payload[2] << 16;
+ /* Fall through */
+ case 2:
+ reg |= payload[1] << 8;
+ /* Fall through */
+ case 1:
+ reg |= payload[0];
+ writel(reg, dsi->reg_base + DSIM_PAYLOAD_REG);
+ break;
+ case 0:
+ /* Do nothing */
+ break;
+ }
+
+ /* Send packet header */
+ if (!first)
+ return;
+
+ reg = (xfer->data[1] << 16) | (xfer->data[0] << 8) | xfer->data_id;
+ if (exynos_dsi_wait_for_hdr_fifo(dsi)) {
+ dev_err(dev, "waiting for header FIFO timed out\n");
+ return;
+ }
+
+ if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM,
+ dsi->state & DSIM_STATE_CMD_LPM)) {
+ exynos_dsi_set_cmd_lpm(dsi, xfer->flags & MIPI_DSI_MSG_USE_LPM);
+ dsi->state ^= DSIM_STATE_CMD_LPM;
+ }
+
+ writel(reg, dsi->reg_base + DSIM_PKTHDR_REG);
+
+ if (xfer->flags & MIPI_DSI_MSG_REQ_ACK)
+ exynos_dsi_force_bta(dsi);
+}
+
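+/*
+ * Drain the RX FIFO for the current transfer. The first word carries
+ * the packet type: short read responses complete immediately, an
+ * acknowledge-and-error report is only logged, and for long responses
+ * the word count is clamped to the receive buffer before the payload is
+ * read out. Whatever is left in the FIFO is flushed at the end.
+ */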
+static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
+ struct exynos_dsi_transfer *xfer)
+{
+ u8 *payload = xfer->rx_payload + xfer->rx_done;
+ bool first = !xfer->rx_done;
+ struct device *dev = dsi->dev;
+ u16 length;
+ u32 reg;
+
+ if (first) {
+ reg = readl(dsi->reg_base + DSIM_RXFIFO_REG);
+
+ switch (reg & 0x3f) {
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ if (xfer->rx_len >= 2) {
+ payload[1] = reg >> 16;
+ ++xfer->rx_done;
+ }
+ /* Fall through */
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ payload[0] = reg >> 8;
+ ++xfer->rx_done;
+ xfer->rx_len = xfer->rx_done;
+ xfer->result = 0;
+ goto clear_fifo;
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ dev_err(dev, "DSI Error Report: 0x%04x\n",
+ (reg >> 8) & 0xffff);
+ xfer->result = 0;
+ goto clear_fifo;
+ }
+
+ length = (reg >> 8) & 0xffff;
+ if (length > xfer->rx_len) {
+			dev_err(dev,
+				"response too long (%u > %u bytes), stripping\n",
+				length, xfer->rx_len);
+			length = xfer->rx_len;
+		} else if (length < xfer->rx_len) {
+			xfer->rx_len = length;
+		}
+ }
+
+ length = xfer->rx_len - xfer->rx_done;
+ xfer->rx_done += length;
+
+ /* Receive payload */
+ while (length >= 4) {
+ reg = readl(dsi->reg_base + DSIM_RXFIFO_REG);
+ payload[0] = (reg >> 0) & 0xff;
+ payload[1] = (reg >> 8) & 0xff;
+ payload[2] = (reg >> 16) & 0xff;
+ payload[3] = (reg >> 24) & 0xff;
+ payload += 4;
+ length -= 4;
+ }
+
+ if (length) {
+ reg = readl(dsi->reg_base + DSIM_RXFIFO_REG);
+ switch (length) {
+ case 3:
+ payload[2] = (reg >> 16) & 0xff;
+ /* Fall through */
+ case 2:
+ payload[1] = (reg >> 8) & 0xff;
+ /* Fall through */
+ case 1:
+ payload[0] = reg & 0xff;
+ }
+ }
+
+ if (xfer->rx_done == xfer->rx_len)
+ xfer->result = 0;
+
+clear_fifo:
+ length = DSI_RX_FIFO_SIZE / 4;
+ do {
+ reg = readl(dsi->reg_base + DSIM_RXFIFO_REG);
+ if (reg == DSI_RX_FIFO_EMPTY)
+ break;
+ } while (--length);
+}
+
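+/*
+ * Feed the head of the transfer queue to the hardware. A transfer that
+ * still expects RX data stays at the head until the interrupt handler
+ * finishes it; transfers with neither TX nor RX payload are completed
+ * immediately and the next queued transfer is started.
+ */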
+static void exynos_dsi_transfer_start(struct exynos_dsi *dsi)
+{
+ unsigned long flags;
+ struct exynos_dsi_transfer *xfer;
+ bool start = false;
+
+again:
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ if (list_empty(&dsi->transfer_list)) {
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ return;
+ }
+
+ xfer = list_first_entry(&dsi->transfer_list,
+ struct exynos_dsi_transfer, list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (xfer->tx_len && xfer->tx_done == xfer->tx_len)
+ /* waiting for RX */
+ return;
+
+ exynos_dsi_send_to_fifo(dsi, xfer);
+
+ if (xfer->tx_len || xfer->rx_len)
+ return;
+
+ xfer->result = 0;
+ complete(&xfer->completed);
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ list_del_init(&xfer->list);
+ start = !list_empty(&dsi->transfer_list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (start)
+ goto again;
+}
+
+static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi)
+{
+ struct exynos_dsi_transfer *xfer;
+ unsigned long flags;
+ bool start = true;
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ if (list_empty(&dsi->transfer_list)) {
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ return false;
+ }
+
+ xfer = list_first_entry(&dsi->transfer_list,
+ struct exynos_dsi_transfer, list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ dev_dbg(dsi->dev,
+ "> xfer %p, tx_len %u, tx_done %u, rx_len %u, rx_done %u\n",
+ xfer, xfer->tx_len, xfer->tx_done, xfer->rx_len, xfer->rx_done);
+
+ if (xfer->tx_done != xfer->tx_len)
+ return true;
+
+ if (xfer->rx_done != xfer->rx_len)
+ exynos_dsi_read_from_fifo(dsi, xfer);
+
+ if (xfer->rx_done != xfer->rx_len)
+ return true;
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ list_del_init(&xfer->list);
+ start = !list_empty(&dsi->transfer_list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (!xfer->rx_len)
+ xfer->result = 0;
+ complete(&xfer->completed);
+
+ return start;
+}
+
+static void exynos_dsi_remove_transfer(struct exynos_dsi *dsi,
+ struct exynos_dsi_transfer *xfer)
+{
+ unsigned long flags;
+ bool start;
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ if (!list_empty(&dsi->transfer_list) &&
+ xfer == list_first_entry(&dsi->transfer_list,
+ struct exynos_dsi_transfer, list)) {
+ list_del_init(&xfer->list);
+ start = !list_empty(&dsi->transfer_list);
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ if (start)
+ exynos_dsi_transfer_start(dsi);
+ return;
+ }
+
+ list_del_init(&xfer->list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+}
+
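+/*
+ * Queue a transfer and wait for it to complete. The result defaults to
+ * -ETIMEDOUT and is overwritten by the interrupt path on completion; if
+ * the wait times out, the transfer is unlinked from the queue before
+ * returning.
+ */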
+static int exynos_dsi_transfer(struct exynos_dsi *dsi,
+ struct exynos_dsi_transfer *xfer)
+{
+ unsigned long flags;
+ bool stopped;
+
+ xfer->tx_done = 0;
+ xfer->rx_done = 0;
+ xfer->result = -ETIMEDOUT;
+ init_completion(&xfer->completed);
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ stopped = list_empty(&dsi->transfer_list);
+ list_add_tail(&xfer->list, &dsi->transfer_list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (stopped)
+ exynos_dsi_transfer_start(dsi);
+
+ wait_for_completion_timeout(&xfer->completed,
+ msecs_to_jiffies(DSI_XFER_TIMEOUT_MS));
+ if (xfer->result == -ETIMEDOUT) {
+ exynos_dsi_remove_transfer(dsi, xfer);
+ dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 2, xfer->data,
+ xfer->tx_len, xfer->tx_payload);
+ return -ETIMEDOUT;
+ }
+
+ /* Also covers hardware timeout condition */
+ return xfer->result;
+}
+
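+/*
+ * Interrupt handler: acknowledge the raised sources, complete reset
+ * waiters on DSIM_INT_SW_RST_RELEASE (unmasking the transfer interrupts
+ * at the same time), and advance the transfer state machine on RX-done
+ * or FIFO-empty events.
+ */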
+static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
+{
+ struct exynos_dsi *dsi = dev_id;
+ u32 status;
+
+ status = readl(dsi->reg_base + DSIM_INTSRC_REG);
+	if (!status) {
+		static unsigned long j;
+
+		if (printk_timed_ratelimit(&j, 500))
+ dev_warn(dsi->dev, "spurious interrupt\n");
+ return IRQ_HANDLED;
+ }
+ writel(status, dsi->reg_base + DSIM_INTSRC_REG);
+
+ if (status & DSIM_INT_SW_RST_RELEASE) {
+ u32 mask = ~(DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY);
+ writel(mask, dsi->reg_base + DSIM_INTMSK_REG);
+ complete(&dsi->completed);
+ return IRQ_HANDLED;
+ }
+
+ if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY)))
+ return IRQ_HANDLED;
+
+ if (exynos_dsi_transfer_finish(dsi))
+ exynos_dsi_transfer_start(dsi);
+
+ return IRQ_HANDLED;
+}
+
+static int exynos_dsi_init(struct exynos_dsi *dsi)
+{
+ exynos_dsi_enable_clock(dsi);
+ exynos_dsi_reset(dsi);
+ enable_irq(dsi->irq);
+ exynos_dsi_wait_for_reset(dsi);
+ exynos_dsi_init_link(dsi);
+
+ return 0;
+}
+
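+/*
+ * mipi_dsi_host attach/detach callbacks: cache the peripheral's lane
+ * count, pixel format and mode flags, remember its DT node for the
+ * panel lookup in exynos_dsi_detect(), and kick a hotplug event so the
+ * connector gets re-probed.
+ */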
+static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+
+ dsi->lanes = device->lanes;
+ dsi->format = device->format;
+ dsi->mode_flags = device->mode_flags;
+ dsi->panel_node = device->dev.of_node;
+
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
+ return 0;
+}
+
+static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+
+ dsi->panel_node = NULL;
+
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
+ return 0;
+}
+
+/* distinguish between short and long DSI packet types */
+static bool exynos_dsi_is_short_dsi_type(u8 type)
+{
+ return (type & 0x0f) <= 8;
+}
+
+static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
+ struct mipi_dsi_msg *msg)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+ struct exynos_dsi_transfer xfer;
+ int ret;
+
+ if (!(dsi->state & DSIM_STATE_INITIALIZED)) {
+ ret = exynos_dsi_init(dsi);
+ if (ret)
+ return ret;
+ dsi->state |= DSIM_STATE_INITIALIZED;
+ }
+
+ if (msg->tx_len == 0)
+ return -EINVAL;
+
+ xfer.data_id = msg->type | (msg->channel << 6);
+
+ if (exynos_dsi_is_short_dsi_type(msg->type)) {
+ const char *tx_buf = msg->tx_buf;
+
+ if (msg->tx_len > 2)
+ return -EINVAL;
+ xfer.tx_len = 0;
+ xfer.data[0] = tx_buf[0];
+ xfer.data[1] = (msg->tx_len == 2) ? tx_buf[1] : 0;
+ } else {
+ xfer.tx_len = msg->tx_len;
+ xfer.data[0] = msg->tx_len & 0xff;
+ xfer.data[1] = msg->tx_len >> 8;
+ xfer.tx_payload = msg->tx_buf;
+ }
+
+ xfer.rx_len = msg->rx_len;
+ xfer.rx_payload = msg->rx_buf;
+ xfer.flags = msg->flags;
+
+ ret = exynos_dsi_transfer(dsi, &xfer);
+ return (ret < 0) ? ret : xfer.rx_done;
+}
+
+static const struct mipi_dsi_host_ops exynos_dsi_ops = {
+ .attach = exynos_dsi_host_attach,
+ .detach = exynos_dsi_host_detach,
+ .transfer = exynos_dsi_host_transfer,
+};
+
+static int exynos_dsi_poweron(struct exynos_dsi *dsi)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+ if (ret < 0) {
+ dev_err(dsi->dev, "cannot enable regulators %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dsi->bus_clk);
+ if (ret < 0) {
+ dev_err(dsi->dev, "cannot enable bus clock %d\n", ret);
+ goto err_bus_clk;
+ }
+
+ ret = clk_prepare_enable(dsi->pll_clk);
+ if (ret < 0) {
+ dev_err(dsi->dev, "cannot enable pll clock %d\n", ret);
+ goto err_pll_clk;
+ }
+
+ ret = phy_power_on(dsi->phy);
+ if (ret < 0) {
+ dev_err(dsi->dev, "cannot enable phy %d\n", ret);
+ goto err_phy;
+ }
+
+ return 0;
+
+err_phy:
+ clk_disable_unprepare(dsi->pll_clk);
+err_pll_clk:
+ clk_disable_unprepare(dsi->bus_clk);
+err_bus_clk:
+ regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+
+ return ret;
+}
+
+static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
+{
+ int ret;
+
+ usleep_range(10000, 20000);
+
+ if (dsi->state & DSIM_STATE_INITIALIZED) {
+ dsi->state &= ~DSIM_STATE_INITIALIZED;
+
+ exynos_dsi_disable_clock(dsi);
+
+ disable_irq(dsi->irq);
+ }
+
+ dsi->state &= ~DSIM_STATE_CMD_LPM;
+
+ phy_power_off(dsi->phy);
+
+ clk_disable_unprepare(dsi->pll_clk);
+ clk_disable_unprepare(dsi->bus_clk);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+ if (ret < 0)
+ dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
+}
+
+static int exynos_dsi_enable(struct exynos_dsi *dsi)
+{
+ int ret;
+
+ if (dsi->state & DSIM_STATE_ENABLED)
+ return 0;
+
+ ret = exynos_dsi_poweron(dsi);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_panel_enable(dsi->panel);
+ if (ret < 0) {
+ exynos_dsi_poweroff(dsi);
+ return ret;
+ }
+
+ exynos_dsi_set_display_mode(dsi);
+ exynos_dsi_set_display_enable(dsi, true);
+
+ dsi->state |= DSIM_STATE_ENABLED;
+
+ return 0;
+}
+
+static void exynos_dsi_disable(struct exynos_dsi *dsi)
+{
+ if (!(dsi->state & DSIM_STATE_ENABLED))
+ return;
+
+ exynos_dsi_set_display_enable(dsi, false);
+ drm_panel_disable(dsi->panel);
+ exynos_dsi_poweroff(dsi);
+
+ dsi->state &= ~DSIM_STATE_ENABLED;
+}
+
+static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode)
+{
+ struct exynos_dsi *dsi = display->ctx;
+
+ if (dsi->panel) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ exynos_dsi_enable(dsi);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ exynos_dsi_disable(dsi);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static enum drm_connector_status
+exynos_dsi_detect(struct drm_connector *connector, bool force)
+{
+ struct exynos_dsi *dsi = connector_to_dsi(connector);
+
+ if (!dsi->panel) {
+ dsi->panel = of_drm_find_panel(dsi->panel_node);
+ if (dsi->panel)
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ } else if (!dsi->panel_node) {
+ struct exynos_drm_display *display;
+
+ display = platform_get_drvdata(to_platform_device(dsi->dev));
+ exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
+ drm_panel_detach(dsi->panel);
+ dsi->panel = NULL;
+ }
+
+ if (dsi->panel)
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+
+static void exynos_dsi_connector_destroy(struct drm_connector *connector)
+{
+}
+
+static struct drm_connector_funcs exynos_dsi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = exynos_dsi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = exynos_dsi_connector_destroy,
+};
+
+static int exynos_dsi_get_modes(struct drm_connector *connector)
+{
+ struct exynos_dsi *dsi = connector_to_dsi(connector);
+
+ if (dsi->panel)
+ return dsi->panel->funcs->get_modes(dsi->panel);
+
+ return 0;
+}
+
+static int exynos_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+exynos_dsi_best_encoder(struct drm_connector *connector)
+{
+ struct exynos_dsi *dsi = connector_to_dsi(connector);
+
+ return dsi->encoder;
+}
+
+static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
+ .get_modes = exynos_dsi_get_modes,
+ .mode_valid = exynos_dsi_mode_valid,
+ .best_encoder = exynos_dsi_best_encoder,
+};
+
+static int exynos_dsi_create_connector(struct exynos_drm_display *display,
+ struct drm_encoder *encoder)
+{
+ struct exynos_dsi *dsi = display->ctx;
+ struct drm_connector *connector = &dsi->connector;
+ int ret;
+
+ dsi->encoder = encoder;
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &exynos_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
+ drm_sysfs_connector_add(connector);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+static void exynos_dsi_mode_set(struct exynos_drm_display *display,
+ struct drm_display_mode *mode)
+{
+ struct exynos_dsi *dsi = display->ctx;
+ struct videomode *vm = &dsi->vm;
+
+ vm->hactive = mode->hdisplay;
+ vm->vactive = mode->vdisplay;
+ vm->vfront_porch = mode->vsync_start - mode->vdisplay;
+ vm->vback_porch = mode->vtotal - mode->vsync_end;
+ vm->vsync_len = mode->vsync_end - mode->vsync_start;
+ vm->hfront_porch = mode->hsync_start - mode->hdisplay;
+ vm->hback_porch = mode->htotal - mode->hsync_end;
+ vm->hsync_len = mode->hsync_end - mode->hsync_start;
+}
+
+static struct exynos_drm_display_ops exynos_dsi_display_ops = {
+ .create_connector = exynos_dsi_create_connector,
+ .mode_set = exynos_dsi_mode_set,
+	.dpms = exynos_dsi_dpms,
+};
+
+static struct exynos_drm_display exynos_dsi_display = {
+ .type = EXYNOS_DISPLAY_TYPE_LCD,
+ .ops = &exynos_dsi_display_ops,
+};
+
+/* of_* functions will be removed after merge of of_graph patches */
+static struct device_node *
+of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg)
+{
+ struct device_node *np;
+
+ for_each_child_of_node(parent, np) {
+ u32 r;
+
+ if (!np->name || of_node_cmp(np->name, name))
+ continue;
+
+ if (of_property_read_u32(np, "reg", &r) < 0)
+ r = 0;
+
+ if (reg == r)
+ break;
+ }
+
+ return np;
+}
+
+static struct device_node *of_graph_get_port_by_reg(struct device_node *parent,
+ u32 reg)
+{
+ struct device_node *ports, *port;
+
+ ports = of_get_child_by_name(parent, "ports");
+ if (ports)
+ parent = ports;
+
+ port = of_get_child_by_name_reg(parent, "port", reg);
+
+ of_node_put(ports);
+
+ return port;
+}
+
+static struct device_node *
+of_graph_get_endpoint_by_reg(struct device_node *port, u32 reg)
+{
+ return of_get_child_by_name_reg(port, "endpoint", reg);
+}
+
+static int exynos_dsi_of_read_u32(const struct device_node *np,
+ const char *propname, u32 *out_value)
+{
+ int ret = of_property_read_u32(np, propname, out_value);
+
+ if (ret < 0)
+ pr_err("%s: failed to get '%s' property\n", np->full_name,
+ propname);
+
+ return ret;
+}
+
+enum {
+ DSI_PORT_IN,
+ DSI_PORT_OUT
+};
+
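+/*
+ * Read the DSIM clock parameters from the device tree: the PLL input
+ * frequency from the controller node, and the burst and escape clock
+ * frequencies from endpoint 0 of output port DSI_PORT_OUT.
+ */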
+static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *port, *ep;
+ int ret;
+
+ ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
+ &dsi->pll_clk_rate);
+ if (ret < 0)
+ return ret;
+
+ port = of_graph_get_port_by_reg(node, DSI_PORT_OUT);
+ if (!port) {
+ dev_err(dev, "no output port specified\n");
+ return -EINVAL;
+ }
+
+ ep = of_graph_get_endpoint_by_reg(port, 0);
+ of_node_put(port);
+ if (!ep) {
+ dev_err(dev, "no endpoint specified in output port\n");
+ return -EINVAL;
+ }
+
+ ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency",
+ &dsi->burst_clk_rate);
+ if (ret < 0)
+ goto end;
+
+ ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency",
+ &dsi->esc_clk_rate);
+
+end:
+ of_node_put(ep);
+
+ return ret;
+}
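+
+/*
+ * The properties parsed above follow the common of_graph layout; an
+ * illustrative (not authoritative) device tree fragment, with the output
+ * port at reg 1 to match DSI_PORT_OUT, could look like:
+ *
+ *	dsi {
+ *		samsung,pll-clock-frequency = <24000000>;
+ *		ports {
+ *			port@1 {
+ *				reg = <1>;
+ *				endpoint {
+ *					samsung,burst-clock-frequency = <500000000>;
+ *					samsung,esc-clock-frequency = <20000000>;
+ *				};
+ *			};
+ *		};
+ *	};
+ */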
+
+static int exynos_dsi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm_dev = data;
+ struct exynos_dsi *dsi;
+ int ret;
+
+ ret = exynos_drm_create_enc_conn(drm_dev, &exynos_dsi_display);
+ if (ret) {
+ DRM_ERROR("Encoder create [%d] failed with %d\n",
+ exynos_dsi_display.type, ret);
+ return ret;
+ }
+
+ dsi = exynos_dsi_display.ctx;
+
+ return mipi_dsi_host_register(&dsi->dsi_host);
+}
+
+static void exynos_dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct exynos_dsi *dsi = exynos_dsi_display.ctx;
+ struct drm_encoder *encoder = dsi->encoder;
+
+ exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
+
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+
+ encoder->funcs->destroy(encoder);
+ drm_connector_cleanup(&dsi->connector);
+}
+
+static const struct component_ops exynos_dsi_component_ops = {
+ .bind = exynos_dsi_bind,
+ .unbind = exynos_dsi_unbind,
+};
+
+static int exynos_dsi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct exynos_dsi *dsi;
+ int ret;
+
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+ exynos_dsi_display.type);
+ if (ret)
+ return ret;
+
+ dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi) {
+ dev_err(&pdev->dev, "failed to allocate dsi object.\n");
+ ret = -ENOMEM;
+ goto err_del_component;
+ }
+
+ init_completion(&dsi->completed);
+ spin_lock_init(&dsi->transfer_lock);
+ INIT_LIST_HEAD(&dsi->transfer_list);
+
+ dsi->dsi_host.ops = &exynos_dsi_ops;
+ dsi->dsi_host.dev = &pdev->dev;
+
+ dsi->dev = &pdev->dev;
+
+ ret = exynos_dsi_parse_dt(dsi);
+ if (ret)
+ goto err_del_component;
+
+ dsi->supplies[0].supply = "vddcore";
+ dsi->supplies[1].supply = "vddio";
+ ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(dsi->supplies),
+ dsi->supplies);
+	if (ret) {
+		dev_info(&pdev->dev, "failed to get regulators: %d\n", ret);
+		ret = -EPROBE_DEFER;
+		goto err_del_component;
+	}
+
+ dsi->pll_clk = devm_clk_get(&pdev->dev, "pll_clk");
+ if (IS_ERR(dsi->pll_clk)) {
+ dev_info(&pdev->dev, "failed to get dsi pll input clock\n");
+ ret = PTR_ERR(dsi->pll_clk);
+ goto err_del_component;
+ }
+
+ dsi->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+ if (IS_ERR(dsi->bus_clk)) {
+ dev_info(&pdev->dev, "failed to get dsi bus clock\n");
+ ret = PTR_ERR(dsi->bus_clk);
+ goto err_del_component;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dsi->reg_base)) {
+ dev_err(&pdev->dev, "failed to remap io region\n");
+ ret = PTR_ERR(dsi->reg_base);
+ goto err_del_component;
+ }
+
+ dsi->phy = devm_phy_get(&pdev->dev, "dsim");
+ if (IS_ERR(dsi->phy)) {
+ dev_info(&pdev->dev, "failed to get dsim phy\n");
+ ret = PTR_ERR(dsi->phy);
+ goto err_del_component;
+ }
+
+ dsi->irq = platform_get_irq(pdev, 0);
+ if (dsi->irq < 0) {
+ dev_err(&pdev->dev, "failed to request dsi irq resource\n");
+ ret = dsi->irq;
+ goto err_del_component;
+ }
+
+ irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(&pdev->dev, dsi->irq, NULL,
+ exynos_dsi_irq, IRQF_ONESHOT,
+ dev_name(&pdev->dev), dsi);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request dsi irq\n");
+ goto err_del_component;
+ }
+
+ exynos_dsi_display.ctx = dsi;
+
+ platform_set_drvdata(pdev, &exynos_dsi_display);
+
+ ret = component_add(&pdev->dev, &exynos_dsi_component_ops);
+ if (ret)
+ goto err_del_component;
+
+ return ret;
+
+err_del_component:
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+ return ret;
+}
+
+static int exynos_dsi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &exynos_dsi_component_ops);
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+
+ return 0;
+}
+
+static const struct of_device_id exynos_dsi_of_match[] = {
+ { .compatible = "samsung,exynos4210-mipi-dsi" },
+ { }
+};
+
+struct platform_driver dsi_driver = {
+ .probe = exynos_dsi_probe,
+ .remove = exynos_dsi_remove,
+ .driver = {
+ .name = "exynos-dsi",
+ .owner = THIS_MODULE,
+ .of_match_table = exynos_dsi_of_match,
+ },
+};
+
+MODULE_AUTHOR("Tomasz Figa <t.figa@samsung.com>");
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC MIPI DSI Master");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
new file mode 100644
index 00000000000..7e282e3d603
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -0,0 +1,197 @@
+/* exynos_drm_encoder.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+
+#define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\
+ drm_encoder)
+
+/*
+ * exynos specific encoder structure.
+ *
+ * @drm_encoder: encoder object.
+ * @display: the display structure that maps to this encoder
+ */
+struct exynos_drm_encoder {
+ struct drm_encoder drm_encoder;
+ struct exynos_drm_display *display;
+};
+
+static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ struct exynos_drm_display *display = exynos_encoder->display;
+
+ DRM_DEBUG_KMS("encoder dpms: %d\n", mode);
+
+ if (display->ops->dpms)
+ display->ops->dpms(display, mode);
+}
+
+static bool
+exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ struct exynos_drm_display *display = exynos_encoder->display;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ if (display->ops->mode_fixup)
+ display->ops->mode_fixup(display, connector, mode,
+ adjusted_mode);
+ }
+
+ return true;
+}
+
+static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ struct exynos_drm_display *display = exynos_encoder->display;
+
+ if (display->ops->mode_set)
+ display->ops->mode_set(display, adjusted_mode);
+}
+
+static void exynos_drm_encoder_prepare(struct drm_encoder *encoder)
+{
+ /* drm framework doesn't check NULL. */
+}
+
+static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
+{
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ struct exynos_drm_display *display = exynos_encoder->display;
+
+ if (display->ops->dpms)
+ display->ops->dpms(display, DRM_MODE_DPMS_ON);
+
+ if (display->ops->commit)
+ display->ops->commit(display);
+}
+
+static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_plane *plane;
+ struct drm_device *dev = encoder->dev;
+
+ exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ /* all planes connected to this encoder should be also disabled. */
+ drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+ if (plane->crtc == encoder->crtc)
+ plane->funcs->disable_plane(plane);
+ }
+}
+
+static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
+ .dpms = exynos_drm_encoder_dpms,
+ .mode_fixup = exynos_drm_encoder_mode_fixup,
+ .mode_set = exynos_drm_encoder_mode_set,
+ .prepare = exynos_drm_encoder_prepare,
+ .commit = exynos_drm_encoder_commit,
+ .disable = exynos_drm_encoder_disable,
+};
+
+static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(exynos_encoder);
+}
+
+static struct drm_encoder_funcs exynos_encoder_funcs = {
+ .destroy = exynos_drm_encoder_destroy,
+};
+
+static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder)
+{
+ struct drm_encoder *clone;
+ struct drm_device *dev = encoder->dev;
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ struct exynos_drm_display *display = exynos_encoder->display;
+ unsigned int clone_mask = 0;
+ int cnt = 0;
+
+ list_for_each_entry(clone, &dev->mode_config.encoder_list, head) {
+ switch (display->type) {
+ case EXYNOS_DISPLAY_TYPE_LCD:
+ case EXYNOS_DISPLAY_TYPE_HDMI:
+ case EXYNOS_DISPLAY_TYPE_VIDI:
+ clone_mask |= (1 << (cnt++));
+ break;
+ default:
+ continue;
+ }
+ }
+
+ return clone_mask;
+}
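+
+/*
+ * Note: the clone mask is derived from the encoder's own display type, so
+ * for a supported type (LCD/HDMI/VIDI) every encoder on the mode_config
+ * list is marked as a possible clone; e.g. with three encoders registered
+ * the computed mask is 0x7.
+ */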
+
+void exynos_drm_encoder_setup(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ encoder->possible_clones = exynos_drm_encoder_clones(encoder);
+}
+
+struct drm_encoder *
+exynos_drm_encoder_create(struct drm_device *dev,
+ struct exynos_drm_display *display,
+ unsigned long possible_crtcs)
+{
+ struct drm_encoder *encoder;
+ struct exynos_drm_encoder *exynos_encoder;
+
+ if (!possible_crtcs)
+ return NULL;
+
+ exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
+ if (!exynos_encoder)
+ return NULL;
+
+ exynos_encoder->display = display;
+ encoder = &exynos_encoder->drm_encoder;
+ encoder->possible_crtcs = possible_crtcs;
+
+ DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+ drm_encoder_init(dev, encoder, &exynos_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ drm_encoder_helper_add(encoder, &exynos_encoder_helper_funcs);
+
+ DRM_DEBUG_KMS("encoder has been created\n");
+
+ return encoder;
+}
+
+struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder)
+{
+ return to_exynos_encoder(encoder)->display;
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
new file mode 100644
index 00000000000..b7a1620a7e7
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_ENCODER_H_
+#define _EXYNOS_DRM_ENCODER_H_
+
+struct exynos_drm_display;
+
+void exynos_drm_encoder_setup(struct drm_device *dev);
+struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
+ struct exynos_drm_display *mgr,
+ unsigned long possible_crtcs);
+struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
new file mode 100644
index 00000000000..65a22cad7b3
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -0,0 +1,327 @@
+/* exynos_drm_fb.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_fbdev.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_crtc.h"
+
+#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
+
+/*
+ * exynos specific framebuffer structure.
+ *
+ * @fb: drm framebuffer object.
+ * @buf_cnt: number of buffers in this framebuffer.
+ * @exynos_gem_obj: array of exynos specific gem objects backing the buffers.
+ */
+struct exynos_drm_fb {
+ struct drm_framebuffer fb;
+ unsigned int buf_cnt;
+ struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
+};
+
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+ struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+ unsigned int flags;
+
+ /*
+ * if exynos drm driver supports iommu then framebuffer can use
+ * all the buffer types.
+ */
+ if (is_drm_iommu_supported(drm_dev))
+ return 0;
+
+ flags = exynos_gem_obj->flags;
+
+ /*
+	 * without iommu support, physically non-contiguous memory cannot
+	 * be used for the framebuffer.
+ */
+ if (IS_NONCONTIG_BUFFER(flags)) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
+{
+ struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+ unsigned int i;
+
+	/* make sure that overlay data are updated before releasing fb. */
+ exynos_drm_crtc_complete_scanout(fb);
+
+ drm_framebuffer_cleanup(fb);
+
+ for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
+ struct drm_gem_object *obj;
+
+ if (exynos_fb->exynos_gem_obj[i] == NULL)
+ continue;
+
+ obj = &exynos_fb->exynos_gem_obj[i]->base;
+ drm_gem_object_unreference_unlocked(obj);
+ }
+
+	kfree(exynos_fb);
+}
+
+static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+
+ /* This fb should have only one gem object. */
+ if (WARN_ON(exynos_fb->buf_cnt != 1))
+ return -EINVAL;
+
+ return drm_gem_handle_create(file_priv,
+ &exynos_fb->exynos_gem_obj[0]->base, handle);
+}
+
+static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned flags,
+ unsigned color, struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ /* TODO */
+
+ return 0;
+}
+
+static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
+ .destroy = exynos_drm_fb_destroy,
+ .create_handle = exynos_drm_fb_create_handle,
+ .dirty = exynos_drm_fb_dirty,
+};
+
+void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
+ unsigned int cnt)
+{
+ struct exynos_drm_fb *exynos_fb;
+
+ exynos_fb = to_exynos_fb(fb);
+
+ exynos_fb->buf_cnt = cnt;
+}
+
+unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb)
+{
+ struct exynos_drm_fb *exynos_fb;
+
+ exynos_fb = to_exynos_fb(fb);
+
+ return exynos_fb->buf_cnt;
+}
+
+struct drm_framebuffer *
+exynos_drm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ struct exynos_drm_fb *exynos_fb;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ int ret;
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+ if (ret < 0) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+ if (!exynos_fb)
+ return ERR_PTR(-ENOMEM);
+
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
+ ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+ if (ret) {
+ DRM_ERROR("failed to initialize framebuffer\n");
+ return ERR_PTR(ret);
+ }
+
+ return &exynos_fb->fb;
+}
+
+static u32 exynos_drm_format_num_buffers(struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ unsigned int cnt = 0;
+
+ if (mode_cmd->pixel_format != DRM_FORMAT_NV12)
+ return drm_format_num_planes(mode_cmd->pixel_format);
+
+ while (cnt != MAX_FB_BUFFER) {
+ if (!mode_cmd->handles[cnt])
+ break;
+ cnt++;
+ }
+
+ /*
+ * check if NV12 or NV12M.
+ *
+ * NV12
+ * handles[0] = base1, offsets[0] = 0
+ * handles[1] = base1, offsets[1] = Y_size
+ *
+ * NV12M
+ * handles[0] = base1, offsets[0] = 0
+ * handles[1] = base2, offsets[1] = 0
+ */
+ if (cnt == 2) {
+ /*
+		 * in case of the NV12 format, offsets[1] is not 0 and
+		 * handles[0] is the same as handles[1].
+ */
+ if (mode_cmd->offsets[1] &&
+ mode_cmd->handles[0] == mode_cmd->handles[1])
+ cnt = 1;
+ }
+
+ return cnt;
+}
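+
+/*
+ * Example of the NV12 vs. NV12M distinction above (handle values are
+ * illustrative): userspace describing single-buffer NV12 passes
+ * handles[] = { 5, 5 } with offsets[] = { 0, Y_size }, which collapses
+ * cnt to 1, while two-buffer NV12M passes distinct handles { 5, 6 }
+ * with offsets[] = { 0, 0 } and keeps cnt at 2.
+ */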
+
+static struct drm_framebuffer *
+exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_fb *exynos_fb;
+ int i, ret;
+
+ exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+ if (!exynos_fb)
+ return ERR_PTR(-ENOMEM);
+
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object\n");
+ ret = -ENOENT;
+ goto err_free;
+ }
+
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
+ exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
+
+ DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
+
+ for (i = 1; i < exynos_fb->buf_cnt; i++) {
+ obj = drm_gem_object_lookup(dev, file_priv,
+ mode_cmd->handles[i]);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object\n");
+ ret = -ENOENT;
+ exynos_fb->buf_cnt = i;
+ goto err_unreference;
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+ exynos_fb->exynos_gem_obj[i] = exynos_gem_obj;
+
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+ if (ret < 0) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ goto err_unreference;
+ }
+ }
+
+ ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+ if (ret) {
+ DRM_ERROR("failed to init framebuffer.\n");
+ goto err_unreference;
+ }
+
+ return &exynos_fb->fb;
+
+err_unreference:
+	for (i = 0; i < exynos_fb->buf_cnt; i++)
+		drm_gem_object_unreference_unlocked(
+				&exynos_fb->exynos_gem_obj[i]->base);
+err_free:
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
+}
+
+struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+ int index)
+{
+ struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+ struct exynos_drm_gem_buf *buffer;
+
+ if (index >= MAX_FB_BUFFER)
+ return NULL;
+
+ buffer = exynos_fb->exynos_gem_obj[index]->buffer;
+ if (!buffer)
+ return NULL;
+
+ DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
+
+ return buffer;
+}
+
+static void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *fb_helper = private->fb_helper;
+
+ if (fb_helper)
+ drm_fb_helper_hotplug_event(fb_helper);
+ else
+ exynos_drm_fbdev_init(dev);
+}
+
+static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
+ .fb_create = exynos_user_fb_create,
+ .output_poll_changed = exynos_drm_output_poll_changed,
+};
+
+void exynos_drm_mode_config_init(struct drm_device *dev)
+{
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+	 * set max width and height to default values (4096x4096).
+	 * these values are used to check the framebuffer size limit
+	 * in drm_mode_addfb().
+ */
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+
+ dev->mode_config.funcs = &exynos_drm_mode_config_funcs;
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
new file mode 100644
index 00000000000..517471b3756
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_FB_H_
+#define _EXYNOS_DRM_FB_H_
+
+struct drm_framebuffer *
+exynos_drm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+/* get memory information of a drm framebuffer */
+struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+ int index);
+
+void exynos_drm_mode_config_init(struct drm_device *dev);
+
+/* set the buffer count of a drm framebuffer. */
+void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
+ unsigned int cnt);
+
+/* get the buffer count of a drm framebuffer. */
+unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
new file mode 100644
index 00000000000..d771b467cf0
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -0,0 +1,371 @@
+/* exynos_drm_fbdev.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_fbdev.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+
+#define MAX_CONNECTOR 4
+#define PREFERRED_BPP 32
+
+#define to_exynos_fbdev(x) container_of(x, struct exynos_drm_fbdev,\
+ drm_fb_helper)
+
+struct exynos_drm_fbdev {
+ struct drm_fb_helper drm_fb_helper;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+};
+
+static int exynos_drm_fb_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+ struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+ unsigned long vm_size;
+ int ret;
+
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+ vm_size = vma->vm_end - vma->vm_start;
+
+ if (vm_size > buffer->size)
+ return -EINVAL;
+
+ ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
+ buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct fb_ops exynos_drm_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_mmap = exynos_drm_fb_mmap,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
+ struct drm_framebuffer *fb)
+{
+ struct fb_info *fbi = helper->fbdev;
+ struct drm_device *dev = helper->dev;
+ struct exynos_drm_gem_buf *buffer;
+ unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
+ unsigned long offset;
+
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+
+ /* RGB formats use only one buffer */
+ buffer = exynos_drm_fb_buffer(fb, 0);
+ if (!buffer) {
+ DRM_DEBUG_KMS("buffer is null.\n");
+ return -EFAULT;
+ }
+
+ /* map pages with kernel virtual space. */
+ if (!buffer->kvaddr) {
+ if (is_drm_iommu_supported(dev)) {
+ unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
+
+ buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
+ nr_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ } else {
+			phys_addr_t dma_addr = buffer->dma_addr;
+
+			if (dma_addr)
+				buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
+			else
+				buffer->kvaddr = NULL;
+ }
+ if (!buffer->kvaddr) {
+ DRM_ERROR("failed to map pages to kernel space.\n");
+ return -EIO;
+ }
+ }
+
+	/* the buffer count of the framebuffer is always 1 at boot time. */
+ exynos_drm_fb_set_buf_cnt(fb, 1);
+
+ offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
+ offset += fbi->var.yoffset * fb->pitches[0];
+
+ fbi->screen_base = buffer->kvaddr + offset;
+ fbi->screen_size = size;
+
+ return 0;
+}
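+
+/*
+ * For a typical 1920x1080 XRGB8888 console this maps roughly 8 MiB
+ * (1920 * 1080 * 4 bytes) and, with the x/y offsets still zero at boot,
+ * leaves screen_base pointing at the start of the kernel mapping.
+ */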
+
+static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_device *dev = helper->dev;
+ struct fb_info *fbi;
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct platform_device *pdev = dev->platformdev;
+ unsigned long size;
+ int ret;
+
+	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ mutex_lock(&dev->struct_mutex);
+
+ fbi = framebuffer_alloc(0, &pdev->dev);
+ if (!fbi) {
+ DRM_ERROR("failed to allocate fb info.\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
+ /*
+ * If physically contiguous memory allocation fails and if IOMMU is
+ * supported then try to get buffer from non physically contiguous
+ * memory area.
+ */
+ if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
+ dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
+ exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
+ size);
+ }
+
+ if (IS_ERR(exynos_gem_obj)) {
+ ret = PTR_ERR(exynos_gem_obj);
+ goto err_release_framebuffer;
+ }
+
+ exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+
+ helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
+ &exynos_gem_obj->base);
+ if (IS_ERR(helper->fb)) {
+ DRM_ERROR("failed to create drm framebuffer.\n");
+ ret = PTR_ERR(helper->fb);
+ goto err_destroy_gem;
+ }
+
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->fbops = &exynos_drm_fb_ops;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("failed to allocate cmap.\n");
+ goto err_destroy_framebuffer;
+ }
+
+ ret = exynos_drm_fbdev_update(helper, helper->fb);
+ if (ret < 0)
+ goto err_dealloc_cmap;
+
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+err_dealloc_cmap:
+ fb_dealloc_cmap(&fbi->cmap);
+err_destroy_framebuffer:
+ drm_framebuffer_cleanup(helper->fb);
+err_destroy_gem:
+ exynos_drm_gem_destroy(exynos_gem_obj);
+err_release_framebuffer:
+ framebuffer_release(fbi);
+
+/*
+ * if this fails, all resources allocated above are released by
+ * drm_mode_config_cleanup(), since drm_load() has been called before
+ * any specific driver such as the fimd or hdmi driver.
+ */
+out:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
+ .fb_probe = exynos_drm_fbdev_create,
+};
+
+static bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ bool ret = false;
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->status != connector_status_connected)
+ continue;
+
+ ret = true;
+ break;
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+int exynos_drm_fbdev_init(struct drm_device *dev)
+{
+ struct exynos_drm_fbdev *fbdev;
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *helper;
+ unsigned int num_crtc;
+ int ret;
+
+ if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+ return 0;
+
+ if (!exynos_drm_fbdev_is_anything_connected(dev))
+ return 0;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev)
+ return -ENOMEM;
+
+ private->fb_helper = helper = &fbdev->drm_fb_helper;
+ helper->funcs = &exynos_drm_fb_helper_funcs;
+
+ num_crtc = dev->mode_config.num_crtc;
+
+ ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
+ if (ret < 0) {
+ DRM_ERROR("failed to initialize drm fb helper.\n");
+ goto err_init;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(helper);
+ if (ret < 0) {
+ DRM_ERROR("failed to register drm_fb_helper_connector.\n");
+		goto err_setup;
+	}
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
+ if (ret < 0) {
+ DRM_ERROR("failed to set up hw configuration.\n");
+ goto err_setup;
+ }
+
+ return 0;
+
+err_setup:
+ drm_fb_helper_fini(helper);
+
+err_init:
+ private->fb_helper = NULL;
+ kfree(fbdev);
+
+ return ret;
+}
+
+static void exynos_drm_fbdev_destroy(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper)
+{
+ struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+ struct drm_framebuffer *fb;
+
+ if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
+ vunmap(exynos_gem_obj->buffer->kvaddr);
+
+ /* release drm framebuffer and real buffer */
+	if (fb_helper->fb && fb_helper->fb->funcs) {
+		fb = fb_helper->fb;
+		drm_framebuffer_unregister_private(fb);
+		drm_framebuffer_remove(fb);
+	}
+
+ /* release linux framebuffer */
+ if (fb_helper->fbdev) {
+ struct fb_info *info;
+ int ret;
+
+ info = fb_helper->fbdev;
+ ret = unregister_framebuffer(info);
+ if (ret < 0)
+ DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
+
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+
+ framebuffer_release(info);
+ }
+
+ drm_fb_helper_fini(fb_helper);
+}
+
+void exynos_drm_fbdev_fini(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_fbdev *fbdev;
+
+ if (!private || !private->fb_helper)
+ return;
+
+ fbdev = to_exynos_fbdev(private->fb_helper);
+
+ if (fbdev->exynos_gem_obj)
+ exynos_drm_gem_destroy(fbdev->exynos_gem_obj);
+
+ exynos_drm_fbdev_destroy(dev, private->fb_helper);
+ kfree(fbdev);
+ private->fb_helper = NULL;
+}
+
+void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+
+ if (!private || !private->fb_helper)
+ return;
+
+ drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
new file mode 100644
index 00000000000..e16d7f0ae19
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_FBDEV_H_
+#define _EXYNOS_DRM_FBDEV_H_
+
+int exynos_drm_fbdev_init(struct drm_device *dev);
+int exynos_drm_fbdev_reinit(struct drm_device *dev);
+void exynos_drm_fbdev_fini(struct drm_device *dev);
+void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
new file mode 100644
index 00000000000..831dde9034c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -0,0 +1,1901 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-fimc.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_fimc.h"
+
+/*
+ * FIMC stands for Fully Interactive Mobile Camera and
+ * supports image scaler/rotator and input/output DMA operations.
+ * input DMA reads image data from the memory.
+ * output DMA writes image data to memory.
+ * FIMC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> FIMC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> FIMC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> FIMC H/W ----> FIMD.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size (width, height).
+ * 4. add a check_prepare api for correct register setup.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define FIMC_MAX_DEVS 4
+#define FIMC_MAX_SRC 2
+#define FIMC_MAX_DST 32
+#define FIMC_SHFACTOR 10
+#define FIMC_BUF_STOP 1
+#define FIMC_BUF_START 2
+#define FIMC_WIDTH_ITU_709 1280
+#define FIMC_REFRESH_MAX 60
+#define FIMC_REFRESH_MIN 12
+#define FIMC_CROP_MAX 8192
+#define FIMC_CROP_MIN 32
+#define FIMC_SCALE_MAX 4224
+#define FIMC_SCALE_MIN 32
+
+#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+		struct fimc_context, ippdrv)
+
+enum fimc_wb {
+ FIMC_WB_NONE,
+ FIMC_WB_A,
+ FIMC_WB_B,
+};
+
+enum {
+ FIMC_CLK_LCLK,
+ FIMC_CLK_GATE,
+ FIMC_CLK_WB_A,
+ FIMC_CLK_WB_B,
+ FIMC_CLK_MUX,
+ FIMC_CLK_PARENT,
+ FIMC_CLKS_MAX
+};
+
+static const char * const fimc_clock_names[] = {
+ [FIMC_CLK_LCLK] = "sclk_fimc",
+ [FIMC_CLK_GATE] = "fimc",
+ [FIMC_CLK_WB_A] = "pxl_async0",
+ [FIMC_CLK_WB_B] = "pxl_async1",
+ [FIMC_CLK_MUX] = "mux",
+ [FIMC_CLK_PARENT] = "parent",
+};
+
+#define FIMC_DEFAULT_LCLK_FREQUENCY 133000000UL
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @bypass: scaler bypass mode.
+ * @up_h: horizontal scale up.
+ * @up_v: vertical scale up.
+ * @hratio: horizontal ratio.
+ * @vratio: vertical ratio.
+ */
+struct fimc_scaler {
+ bool range;
+ bool bypass;
+ bool up_h;
+ bool up_v;
+ u32 hratio;
+ u32 vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see user manual table 43-1.
+ * @in_hori: scaler input horizontal size.
+ * @bypass: scaler bypass mode.
+ * @dst_h_wo_rot: target horizontal size without output rotation.
+ * @dst_h_rot: target horizontal size with output rotation.
+ * @rl_w_wo_rot: real width without input rotation.
+ * @rl_h_rot: real height without output rotation.
+ */
+struct fimc_capability {
+ /* scaler */
+ u32 in_hori;
+ u32 bypass;
+ /* output rotator */
+ u32 dst_h_wo_rot;
+ u32 dst_h_rot;
+ /* input rotator */
+ u32 rl_w_wo_rot;
+ u32 rl_h_rot;
+};
+
+/*
+ * A structure of fimc context.
+ *
+ * @ippdrv: embedded ipp driver instance.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @clocks: fimc clocks.
+ * @clk_frequency: LCLK clock frequency.
+ * @sysreg: handle to SYSREG block regmap.
+ * @sc: scaler information.
+ * @pol: polarity of writeback.
+ * @id: fimc id.
+ * @irq: irq number.
+ * @suspended: suspended state.
+ */
+struct fimc_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ spinlock_t lock;
+ struct clk *clocks[FIMC_CLKS_MAX];
+ u32 clk_frequency;
+ struct regmap *sysreg;
+ struct fimc_scaler sc;
+ struct exynos_drm_ipp_pol pol;
+ int id;
+ int irq;
+ bool suspended;
+};
+
+static u32 fimc_read(struct fimc_context *ctx, u32 reg)
+{
+ return readl(ctx->regs + reg);
+}
+
+static void fimc_write(struct fimc_context *ctx, u32 val, u32 reg)
+{
+ writel(val, ctx->regs + reg);
+}
+
+static void fimc_set_bits(struct fimc_context *ctx, u32 reg, u32 bits)
+{
+ void __iomem *r = ctx->regs + reg;
+
+ writel(readl(r) | bits, r);
+}
+
+static void fimc_clear_bits(struct fimc_context *ctx, u32 reg, u32 bits)
+{
+ void __iomem *r = ctx->regs + reg;
+
+ writel(readl(r) & ~bits, r);
+}
+
+static void fimc_sw_reset(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ /* stop dma operation */
+ cfg = fimc_read(ctx, EXYNOS_CISTATUS);
+ if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg))
+ fimc_clear_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
+
+ fimc_set_bits(ctx, EXYNOS_CISRCFMT, EXYNOS_CISRCFMT_ITU601_8BIT);
+
+ /* disable image capture */
+ fimc_clear_bits(ctx, EXYNOS_CIIMGCPT,
+ EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
+
+ /* s/w reset */
+ fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_SWRST);
+
+ /* s/w reset complete */
+ fimc_clear_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_SWRST);
+
+ /* reset sequence */
+ fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ);
+}
+
+static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
+{
+ return regmap_update_bits(ctx->sysreg, SYSREG_CAMERA_BLK,
+ SYSREG_FIMD0WB_DEST_MASK,
+ ctx->id << SYSREG_FIMD0WB_DEST_SHIFT);
+}
+
+static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("wb[%d]\n", wb);
+
+ cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
+ cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
+ EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
+ EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
+ EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
+ EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
+
+ switch (wb) {
+ case FIMC_WB_A:
+ cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+ break;
+ case FIMC_WB_B:
+ cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+ break;
+ case FIMC_WB_NONE:
+ default:
+ cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
+ EXYNOS_CIGCTRL_SELWRITEBACK_A |
+ EXYNOS_CIGCTRL_SELCAM_MIPI_A |
+ EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
+ break;
+ }
+
+ fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_set_polarity(struct fimc_context *ctx,
+ struct exynos_drm_ipp_pol *pol)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("inv_pclk[%d]inv_vsync[%d]\n",
+ pol->inv_pclk, pol->inv_vsync);
+ DRM_DEBUG_KMS("inv_href[%d]inv_hsync[%d]\n",
+ pol->inv_href, pol->inv_hsync);
+
+ cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
+ cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
+ EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
+
+ if (pol->inv_pclk)
+ cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
+ if (pol->inv_vsync)
+ cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
+ if (pol->inv_href)
+ cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
+ if (pol->inv_hsync)
+ cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
+
+ fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("enable[%d]\n", enable);
+
+ cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
+ if (enable)
+ cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
+ else
+ cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
+
+ fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_mask_irq(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("enable[%d]\n", enable);
+
+ cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
+ if (enable) {
+ cfg &= ~EXYNOS_CIGCTRL_IRQ_OVFEN;
+ cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE | EXYNOS_CIGCTRL_IRQ_LEVEL;
+	} else {
+		cfg &= ~EXYNOS_CIGCTRL_IRQ_ENABLE;
+	}
+
+	fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_clear_irq(struct fimc_context *ctx)
+{
+ fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_CLR);
+}
+
+static bool fimc_check_ovf(struct fimc_context *ctx)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 status, flag;
+
+ status = fimc_read(ctx, EXYNOS_CISTATUS);
+ flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
+ EXYNOS_CISTATUS_OVFICR;
+
+ DRM_DEBUG_KMS("flag[0x%x]\n", flag);
+
+ if (status & flag) {
+ fimc_set_bits(ctx, EXYNOS_CIWDOFST,
+ EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ EXYNOS_CIWDOFST_CLROVFICR);
+ fimc_clear_bits(ctx, EXYNOS_CIWDOFST,
+ EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ EXYNOS_CIWDOFST_CLROVFICR);
+
+		dev_err(ippdrv->dev, "overflow occurred at %d, status 0x%x.\n",
+ ctx->id, status);
+ return true;
+ }
+
+ return false;
+}
+
+static bool fimc_check_frame_end(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ cfg = fimc_read(ctx, EXYNOS_CISTATUS);
+
+ DRM_DEBUG_KMS("cfg[0x%x]\n", cfg);
+
+ if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
+ return false;
+
+ cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
+ fimc_write(ctx, cfg, EXYNOS_CISTATUS);
+
+ return true;
+}
+
+static int fimc_get_buf_id(struct fimc_context *ctx)
+{
+ u32 cfg;
+ int frame_cnt, buf_id;
+
+ cfg = fimc_read(ctx, EXYNOS_CISTATUS2);
+ frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
+
+ if (frame_cnt == 0)
+ frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
+
+ DRM_DEBUG_KMS("present[%d]before[%d]\n",
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
+
+ if (frame_cnt == 0) {
+ DRM_ERROR("failed to get frame count.\n");
+ return -EIO;
+ }
+
+ buf_id = frame_cnt - 1;
+ DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
+
+ return buf_id;
+}
+
+static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("enable[%d]\n", enable);
+
+ cfg = fimc_read(ctx, EXYNOS_CIOCTRL);
+ if (enable)
+ cfg |= EXYNOS_CIOCTRL_LASTENDEN;
+ else
+ cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
+
+ fimc_write(ctx, cfg, EXYNOS_CIOCTRL);
+}
+
+static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+
+ /* RGB */
+ cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
+ fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
+ fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
+ return 0;
+ default:
+ /* bypass */
+ break;
+ }
+
+ /* YUV */
+ cfg = fimc_read(ctx, EXYNOS_MSCTRL);
+ cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE |
+ EXYNOS_MSCTRL_ORDER422_YCBYCR);
+
+ switch (fmt) {
+ case DRM_FORMAT_YUYV:
+ cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
+ break;
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YUV444:
+ cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV16:
+ cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+ break;
+ default:
+		dev_err(ippdrv->dev, "invalid source yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(ctx, cfg, EXYNOS_MSCTRL);
+
+ return 0;
+}
+
+static int fimc_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+
+ cfg = fimc_read(ctx, EXYNOS_MSCTRL);
+ cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
+ break;
+ case DRM_FORMAT_YUV444:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
+ break;
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV12MT:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+ break;
+ default:
+		dev_err(ippdrv->dev, "invalid source format 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(ctx, cfg, EXYNOS_MSCTRL);
+
+ cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM);
+ cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
+
+ if (fmt == DRM_FORMAT_NV12MT)
+ cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
+ else
+ cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
+
+ fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM);
+
+ return fimc_src_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_src_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg1, cfg2;
+
+ DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
+
+ cfg1 = fimc_read(ctx, EXYNOS_MSCTRL);
+ cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+
+ cfg2 = fimc_read(ctx, EXYNOS_CITRGFMT);
+ cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+ cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ default:
+		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ fimc_write(ctx, cfg1, EXYNOS_MSCTRL);
+ fimc_write(ctx, cfg2, EXYNOS_CITRGFMT);
+ *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
+
+ return 0;
+}
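+
+/*
+ * The flip bits compose with the rotation, e.g. 180 degrees together with
+ * a vertical flip clears the X mirror again and reduces to a plain Y
+ * mirror, while 90 degrees sets INROT90 and reports *swap so that callers
+ * exchange width and height.
+ */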
+
+static int fimc_set_window(struct fimc_context *ctx,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ u32 cfg, h1, h2, v1, v2;
+
+ /* cropped image */
+ h1 = pos->x;
+ h2 = sz->hsize - pos->w - pos->x;
+ v1 = pos->y;
+ v2 = sz->vsize - pos->h - pos->y;
+
+ DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
+ pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
+ DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2);
+
+ /*
+ * set window offset 1, 2 size
+ * check figure 43-21 in user manual
+ */
+ cfg = fimc_read(ctx, EXYNOS_CIWDOFST);
+ cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
+ EXYNOS_CIWDOFST_WINVEROFST_MASK);
+ cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
+ EXYNOS_CIWDOFST_WINVEROFST(v1));
+ cfg |= EXYNOS_CIWDOFST_WINOFSEN;
+ fimc_write(ctx, cfg, EXYNOS_CIWDOFST);
+
+ cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
+ EXYNOS_CIWDOFST2_WINVEROFST2(v2));
+ fimc_write(ctx, cfg, EXYNOS_CIWDOFST2);
+
+ return 0;
+}
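+
+/*
+ * Worked example of the window offsets above: cropping a 1280x720 region
+ * at (100, 50) out of a 1920x1080 source gives h1 = 100,
+ * h2 = 1920 - 1280 - 100 = 540, v1 = 50 and v2 = 1080 - 720 - 50 = 310.
+ */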
+
+static int fimc_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct drm_exynos_sz img_sz = *sz;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("swap[%d]hsize[%d]vsize[%d]\n",
+ swap, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
+ EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
+
+ fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
+
+ DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ img_sz.hsize = sz->vsize;
+ img_sz.vsize = sz->hsize;
+ }
+
+ /* set input DMA image size */
+ cfg = fimc_read(ctx, EXYNOS_CIREAL_ISIZE);
+ cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
+ EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
+ cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
+ EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
+ fimc_write(ctx, cfg, EXYNOS_CIREAL_ISIZE);
+
+ /*
+ * set input FIFO image size
+ * for now, we support only ITU601 8 bit mode
+ */
+ cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
+ EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
+ EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
+ fimc_write(ctx, cfg, EXYNOS_CISRCFMT);
+
+ /* offset Y(RGB), Cb, Cr */
+ cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
+ fimc_write(ctx, cfg, EXYNOS_CIIYOFF);
+ cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
+ fimc_write(ctx, cfg, EXYNOS_CIICBOFF);
+ cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIICROFF_VERTICAL(img_pos.y));
+ fimc_write(ctx, cfg, EXYNOS_CIICROFF);
+
+ return fimc_set_window(ctx, &img_pos, &img_sz);
+}
+
+static int fimc_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+
+ DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > FIMC_MAX_SRC) {
+		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+		return -EINVAL;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ config = &property->config[EXYNOS_DRM_OPS_SRC];
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ EXYNOS_CIIYSA(buf_id));
+
+ if (config->fmt == DRM_FORMAT_YVU420) {
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIICBSA(buf_id));
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIICRSA(buf_id));
+ } else {
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIICBSA(buf_id));
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIICRSA(buf_id));
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ fimc_write(ctx, 0x0, EXYNOS_CIIYSA(buf_id));
+ fimc_write(ctx, 0x0, EXYNOS_CIICBSA(buf_id));
+ fimc_write(ctx, 0x0, EXYNOS_CIICRSA(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return 0;
+}
+
+static struct exynos_drm_ipp_ops fimc_src_ops = {
+ .set_fmt = fimc_src_set_fmt,
+ .set_transf = fimc_src_set_transf,
+ .set_size = fimc_src_set_size,
+ .set_addr = fimc_src_set_addr,
+};
+
+static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+
+ /* RGB */
+ cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
+ fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_RGB888:
+ cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
+ fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
+ EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
+ fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ /* YUV */
+ cfg = fimc_read(ctx, EXYNOS_CIOCTRL);
+ cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
+ EXYNOS_CIOCTRL_ORDER422_MASK |
+ EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
+ cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV16:
+ cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
+ cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+ break;
+ default:
+		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(ctx, cfg, EXYNOS_CIOCTRL);
+
+ return 0;
+}
+
+static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+
+ cfg = fimc_read(ctx, EXYNOS_CIEXTEN);
+
+ if (fmt == DRM_FORMAT_AYUV) {
+ cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
+ fimc_write(ctx, cfg, EXYNOS_CIEXTEN);
+ } else {
+ cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
+ fimc_write(ctx, cfg, EXYNOS_CIEXTEN);
+
+ cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
+ cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
+ break;
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV21:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
+ break;
+ default:
+			dev_err(ippdrv->dev, "invalid target format 0x%x.\n",
+ fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
+ }
+
+ cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM);
+ cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
+
+ if (fmt == DRM_FORMAT_NV12MT)
+ cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
+ else
+ cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
+
+ fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM);
+
+ return fimc_dst_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
+
+ cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
+ cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+ EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
+ EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+ EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
+ *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
+
+ return 0;
+}
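+
+/*
+ * Summary of the degree/flip encoding above (an illustrative note, derived
+ * from the switch statement): the hardware provides a 90-degree rotator plus
+ * X/Y mirrors, so the remaining angles are composed from them:
+ *
+ * 0: optional X/Y mirror only
+ * 90: ROT90, plus any requested mirror
+ * 180: X + Y mirror (a requested flip cancels the matching mirror)
+ * 270: ROT90 + X + Y mirror (likewise cancelled by a requested flip)
+ */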
+
+static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
+ struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg, cfg_ext, shfactor;
+ u32 pre_dst_width, pre_dst_height;
+ u32 hfactor, vfactor;
+ u32 src_w, src_h, dst_w, dst_h;
+
+ cfg_ext = fimc_read(ctx, EXYNOS_CITRGFMT);
+ if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
+ src_w = src->h;
+ src_h = src->w;
+ } else {
+ src_w = src->w;
+ src_h = src->h;
+ }
+
+ if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
+ dst_w = dst->h;
+ dst_h = dst->w;
+ } else {
+ dst_w = dst->w;
+ dst_h = dst->h;
+ }
+
+ /* fimc_ippdrv_check_property() ensures that the divisors are non-zero */
+ hfactor = fls(src_w / dst_w / 2);
+ if (hfactor > FIMC_SHFACTOR / 2) {
+ dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+ return -EINVAL;
+ }
+
+ vfactor = fls(src_h / dst_h / 2);
+ if (vfactor > FIMC_SHFACTOR / 2) {
+ dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+ return -EINVAL;
+ }
+
+ pre_dst_width = src_w >> hfactor;
+ pre_dst_height = src_h >> vfactor;
+ DRM_DEBUG_KMS("pre_dst_width[%d]pre_dst_height[%d]\n",
+ pre_dst_width, pre_dst_height);
+ DRM_DEBUG_KMS("hfactor[%d]vfactor[%d]\n", hfactor, vfactor);
+
+ sc->hratio = (src_w << 14) / (dst_w << hfactor);
+ sc->vratio = (src_h << 14) / (dst_h << vfactor);
+ sc->up_h = (dst_w >= src_w);
+ sc->up_v = (dst_h >= src_h);
+ DRM_DEBUG_KMS("hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
+ sc->hratio, sc->vratio, sc->up_h, sc->up_v);
+
+ shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
+ DRM_DEBUG_KMS("shfactor[%d]\n", shfactor);
+
+ cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
+ EXYNOS_CISCPRERATIO_PREHORRATIO(1 << hfactor) |
+ EXYNOS_CISCPRERATIO_PREVERRATIO(1 << vfactor));
+ fimc_write(ctx, cfg, EXYNOS_CISCPRERATIO);
+
+ cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
+ EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
+ fimc_write(ctx, cfg, EXYNOS_CISCPREDST);
+
+ return 0;
+}
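+
+/*
+ * Worked example for the prescaler math above (illustrative numbers, not
+ * from the original code): downscaling 1920x1080 to 480x270 gives
+ * hfactor = fls(1920 / 480 / 2) = fls(2) = 2, so the prescaler divides the
+ * width by 1 << 2 = 4 (pre_dst_width = 480) and the main scaler is left
+ * with hratio = (1920 << 14) / (480 << 2) = 1 << 14, i.e. a 1:1 ratio in
+ * the fixed-point format consumed by CISCCTRL/CIEXTEN.
+ */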
+
+static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
+{
+ u32 cfg, cfg_ext;
+
+ DRM_DEBUG_KMS("range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
+ sc->range, sc->bypass, sc->up_h, sc->up_v);
+ DRM_DEBUG_KMS("hratio[%d]vratio[%d]\n",
+ sc->hratio, sc->vratio);
+
+ cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
+ cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
+ EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
+ EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
+ EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
+ EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+ EXYNOS_CISCCTRL_CSCY2R_WIDE);
+
+ if (sc->range)
+ cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+ EXYNOS_CISCCTRL_CSCY2R_WIDE);
+ if (sc->bypass)
+ cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
+ if (sc->up_h)
+ cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
+ if (sc->up_v)
+ cfg |= EXYNOS_CISCCTRL_SCALEUP_V;
+
+ cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
+ EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
+ fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
+
+ cfg_ext = fimc_read(ctx, EXYNOS_CIEXTEN);
+ cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
+ cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
+ cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
+ EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
+ fimc_write(ctx, cfg_ext, EXYNOS_CIEXTEN);
+}
+
+static int fimc_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct drm_exynos_sz img_sz = *sz;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("swap[%d]hsize[%d]vsize[%d]\n",
+ swap, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
+ EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
+
+ fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
+
+ DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h);
+
+ /* CSC ITU */
+ cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
+ cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
+
+ if (sz->hsize >= FIMC_WIDTH_ITU_709)
+ cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
+ else
+ cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
+
+ fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ img_sz.hsize = sz->vsize;
+ img_sz.vsize = sz->hsize;
+ }
+
+ /* target image size */
+ cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
+ cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
+ EXYNOS_CITRGFMT_TARGETV_MASK);
+ cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
+ EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
+ fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
+
+ /* target area */
+ cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
+ fimc_write(ctx, cfg, EXYNOS_CITAREA);
+
+ /* offset Y(RGB), Cb, Cr */
+ cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
+ fimc_write(ctx, cfg, EXYNOS_CIOYOFF);
+ cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
+ fimc_write(ctx, cfg, EXYNOS_CIOCBOFF);
+ cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
+ fimc_write(ctx, cfg, EXYNOS_CIOCROFF);
+
+ return 0;
+}
+
+static int fimc_dst_get_buf_count(struct fimc_context *ctx)
+{
+ u32 cfg, buf_num;
+
+ cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
+
+ buf_num = hweight32(cfg);
+
+ DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
+
+ return buf_num;
+}
+
+static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool enable;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+ int ret = 0;
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
+
+ spin_lock_irqsave(&ctx->lock, flags);
+
+ /* mask register set */
+ cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ enable = true;
+ break;
+ case IPP_BUF_DEQUEUE:
+ enable = false;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* sequence id */
+ cfg &= ~mask;
+ cfg |= (enable << buf_id);
+ fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ);
+
+ /* interrupt enable */
+ if (buf_type == IPP_BUF_ENQUEUE &&
+ fimc_dst_get_buf_count(ctx) >= FIMC_BUF_START)
+ fimc_mask_irq(ctx, true);
+
+ /* interrupt disable */
+ if (buf_type == IPP_BUF_DEQUEUE &&
+ fimc_dst_get_buf_count(ctx) <= FIMC_BUF_STOP)
+ fimc_mask_irq(ctx, false);
+
+err_unlock:
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ return ret;
+}
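+
+/*
+ * Example of the CIFCNTSEQ bookkeeping above (illustrative): enqueueing
+ * buffers 0 and 2 leaves cfg = 0b101, so fimc_dst_get_buf_count() returns
+ * hweight32(0b101) = 2 and, assuming FIMC_BUF_START <= 2, the interrupt is
+ * unmasked; dequeueing back below FIMC_BUF_STOP masks it again.
+ */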
+
+static int fimc_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+
+ DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > FIMC_MAX_DST) {
+ dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+ return -EINVAL;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ config = &property->config[EXYNOS_DRM_OPS_DST];
+
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ EXYNOS_CIOYSA(buf_id));
+
+ if (config->fmt == DRM_FORMAT_YVU420) {
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIOCBSA(buf_id));
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIOCRSA(buf_id));
+ } else {
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIOCBSA(buf_id));
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIOCRSA(buf_id));
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ fimc_write(ctx, 0x0, EXYNOS_CIOYSA(buf_id));
+ fimc_write(ctx, 0x0, EXYNOS_CIOCBSA(buf_id));
+ fimc_write(ctx, 0x0, EXYNOS_CIOCRSA(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops fimc_dst_ops = {
+ .set_fmt = fimc_dst_set_fmt,
+ .set_transf = fimc_dst_set_transf,
+ .set_size = fimc_dst_set_size,
+ .set_addr = fimc_dst_set_addr,
+};
+
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("enable[%d]\n", enable);
+
+ if (enable) {
+ clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
+ clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
+ ctx->suspended = false;
+ } else {
+ clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
+ clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
+static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
+{
+ struct fimc_context *ctx = dev_id;
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_event_work *event_work =
+ c_node->event_work;
+ int buf_id;
+
+ DRM_DEBUG_KMS("fimc id[%d]\n", ctx->id);
+
+ fimc_clear_irq(ctx);
+ if (fimc_check_ovf(ctx))
+ return IRQ_NONE;
+
+ if (!fimc_check_frame_end(ctx))
+ return IRQ_NONE;
+
+ buf_id = fimc_get_buf_id(ctx);
+ if (buf_id < 0)
+ return IRQ_HANDLED;
+
+ DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
+
+ if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return IRQ_HANDLED;
+ }
+
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+ queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
+
+ return IRQ_HANDLED;
+}
+
+static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
+
+ prop_list->version = 1;
+ prop_list->writeback = 1;
+ prop_list->refresh_min = FIMC_REFRESH_MIN;
+ prop_list->refresh_max = FIMC_REFRESH_MAX;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
+ (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 1;
+ prop_list->crop = 1;
+ prop_list->crop_max.hsize = FIMC_CROP_MAX;
+ prop_list->crop_max.vsize = FIMC_CROP_MAX;
+ prop_list->crop_min.hsize = FIMC_CROP_MIN;
+ prop_list->crop_min.vsize = FIMC_CROP_MIN;
+ prop_list->scale = 1;
+ prop_list->scale_max.hsize = FIMC_SCALE_MAX;
+ prop_list->scale_max.vsize = FIMC_SCALE_MAX;
+ prop_list->scale_min.hsize = FIMC_SCALE_MIN;
+ prop_list->scale_min.vsize = FIMC_SCALE_MIN;
+
+ return 0;
+}
+
+static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ case EXYNOS_DRM_FLIP_BOTH:
+ return true;
+ default:
+ DRM_DEBUG_KMS("invalid flip\n");
+ return false;
+ }
+}
+
+static int fimc_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos *pos;
+ struct drm_exynos_sz *sz;
+ bool swap;
+ int i;
+
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ /* check for flip */
+ if (!fimc_check_drm_flip(config->flip)) {
+ DRM_ERROR("invalid flip.\n");
+ goto err_property;
+ }
+
+ /* check for degree */
+ switch (config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
+ break;
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ swap = false;
+ break;
+ default:
+ DRM_ERROR("invalid degree.\n");
+ goto err_property;
+ }
+
+ /* check for buffer bound */
+ if ((pos->x + pos->w > sz->hsize) ||
+ (pos->y + pos->h > sz->vsize)) {
+ DRM_ERROR("out of buf bound.\n");
+ goto err_property;
+ }
+
+ /* check for crop */
+ if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+ if (swap) {
+ if ((pos->h < pp->crop_min.hsize) ||
+ (sz->vsize > pp->crop_max.hsize) ||
+ (pos->w < pp->crop_min.vsize) ||
+ (sz->hsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->crop_min.hsize) ||
+ (sz->hsize > pp->crop_max.hsize) ||
+ (pos->h < pp->crop_min.vsize) ||
+ (sz->vsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ }
+ }
+
+ /* check for scale */
+ if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+ if (swap) {
+ if ((pos->h < pp->scale_min.hsize) ||
+ (sz->vsize > pp->scale_max.hsize) ||
+ (pos->w < pp->scale_min.vsize) ||
+ (sz->hsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->scale_min.hsize) ||
+ (sz->hsize > pp->scale_max.hsize) ||
+ (pos->h < pp->scale_min.vsize) ||
+ (sz->vsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ }
+ }
+ }
+
+ return 0;
+
+err_property:
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+ i ? "dst" : "src", config->flip, config->degree,
+ pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize);
+ }
+
+ return -EINVAL;
+}
+
+static void fimc_clear_addr(struct fimc_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < FIMC_MAX_SRC; i++) {
+ fimc_write(ctx, 0, EXYNOS_CIIYSA(i));
+ fimc_write(ctx, 0, EXYNOS_CIICBSA(i));
+ fimc_write(ctx, 0, EXYNOS_CIICRSA(i));
+ }
+
+ for (i = 0; i < FIMC_MAX_DST; i++) {
+ fimc_write(ctx, 0, EXYNOS_CIOYSA(i));
+ fimc_write(ctx, 0, EXYNOS_CIOCBSA(i));
+ fimc_write(ctx, 0, EXYNOS_CIOCRSA(i));
+ }
+}
+
+static int fimc_ippdrv_reset(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ /* reset h/w block */
+ fimc_sw_reset(ctx);
+
+ /* reset scaler capability */
+ memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+
+ fimc_clear_addr(ctx);
+
+ return 0;
+}
+
+static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
+ struct drm_exynos_ipp_set_wb set_wb;
+ int ret, i;
+ u32 cfg0, cfg1;
+
+ DRM_DEBUG_KMS("cmd[%d]\n", cmd);
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+
+ fimc_mask_irq(ctx, true);
+
+ for_each_ipp_ops(i) {
+ config = &property->config[i];
+ img_pos[i] = config->pos;
+ }
+
+ ret = fimc_set_prescaler(ctx, &ctx->sc,
+ &img_pos[EXYNOS_DRM_OPS_SRC],
+ &img_pos[EXYNOS_DRM_OPS_DST]);
+ if (ret) {
+ dev_err(dev, "failed to set precalser.\n");
+ return ret;
+ }
+
+ /* When set to true, the screen contents can be captured as JPEG */
+ fimc_handle_jpeg(ctx, false);
+ fimc_set_scaler(ctx, &ctx->sc);
+ fimc_set_polarity(ctx, &ctx->pol);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
+ fimc_handle_lastend(ctx, false);
+
+ /* setup dma */
+ cfg0 = fimc_read(ctx, EXYNOS_MSCTRL);
+ cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
+ cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
+ fimc_write(ctx, cfg0, EXYNOS_MSCTRL);
+ break;
+ case IPP_CMD_WB:
+ fimc_set_type_ctrl(ctx, FIMC_WB_A);
+ fimc_handle_lastend(ctx, true);
+
+ /* setup FIMD */
+ ret = fimc_set_camblk_fimd0_wb(ctx);
+ if (ret < 0) {
+ dev_err(dev, "camblk setup failed.\n");
+ return ret;
+ }
+
+ set_wb.enable = 1;
+ set_wb.refresh = property->refresh_rate;
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ ret = -EINVAL;
+ dev_err(dev, "invalid operations.\n");
+ return ret;
+ }
+
+ /* Reset status */
+ fimc_write(ctx, 0x0, EXYNOS_CISTATUS);
+
+ cfg0 = fimc_read(ctx, EXYNOS_CIIMGCPT);
+ cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+
+ /* Scaler */
+ cfg1 = fimc_read(ctx, EXYNOS_CISCCTRL);
+ cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
+ cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
+ EXYNOS_CISCCTRL_SCALERSTART);
+
+ fimc_write(ctx, cfg1, EXYNOS_CISCCTRL);
+
+ /* Enable image capture */
+ cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
+ fimc_write(ctx, cfg0, EXYNOS_CIIMGCPT);
+
+ /* Enable frame end irq (clear the DISABLE bit) */
+ fimc_clear_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE);
+
+ fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK);
+
+ if (cmd == IPP_CMD_M2M)
+ fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
+
+ return 0;
+}
+
+static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+ u32 cfg;
+
+ DRM_DEBUG_KMS("cmd[%d]\n", cmd);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* Source clear */
+ cfg = fimc_read(ctx, EXYNOS_MSCTRL);
+ cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
+ cfg &= ~EXYNOS_MSCTRL_ENVID;
+ fimc_write(ctx, cfg, EXYNOS_MSCTRL);
+ break;
+ case IPP_CMD_WB:
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ dev_err(dev, "invalid operations.\n");
+ break;
+ }
+
+ fimc_mask_irq(ctx, false);
+
+ /* reset sequence */
+ fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ);
+
+ /* Scaler disable */
+ fimc_clear_bits(ctx, EXYNOS_CISCCTRL, EXYNOS_CISCCTRL_SCALERSTART);
+
+ /* Disable image capture */
+ fimc_clear_bits(ctx, EXYNOS_CIIMGCPT,
+ EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
+
+ /* Disable frame end irq (set the DISABLE bit) */
+ fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE);
+}
+
+static void fimc_put_clocks(struct fimc_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < FIMC_CLKS_MAX; i++) {
+ if (IS_ERR(ctx->clocks[i]))
+ continue;
+ clk_put(ctx->clocks[i]);
+ ctx->clocks[i] = ERR_PTR(-EINVAL);
+ }
+}
+
+static int fimc_setup_clocks(struct fimc_context *ctx)
+{
+ struct device *fimc_dev = ctx->ippdrv.dev;
+ struct device *dev;
+ int ret, i;
+
+ for (i = 0; i < FIMC_CLKS_MAX; i++)
+ ctx->clocks[i] = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < FIMC_CLKS_MAX; i++) {
+ if (i == FIMC_CLK_WB_A || i == FIMC_CLK_WB_B)
+ dev = fimc_dev->parent;
+ else
+ dev = fimc_dev;
+
+ ctx->clocks[i] = clk_get(dev, fimc_clock_names[i]);
+ if (IS_ERR(ctx->clocks[i])) {
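+ /*
+ * Clocks from FIMC_CLK_MUX onwards (mux/parent) are
+ * optional, so stop probing instead of failing.
+ */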
+ if (i >= FIMC_CLK_MUX)
+ break;
+ ret = PTR_ERR(ctx->clocks[i]);
+ dev_err(fimc_dev, "failed to get clock: %s\n",
+ fimc_clock_names[i]);
+ goto e_clk_free;
+ }
+ }
+
+ /* Optional FIMC LCLK parent clock setting */
+ if (!IS_ERR(ctx->clocks[FIMC_CLK_PARENT])) {
+ ret = clk_set_parent(ctx->clocks[FIMC_CLK_MUX],
+ ctx->clocks[FIMC_CLK_PARENT]);
+ if (ret < 0) {
+ dev_err(fimc_dev, "failed to set parent.\n");
+ goto e_clk_free;
+ }
+ }
+
+ ret = clk_set_rate(ctx->clocks[FIMC_CLK_LCLK], ctx->clk_frequency);
+ if (ret < 0)
+ goto e_clk_free;
+
+ ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_LCLK]);
+ if (!ret)
+ return ret;
+e_clk_free:
+ fimc_put_clocks(ctx);
+ return ret;
+}
+
+static int fimc_parse_dt(struct fimc_context *ctx)
+{
+ struct device_node *node = ctx->ippdrv.dev->of_node;
+
+ /* Handle only devices that support the LCD Writeback data path */
+ if (!of_property_read_bool(node, "samsung,lcd-wb"))
+ return -ENODEV;
+
+ if (of_property_read_u32(node, "clock-frequency",
+ &ctx->clk_frequency))
+ ctx->clk_frequency = FIMC_DEFAULT_LCLK_FREQUENCY;
+
+ ctx->id = of_alias_get_id(node, "fimc");
+
+ if (ctx->id < 0) {
+ dev_err(ctx->ippdrv.dev, "failed to get node alias id.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fimc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_context *ctx;
+ struct resource *res;
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret;
+
+ if (!dev->of_node) {
+ dev_err(dev, "device tree node not found.\n");
+ return -ENODEV;
+ }
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->ippdrv.dev = dev;
+
+ ret = fimc_parse_dt(ctx);
+ if (ret < 0)
+ return ret;
+
+ ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,sysreg");
+ if (IS_ERR(ctx->sysreg)) {
+ dev_err(dev, "syscon regmap lookup failed.\n");
+ return PTR_ERR(ctx->sysreg);
+ }
+
+ /* resource memory */
+ ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
+ if (IS_ERR(ctx->regs))
+ return PTR_ERR(ctx->regs);
+
+ /* resource irq */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to request irq resource.\n");
+ return -ENOENT;
+ }
+
+ ctx->irq = res->start;
+ ret = devm_request_threaded_irq(dev, ctx->irq, NULL, fimc_irq_handler,
+ IRQF_ONESHOT, "drm_fimc", ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq.\n");
+ return ret;
+ }
+
+ ret = fimc_setup_clocks(ctx);
+ if (ret < 0)
+ return ret;
+
+ ippdrv = &ctx->ippdrv;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
+ ippdrv->check_property = fimc_ippdrv_check_property;
+ ippdrv->reset = fimc_ippdrv_reset;
+ ippdrv->start = fimc_ippdrv_start;
+ ippdrv->stop = fimc_ippdrv_stop;
+ ret = fimc_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_put_clk;
+ }
+
+ DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv);
+
+ spin_lock_init(&ctx->lock);
+ platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm fimc device.\n");
+ goto err_pm_dis;
+ }
+
+ dev_info(dev, "drm fimc registered successfully.\n");
+
+ return 0;
+
+err_pm_dis:
+ pm_runtime_disable(dev);
+err_put_clk:
+ fimc_put_clocks(ctx);
+
+ return ret;
+}
+
+static int fimc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+ exynos_drm_ippdrv_unregister(ippdrv);
+
+ fimc_put_clocks(ctx);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_suspend(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_resume(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+
+ if (!pm_runtime_suspended(dev))
+ return fimc_clk_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimc_runtime_suspend(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+
+ return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_runtime_resume(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+
+ return fimc_clk_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops fimc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+ SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
+};
+
+static const struct of_device_id fimc_of_match[] = {
+ { .compatible = "samsung,exynos4210-fimc" },
+ { .compatible = "samsung,exynos4212-fimc" },
+ { },
+};
+
+struct platform_driver fimc_driver = {
+ .probe = fimc_probe,
+ .remove = fimc_remove,
+ .driver = {
+ .of_match_table = fimc_of_match,
+ .name = "exynos-drm-fimc",
+ .owner = THIS_MODULE,
+ .pm = &fimc_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
new file mode 100644
index 00000000000..127a424c5fd
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_FIMC_H_
+#define _EXYNOS_DRM_FIMC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_FIMC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
new file mode 100644
index 00000000000..33161ad3820
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -0,0 +1,1035 @@
+/* exynos_drm_fimd.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <drm/drmP.h>
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/component.h>
+
+#include <video/of_display_timing.h>
+#include <video/of_videomode.h>
+#include <video/samsung_fimd.h>
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_fbdev.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * FIMD stands for Fully Interactive Mobile Display and
+ * as a display controller, it transfers contents drawn on memory
+ * to a LCD Panel through Display Interfaces such as RGB or
+ * CPU Interface.
+ */
+
+#define FIMD_DEFAULT_FRAMERATE 60
+#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
+
+/* position control register for hardware windows 0 and 2 ~ 4. */
+#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16)
+#define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16)
+/*
+ * size control register for hardware window 0 and alpha control register
+ * for hardware windows 1 ~ 4
+ */
+#define VIDOSD_C(win) (VIDOSD_BASE + 0x08 + (win) * 16)
+/* size control register for hardware windows 1 ~ 2. */
+#define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16)
+
+#define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8)
+#define VIDWx_BUF_END(win, buf) (VIDW_BUF_END(buf) + (win) * 8)
+#define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4)
+
+/* color key control register for hardware windows 1 ~ 4. */
+#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + ((x - 1) * 8))
+/* color key value register for hardware windows 1 ~ 4. */
+#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + ((x - 1) * 8))
+
+/* FIMD has five hardware windows in total. */
+#define WINDOWS_NR 5
+
+#define get_fimd_manager(dev) platform_get_drvdata(to_platform_device(dev))
+
+struct fimd_driver_data {
+ unsigned int timing_base;
+
+ unsigned int has_shadowcon:1;
+ unsigned int has_clksel:1;
+ unsigned int has_limited_fmt:1;
+};
+
+static struct fimd_driver_data s3c64xx_fimd_driver_data = {
+ .timing_base = 0x0,
+ .has_clksel = 1,
+ .has_limited_fmt = 1,
+};
+
+static struct fimd_driver_data exynos4_fimd_driver_data = {
+ .timing_base = 0x0,
+ .has_shadowcon = 1,
+};
+
+static struct fimd_driver_data exynos5_fimd_driver_data = {
+ .timing_base = 0x20000,
+ .has_shadowcon = 1,
+};
+
+struct fimd_win_data {
+ unsigned int offset_x;
+ unsigned int offset_y;
+ unsigned int ovl_width;
+ unsigned int ovl_height;
+ unsigned int fb_width;
+ unsigned int fb_height;
+ unsigned int bpp;
+ unsigned int pixel_format;
+ dma_addr_t dma_addr;
+ unsigned int buf_offsize;
+ unsigned int line_size; /* bytes */
+ bool enabled;
+ bool resume;
+};
+
+struct fimd_context {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct clk *bus_clk;
+ struct clk *lcd_clk;
+ void __iomem *regs;
+ struct drm_display_mode mode;
+ struct fimd_win_data win_data[WINDOWS_NR];
+ unsigned int default_win;
+ unsigned long irq_flags;
+ u32 vidcon1;
+ bool suspended;
+ int pipe;
+ wait_queue_head_t wait_vsync_queue;
+ atomic_t wait_vsync_event;
+
+ struct exynos_drm_panel_info panel;
+ struct fimd_driver_data *driver_data;
+ struct exynos_drm_display *display;
+};
+
+static const struct of_device_id fimd_driver_dt_match[] = {
+ { .compatible = "samsung,s3c6400-fimd",
+ .data = &s3c64xx_fimd_driver_data },
+ { .compatible = "samsung,exynos4210-fimd",
+ .data = &exynos4_fimd_driver_data },
+ { .compatible = "samsung,exynos5250-fimd",
+ .data = &exynos5_fimd_driver_data },
+ {},
+};
+
+static inline struct fimd_driver_data *drm_fimd_get_driver_data(
+ struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(fimd_driver_dt_match, &pdev->dev);
+
+ return (struct fimd_driver_data *)of_id->data;
+}
+
+static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
+{
+ struct fimd_context *ctx = mgr->ctx;
+
+ if (ctx->suspended)
+ return;
+
+ atomic_set(&ctx->wait_vsync_event, 1);
+
+ /*
+ * wait for FIMD to signal the VSYNC interrupt, or time out after
+ * 50ms (i.e. a 20Hz refresh rate).
+ */
+ if (!wait_event_timeout(ctx->wait_vsync_queue,
+ !atomic_read(&ctx->wait_vsync_event),
+ HZ/20))
+ DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
+static void fimd_clear_channel(struct exynos_drm_manager *mgr)
+{
+ struct fimd_context *ctx = mgr->ctx;
+ int win, ch_enabled = 0;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* Check if any channel is enabled. */
+ for (win = 0; win < WINDOWS_NR; win++) {
+ u32 val = readl(ctx->regs + SHADOWCON);
+ if (val & SHADOWCON_CHx_ENABLE(win)) {
+ val &= ~SHADOWCON_CHx_ENABLE(win);
+ writel(val, ctx->regs + SHADOWCON);
+ ch_enabled = 1;
+ }
+ }
+
+ /* Wait for vsync, as disable channel takes effect at next vsync */
+ if (ch_enabled)
+ fimd_wait_for_vblank(mgr);
+}
+
+static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
+ struct drm_device *drm_dev)
+{
+ struct fimd_context *ctx = mgr->ctx;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+
+ mgr->drm_dev = ctx->drm_dev = drm_dev;
+ mgr->pipe = ctx->pipe = priv->pipe++;
+
+ /*
+ * enable drm irq mode.
+ * - with irq_enabled = true, we can use the vblank feature.
+ *
+ * Note that we use a driver-specific irq handler rather than the
+ * drm core's, because the drm framework supports only one irq
+ * handler.
+ */
+ drm_dev->irq_enabled = true;
+
+ /*
+ * with vblank_disable_allowed = true, the vblank interrupt is
+ * disabled by the drm timer once the current process gives up
+ * ownership of the vblank event (i.e. after drm_vblank_put() is
+ * called).
+ */
+ drm_dev->vblank_disable_allowed = true;
+
+ /* attach this sub driver to iommu mapping if supported. */
+ if (is_drm_iommu_supported(ctx->drm_dev)) {
+ /*
+ * If any channel is already active, the iommu will throw a page
+ * fault when enabled, so disable any enabled channels first.
+ */
+ fimd_clear_channel(mgr);
+ drm_iommu_attach_device(ctx->drm_dev, ctx->dev);
+ }
+
+ return 0;
+}
+
+static void fimd_mgr_remove(struct exynos_drm_manager *mgr)
+{
+ struct fimd_context *ctx = mgr->ctx;
+
+ /* detach this sub driver from iommu mapping if supported. */
+ if (is_drm_iommu_supported(ctx->drm_dev))
+ drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+}
+
+static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
+ const struct drm_display_mode *mode)
+{
+ unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
+ u32 clkdiv;
+
+ /* Find the smallest divider that keeps the pixel clock at or below ideal_clk */
+ clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->lcd_clk), ideal_clk);
+
+ return (clkdiv < 0x100) ? clkdiv : 0xff;
+}
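+
+/*
+ * Example for the divider above (illustrative numbers): with the lcd clock
+ * at 260 MHz and a mode needing ideal_clk = 65 MHz,
+ * clkdiv = DIV_ROUND_UP(260000000, 65000000) = 4, which fimd_commit()
+ * programs as VIDCON0_CLKVAL_F(3), i.e. a divide-by-4 pixel clock.
+ */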
+
+static bool fimd_mode_fixup(struct exynos_drm_manager *mgr,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ if (adjusted_mode->vrefresh == 0)
+ adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE;
+
+ return true;
+}
+
+static void fimd_mode_set(struct exynos_drm_manager *mgr,
+ const struct drm_display_mode *in_mode)
+{
+ struct fimd_context *ctx = mgr->ctx;
+
+ drm_mode_copy(&ctx->mode, in_mode);
+}
+
+static void fimd_commit(struct exynos_drm_manager *mgr)
+{
+ struct fimd_context *ctx = mgr->ctx;
+ struct drm_display_mode *mode = &ctx->mode;
+ struct fimd_driver_data *driver_data;
+ u32 val, clkdiv, vidcon1;
+ int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;
+
+ driver_data = ctx->driver_data;
+ if (ctx->suspended)
+ return;
+
+ /* nothing to do if we haven't set the mode yet */
+ if (mode->htotal == 0 || mode->vtotal == 0)
+ return;
+
+ /* setup polarity values */
+ vidcon1 = ctx->vidcon1;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ vidcon1 |= VIDCON1_INV_VSYNC;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ vidcon1 |= VIDCON1_INV_HSYNC;
+ writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
+
+ /* setup vertical timing values. */
+ vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
+ vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;
+
+ val = VIDTCON0_VBPD(vbpd - 1) |
+ VIDTCON0_VFPD(vfpd - 1) |
+ VIDTCON0_VSPW(vsync_len - 1);
+ writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
+
+ /* setup horizontal timing values. */
+ hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
+ hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;
+
+ val = VIDTCON1_HBPD(hbpd - 1) |
+ VIDTCON1_HFPD(hfpd - 1) |
+ VIDTCON1_HSPW(hsync_len - 1);
+ writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
+
+ /* setup horizontal and vertical display size. */
+ val = VIDTCON2_LINEVAL(mode->vdisplay - 1) |
+ VIDTCON2_HOZVAL(mode->hdisplay - 1) |
+ VIDTCON2_LINEVAL_E(mode->vdisplay - 1) |
+ VIDTCON2_HOZVAL_E(mode->hdisplay - 1);
+ writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
+
+ /*
+ * register fields with the '_F' suffix are updated at vsync
+ * (i.e. at dma start)
+ */
+ val = VIDCON0_ENVID | VIDCON0_ENVID_F;
+
+ if (ctx->driver_data->has_clksel)
+ val |= VIDCON0_CLKSEL_LCD;
+
+ clkdiv = fimd_calc_clkdiv(ctx, mode);
+ if (clkdiv > 1)
+ val |= VIDCON0_CLKVAL_F(clkdiv - 1) | VIDCON0_CLKDIR;
+
+ writel(val, ctx->regs + VIDCON0);
+}
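+
+/*
+ * Worked example for the timing setup above (standard VESA 1024x768@60,
+ * illustrative only): vsync_start = 771, vsync_end = 777 and vtotal = 806
+ * yield vsync_len = 6, vbpd = 29 and vfpd = 3, so VIDTCON0 is programmed
+ * with the hardware's N-1 encoding as VSPW = 5, VBPD = 28 and VFPD = 2.
+ */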
+
+static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
+{
+ struct fimd_context *ctx = mgr->ctx;
+ u32 val;
+
+ if (ctx->suspended)
+ return -EPERM;
+
+ if (!test_and_set_bit(0, &ctx->irq_flags)) {
+ val = readl(ctx->regs + VIDINTCON0);
+
+ val |= VIDINTCON0_INT_ENABLE;
+ val |= VIDINTCON0_INT_FRAME;
+
+ val &= ~VIDINTCON0_FRAMESEL0_MASK;
+ val |= VIDINTCON0_FRAMESEL0_VSYNC;
+ val &= ~VIDINTCON0_FRAMESEL1_MASK;
+ val |= VIDINTCON0_FRAMESEL1_NONE;
+
+ writel(val, ctx->regs + VIDINTCON0);
+ }
+
+ return 0;
+}
+
+static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
+{
+ struct fimd_context *ctx = mgr->ctx;
+ u32 val;
+
+ if (ctx->suspended)
+ return;
+
+ if (test_and_clear_bit(0, &ctx->irq_flags)) {
+ val = readl(ctx->regs + VIDINTCON0);
+
+ val &= ~VIDINTCON0_INT_FRAME;
+ val &= ~VIDINTCON0_INT_ENABLE;
+
+ writel(val, ctx->regs + VIDINTCON0);
+ }
+}
+
+static void fimd_win_mode_set(struct exynos_drm_manager *mgr,
+ struct exynos_drm_overlay *overlay)
+{
+ struct fimd_context *ctx = mgr->ctx;
+ struct fimd_win_data *win_data;
+ int win;
+ unsigned long offset;
+
+ if (!overlay) {
+ DRM_ERROR("overlay is NULL\n");
+ return;
+ }
+
+ win = overlay->zpos;
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
+ if (win < 0 || win >= WINDOWS_NR)
+ return;
+
+ offset = overlay->fb_x * (overlay->bpp >> 3);
+ offset += overlay->fb_y * overlay->pitch;
+
+ DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
+
+ win_data = &ctx->win_data[win];
+
+ win_data->offset_x = overlay->crtc_x;
+ win_data->offset_y = overlay->crtc_y;
+ win_data->ovl_width = overlay->crtc_width;
+ win_data->ovl_height = overlay->crtc_height;
+ win_data->fb_width = overlay->fb_width;
+ win_data->fb_height = overlay->fb_height;
+ win_data->dma_addr = overlay->dma_addr[0] + offset;
+ win_data->bpp = overlay->bpp;
+ win_data->pixel_format = overlay->pixel_format;
+ win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
+ (overlay->bpp >> 3);
+ win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
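+
+ /*
+ * Example (illustrative): a 1280-pixel-wide view into a 1920-pixel-
+ * wide 32bpp framebuffer gives line_size = 1280 * 4 = 5120 bytes and
+ * buf_offsize = (1920 - 1280) * 4 = 2560 bytes skipped after each line.
+ */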
+
+ DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
+ win_data->offset_x, win_data->offset_y);
+ DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
+ win_data->ovl_width, win_data->ovl_height);
+ DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
+ DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
+ overlay->fb_width, overlay->crtc_width);
+}
+
+static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
+{
+ struct fimd_win_data *win_data = &ctx->win_data[win];
+ unsigned long val;
+
+ val = WINCONx_ENWIN;
+
+ /*
+ * On s3c64xx, window 0 does not support an alpha channel, so if
+ * the requested format is ARGB8888, change it to XRGB8888.
+ */
+ if (ctx->driver_data->has_limited_fmt && !win) {
+ if (win_data->pixel_format == DRM_FORMAT_ARGB8888)
+ win_data->pixel_format = DRM_FORMAT_XRGB8888;
+ }
+
+ switch (win_data->pixel_format) {
+ case DRM_FORMAT_C8:
+ val |= WINCON0_BPPMODE_8BPP_PALETTE;
+ val |= WINCONx_BURSTLEN_8WORD;
+ val |= WINCONx_BYTSWP;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ val |= WINCON0_BPPMODE_16BPP_1555;
+ val |= WINCONx_HAWSWP;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ case DRM_FORMAT_RGB565:
+ val |= WINCON0_BPPMODE_16BPP_565;
+ val |= WINCONx_HAWSWP;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ val |= WINCON0_BPPMODE_24BPP_888;
+ val |= WINCONx_WSWP;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ val |= WINCON1_BPPMODE_25BPP_A1888
+ | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
+ val |= WINCONx_WSWP;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ default:
+ DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n");
+
+ val |= WINCON0_BPPMODE_24BPP_888;
+ val |= WINCONx_WSWP;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ }
+
+ DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp);
+
+ /*
+ * On exynos, setting the dma burst to 16 words causes permanent
+ * tearing for very small buffers, e.g. the cursor buffer. Switching
+ * the burst mode based on overlay size is not recommended, as the
+ * overlay size varies a lot towards the edge of the screen and rapid
+ * movement causes unstable DMA, which results in iommu crashes and
+ * tearing.
+ */
+
+ if (win_data->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+ val &= ~WINCONx_BURSTLEN_MASK;
+ val |= WINCONx_BURSTLEN_4WORD;
+ }
+
+ writel(val, ctx->regs + WINCON(win));
+}
+
+static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win)
+{
+ unsigned int keycon0 = 0, keycon1 = 0;
+
+ keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F |
+ WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0);
+
+ keycon1 = WxKEYCON1_COLVAL(0xffffffff);
+
+ writel(keycon0, ctx->regs + WKEYCON0_BASE(win));
+ writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
+}
+
+/**
+ * fimd_shadow_protect_win() - disable updating values from shadow registers at vsync
+ *
+ * @ctx: fimd context
+ * @win: window to protect registers for
+ * @protect: 1 to protect (disable updates)
+ */
+static void fimd_shadow_protect_win(struct fimd_context *ctx,
+ int win, bool protect)
+{
+ u32 reg, bits, val;
+
+ if (ctx->driver_data->has_shadowcon) {
+ reg = SHADOWCON;
+ bits = SHADOWCON_WINx_PROTECT(win);
+ } else {
+ reg = PRTCON;
+ bits = PRTCON_PROTECT;
+ }
+
+ val = readl(ctx->regs + reg);
+ if (protect)
+ val |= bits;
+ else
+ val &= ~bits;
+ writel(val, ctx->regs + reg);
+}
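+
+/*
+ * Callers bracket multi-register window updates with this helper: protect
+ * the window, program the '_F' registers, then unprotect so that all fields
+ * are applied together at the next vsync (see fimd_win_commit() below).
+ */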
+
+static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
+{
+ struct fimd_context *ctx = mgr->ctx;
+ struct fimd_win_data *win_data;
+ int win = zpos;
+ unsigned long val, alpha, size;
+ unsigned int last_x;
+ unsigned int last_y;
+
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
+ if (win < 0 || win >= WINDOWS_NR)
+ return;
+
+ win_data = &ctx->win_data[win];
+
+ /* If suspended, enable this on resume */
+ if (ctx->suspended) {
+ win_data->resume = true;
+ return;
+ }
+
+ /*
+ * The SHADOWCON/PRTCON register controls when shadow registers are
+ * applied.
+ *
+ * For example, if only the width field of a register were updated
+ * while dma is running, the fimd hardware could malfunction. With
+ * the window protected, register fields with the '_F' suffix are not
+ * applied at vsync; they all take effect together once the window is
+ * unprotected.
+ */
+
+ /* protect windows */
+ fimd_shadow_protect_win(ctx, win, true);
+
+ /* buffer start address */
+ val = (unsigned long)win_data->dma_addr;
+ writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
+
+ /* buffer end address */
+ size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
+ val = (unsigned long)(win_data->dma_addr + size);
+ writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
+
+ DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
+ (unsigned long)win_data->dma_addr, val, size);
+ DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
+ win_data->ovl_width, win_data->ovl_height);
+
+ /* buffer size */
+ val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
+ VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
+ VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
+ VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
+ writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
+
+ /* OSD position */
+ val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
+ VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
+ VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
+ VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
+ writel(val, ctx->regs + VIDOSD_A(win));
+
+ last_x = win_data->offset_x + win_data->ovl_width;
+ if (last_x)
+ last_x--;
+ last_y = win_data->offset_y + win_data->ovl_height;
+ if (last_y)
+ last_y--;
+
+ val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) |
+ VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y);
+
+ writel(val, ctx->regs + VIDOSD_B(win));
+
+ DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
+ win_data->offset_x, win_data->offset_y, last_x, last_y);
+
+ /* hardware window 0 doesn't support alpha channel. */
+ if (win != 0) {
+ /* OSD alpha */
+ alpha = VIDISD14C_ALPHA1_R(0xf) |
+ VIDISD14C_ALPHA1_G(0xf) |
+ VIDISD14C_ALPHA1_B(0xf);
+
+ writel(alpha, ctx->regs + VIDOSD_C(win));
+ }
+
+ /* OSD size */
+ if (win != 3 && win != 4) {
+ u32 offset = VIDOSD_D(win);
+ if (win == 0)
+ offset = VIDOSD_C(win);
+ val = win_data->ovl_width * win_data->ovl_height;
+ writel(val, ctx->regs + offset);
+
+ DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
+ }
+
+ fimd_win_set_pixfmt(ctx, win);
+
+ /* hardware window 0 doesn't support color key. */
+ if (win != 0)
+ fimd_win_set_colkey(ctx, win);
+
+ /* wincon */
+ val = readl(ctx->regs + WINCON(win));
+ val |= WINCONx_ENWIN;
+ writel(val, ctx->regs + WINCON(win));
+
+ /* Enable DMA channel and unprotect windows */
+ fimd_shadow_protect_win(ctx, win, false);
+
+ if (ctx->driver_data->has_shadowcon) {
+ val = readl(ctx->regs + SHADOWCON);
+ val |= SHADOWCON_CHx_ENABLE(win);
+ writel(val, ctx->regs + SHADOWCON);
+ }
+
+ win_data->enabled = true;
+}
+
+static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
+{
+ struct fimd_context *ctx = mgr->ctx;
+ struct fimd_win_data *win_data;
+ int win = zpos;
+ u32 val;
+
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
+ if (win < 0 || win >= WINDOWS_NR)
+ return;
+
+ win_data = &ctx->win_data[win];
+
+ if (ctx->suspended) {
+ /* do not resume this window */
+ win_data->resume = false;
+ return;
+ }
+
+ /* protect windows */
+ fimd_shadow_protect_win(ctx, win, true);
+
+ /* wincon */
+ val = readl(ctx->regs + WINCON(win));
+ val &= ~WINCONx_ENWIN;
+ writel(val, ctx->regs + WINCON(win));
+
+ /* unprotect windows */
+ if (ctx->driver_data->has_shadowcon) {
+ val = readl(ctx->regs + SHADOWCON);
+ val &= ~SHADOWCON_CHx_ENABLE(win);
+ writel(val, ctx->regs + SHADOWCON);
+ }
+
+ fimd_shadow_protect_win(ctx, win, false);
+
+ win_data->enabled = false;
+}
+
+static void fimd_window_suspend(struct exynos_drm_manager *mgr)
+{
+ struct fimd_context *ctx = mgr->ctx;
+ struct fimd_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->resume = win_data->enabled;
+ if (win_data->enabled)
+ fimd_win_disable(mgr, i);
+ }
+ fimd_wait_for_vblank(mgr);
+}
+
+static void fimd_window_resume(struct exynos_drm_manager *mgr)
+{
+ struct fimd_context *ctx = mgr->ctx;
+ struct fimd_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->enabled = win_data->resume;
+ win_data->resume = false;
+ }
+}
+
+static void fimd_apply(struct exynos_drm_manager *mgr)
+{
+ struct fimd_context *ctx = mgr->ctx;
+ struct fimd_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ if (win_data->enabled)
+ fimd_win_commit(mgr, i);
+ else
+ fimd_win_disable(mgr, i);
+ }
+
+ fimd_commit(mgr);
+}
+
+static int fimd_poweron(struct exynos_drm_manager *mgr)
+{
+ struct fimd_context *ctx = mgr->ctx;
+ int ret;
+
+ if (!ctx->suspended)
+ return 0;
+
+ ctx->suspended = false;
+
+ pm_runtime_get_sync(ctx->dev);
+
+ ret = clk_prepare_enable(ctx->bus_clk);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret);
+ goto bus_clk_err;
+ }
+
+ ret = clk_prepare_enable(ctx->lcd_clk);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret);
+ goto lcd_clk_err;
+ }
+
+ /* if vblank was enabled, enable it again. */
+ if (test_and_clear_bit(0, &ctx->irq_flags)) {
+ ret = fimd_enable_vblank(mgr);
+ if (ret) {
+ DRM_ERROR("Failed to re-enable vblank [%d]\n", ret);
+ goto enable_vblank_err;
+ }
+ }
+
+ fimd_window_resume(mgr);
+
+ fimd_apply(mgr);
+
+ return 0;
+
+enable_vblank_err:
+ clk_disable_unprepare(ctx->lcd_clk);
+lcd_clk_err:
+ clk_disable_unprepare(ctx->bus_clk);
+bus_clk_err:
+ ctx->suspended = true;
+ return ret;
+}
+
+static int fimd_poweroff(struct exynos_drm_manager *mgr)
+{
+ struct fimd_context *ctx = mgr->ctx;
+
+ if (ctx->suspended)
+ return 0;
+
+ /*
+ * We need to make sure that all windows are disabled before we
+ * suspend that connector. Otherwise we might try to scan from
+ * a destroyed buffer later.
+ */
+ fimd_window_suspend(mgr);
+
+ clk_disable_unprepare(ctx->lcd_clk);
+ clk_disable_unprepare(ctx->bus_clk);
+
+ pm_runtime_put_sync(ctx->dev);
+
+ ctx->suspended = true;
+ return 0;
+}
+
+static void fimd_dpms(struct exynos_drm_manager *mgr, int mode)
+{
+ DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ fimd_poweron(mgr);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ fimd_poweroff(mgr);
+ break;
+ default:
+ DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ break;
+ }
+}
+
+static struct exynos_drm_manager_ops fimd_manager_ops = {
+ .dpms = fimd_dpms,
+ .mode_fixup = fimd_mode_fixup,
+ .mode_set = fimd_mode_set,
+ .commit = fimd_commit,
+ .enable_vblank = fimd_enable_vblank,
+ .disable_vblank = fimd_disable_vblank,
+ .wait_for_vblank = fimd_wait_for_vblank,
+ .win_mode_set = fimd_win_mode_set,
+ .win_commit = fimd_win_commit,
+ .win_disable = fimd_win_disable,
+};
+
+static struct exynos_drm_manager fimd_manager = {
+ .type = EXYNOS_DISPLAY_TYPE_LCD,
+ .ops = &fimd_manager_ops,
+};
+
+static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
+{
+ struct fimd_context *ctx = (struct fimd_context *)dev_id;
+ u32 val;
+
+ val = readl(ctx->regs + VIDINTCON1);
+
+ if (val & VIDINTCON1_INT_FRAME)
+ /* VSYNC interrupt */
+ writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
+
+ /* check if the crtc is already detached from the encoder */
+ if (ctx->pipe < 0 || !ctx->drm_dev)
+ goto out;
+
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ wake_up(&ctx->wait_vsync_queue);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+static int fimd_bind(struct device *dev, struct device *master, void *data)
+{
+ struct fimd_context *ctx = fimd_manager.ctx;
+ struct drm_device *drm_dev = data;
+
+ fimd_mgr_initialize(&fimd_manager, drm_dev);
+ exynos_drm_crtc_create(&fimd_manager);
+ if (ctx->display)
+ exynos_drm_create_enc_conn(drm_dev, ctx->display);
+
+ return 0;
+}
+
+static void fimd_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
+ struct fimd_context *ctx = fimd_manager.ctx;
+ struct drm_crtc *crtc = mgr->crtc;
+
+ fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
+
+ if (ctx->display)
+ exynos_dpi_remove(dev);
+
+ fimd_mgr_remove(mgr);
+
+ crtc->funcs->destroy(crtc);
+}
+
+static const struct component_ops fimd_component_ops = {
+ .bind = fimd_bind,
+ .unbind = fimd_unbind,
+};
+
+static int fimd_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimd_context *ctx;
+ struct resource *res;
+ int ret;
+
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
+ fimd_manager.type);
+ if (ret)
+ return ret;
+
+ if (!dev->of_node) {
+ ret = -ENODEV;
+ goto err_del_component;
+ }
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto err_del_component;
+ }
+
+ ctx->dev = dev;
+ ctx->suspended = true;
+
+ if (of_property_read_bool(dev->of_node, "samsung,invert-vden"))
+ ctx->vidcon1 |= VIDCON1_INV_VDEN;
+ if (of_property_read_bool(dev->of_node, "samsung,invert-vclk"))
+ ctx->vidcon1 |= VIDCON1_INV_VCLK;
+
+ ctx->bus_clk = devm_clk_get(dev, "fimd");
+ if (IS_ERR(ctx->bus_clk)) {
+ dev_err(dev, "failed to get bus clock\n");
+ ret = PTR_ERR(ctx->bus_clk);
+ goto err_del_component;
+ }
+
+ ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
+ if (IS_ERR(ctx->lcd_clk)) {
+ dev_err(dev, "failed to get lcd clock\n");
+ ret = PTR_ERR(ctx->lcd_clk);
+ goto err_del_component;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ ctx->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctx->regs)) {
+ ret = PTR_ERR(ctx->regs);
+ goto err_del_component;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync");
+ if (!res) {
+ dev_err(dev, "irq request failed.\n");
+ ret = -ENXIO;
+ goto err_del_component;
+ }
+
+ ret = devm_request_irq(dev, res->start, fimd_irq_handler,
+ 0, "drm_fimd", ctx);
+ if (ret) {
+ dev_err(dev, "irq request failed.\n");
+ goto err_del_component;
+ }
+
+ ctx->driver_data = drm_fimd_get_driver_data(pdev);
+ init_waitqueue_head(&ctx->wait_vsync_queue);
+ atomic_set(&ctx->wait_vsync_event, 0);
+
+ platform_set_drvdata(pdev, &fimd_manager);
+
+ fimd_manager.ctx = ctx;
+
+ ctx->display = exynos_dpi_probe(dev);
+ if (IS_ERR(ctx->display)) {
+ ret = PTR_ERR(ctx->display);
+ goto err_del_component;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = component_add(&pdev->dev, &fimd_component_ops);
+ if (ret)
+ goto err_disable_pm_runtime;
+
+ return ret;
+
+err_disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+
+err_del_component:
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+ return ret;
+}
+
+static int fimd_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ component_del(&pdev->dev, &fimd_component_ops);
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+
+ return 0;
+}
+
+struct platform_driver fimd_driver = {
+ .probe = fimd_probe,
+ .remove = fimd_remove,
+ .driver = {
+ .name = "exynos4-fb",
+ .owner = THIS_MODULE,
+ .of_match_table = fimd_driver_dt_match,
+ },
+};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
new file mode 100644
index 00000000000..80015871447
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -0,0 +1,1559 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-attrs.h>
+#include <linux/of.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_g2d.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+
+#define G2D_HW_MAJOR_VER 4
+#define G2D_HW_MINOR_VER 1
+
+/* valid register range set from user: 0x0104 ~ 0x0880 */
+#define G2D_VALID_START 0x0104
+#define G2D_VALID_END 0x0880
+
+/* general registers */
+#define G2D_SOFT_RESET 0x0000
+#define G2D_INTEN 0x0004
+#define G2D_INTC_PEND 0x000C
+#define G2D_DMA_SFR_BASE_ADDR 0x0080
+#define G2D_DMA_COMMAND 0x0084
+#define G2D_DMA_STATUS 0x008C
+#define G2D_DMA_HOLD_CMD 0x0090
+
+/* command registers */
+#define G2D_BITBLT_START 0x0100
+
+/* registers for base address */
+#define G2D_SRC_BASE_ADDR 0x0304
+#define G2D_SRC_COLOR_MODE 0x030C
+#define G2D_SRC_LEFT_TOP 0x0310
+#define G2D_SRC_RIGHT_BOTTOM 0x0314
+#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
+#define G2D_DST_BASE_ADDR 0x0404
+#define G2D_DST_COLOR_MODE 0x040C
+#define G2D_DST_LEFT_TOP 0x0410
+#define G2D_DST_RIGHT_BOTTOM 0x0414
+#define G2D_DST_PLANE2_BASE_ADDR 0x0418
+#define G2D_PAT_BASE_ADDR 0x0500
+#define G2D_MSK_BASE_ADDR 0x0520
+
+/* G2D_SOFT_RESET */
+#define G2D_SFRCLEAR (1 << 1)
+#define G2D_R (1 << 0)
+
+/* G2D_INTEN */
+#define G2D_INTEN_ACF (1 << 3)
+#define G2D_INTEN_UCF (1 << 2)
+#define G2D_INTEN_GCF (1 << 1)
+#define G2D_INTEN_SCF (1 << 0)
+
+/* G2D_INTC_PEND */
+#define G2D_INTP_ACMD_FIN (1 << 3)
+#define G2D_INTP_UCMD_FIN (1 << 2)
+#define G2D_INTP_GCMD_FIN (1 << 1)
+#define G2D_INTP_SCMD_FIN (1 << 0)
+
+/* G2D_DMA_COMMAND */
+#define G2D_DMA_HALT (1 << 2)
+#define G2D_DMA_CONTINUE (1 << 1)
+#define G2D_DMA_START (1 << 0)
+
+/* G2D_DMA_STATUS */
+#define G2D_DMA_LIST_DONE_COUNT (0xFF << 17)
+#define G2D_DMA_BITBLT_DONE_COUNT (0xFFFF << 1)
+#define G2D_DMA_DONE (1 << 0)
+#define G2D_DMA_LIST_DONE_COUNT_OFFSET 17
+
+/* G2D_DMA_HOLD_CMD */
+#define G2D_USER_HOLD (1 << 2)
+#define G2D_LIST_HOLD (1 << 1)
+#define G2D_BITBLT_HOLD (1 << 0)
+
+/* G2D_BITBLT_START */
+#define G2D_START_CASESEL (1 << 2)
+#define G2D_START_NHOLT (1 << 1)
+#define G2D_START_BITBLT (1 << 0)
+
+/* buffer color format */
+#define G2D_FMT_XRGB8888 0
+#define G2D_FMT_ARGB8888 1
+#define G2D_FMT_RGB565 2
+#define G2D_FMT_XRGB1555 3
+#define G2D_FMT_ARGB1555 4
+#define G2D_FMT_XRGB4444 5
+#define G2D_FMT_ARGB4444 6
+#define G2D_FMT_PACKED_RGB888 7
+#define G2D_FMT_A8 11
+#define G2D_FMT_L8 12
+
+/* buffer valid length */
+#define G2D_LEN_MIN 1
+#define G2D_LEN_MAX 8000
+
+#define G2D_CMDLIST_SIZE (PAGE_SIZE / 4)
+#define G2D_CMDLIST_NUM 64
+#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
+#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
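+
+/*
+ * Sizing note (assuming 4 KiB pages): each cmdlist occupies
+ * G2D_CMDLIST_SIZE = 4096 / 4 = 1024 bytes, so G2D_CMDLIST_DATA_NUM leaves
+ * 1024 / 4 - 2 = 254 u32 slots after reserving the head/last bookkeeping
+ * fields, and the 64-entry pool fits in G2D_CMDLIST_POOL_SIZE = 64 KiB.
+ */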
+
+/* maximum buffer pool size of userptr is 64MB as default */
+#define MAX_POOL (64 * 1024 * 1024)
+
+enum {
+ BUF_TYPE_GEM = 1,
+ BUF_TYPE_USERPTR,
+};
+
+enum g2d_reg_type {
+ REG_TYPE_NONE = -1,
+ REG_TYPE_SRC,
+ REG_TYPE_SRC_PLANE2,
+ REG_TYPE_DST,
+ REG_TYPE_DST_PLANE2,
+ REG_TYPE_PAT,
+ REG_TYPE_MSK,
+ MAX_REG_TYPE_NR
+};
+
+/* cmdlist data structure */
+struct g2d_cmdlist {
+ u32 head;
+ unsigned long data[G2D_CMDLIST_DATA_NUM];
+ u32 last; /* last data offset */
+};
+
+/*
+ * A structure of buffer description
+ *
+ * @format: color format
+ * @left_x: the x coordinate of the top-left corner
+ * @top_y: the y coordinate of the top-left corner
+ * @right_x: the x coordinate of the bottom-right corner
+ * @bottom_y: the y coordinate of the bottom-right corner
+ *
+ */
+struct g2d_buf_desc {
+ unsigned int format;
+ unsigned int left_x;
+ unsigned int top_y;
+ unsigned int right_x;
+ unsigned int bottom_y;
+};
+
+/*
+ * A structure of buffer information
+ *
+ * @map_nr: the number of mapped buffers
+ * @reg_types: stores the register type in the order of the requested commands
+ * @handles: stores the buffer handle in its reg_type position
+ * @types: stores the buffer type in its reg_type position
+ * @descs: stores the buffer description in its reg_type position
+ */
+struct g2d_buf_info {
+ unsigned int map_nr;
+ enum g2d_reg_type reg_types[MAX_REG_TYPE_NR];
+ unsigned long handles[MAX_REG_TYPE_NR];
+ unsigned int types[MAX_REG_TYPE_NR];
+ struct g2d_buf_desc descs[MAX_REG_TYPE_NR];
+};
+
+struct drm_exynos_pending_g2d_event {
+ struct drm_pending_event base;
+ struct drm_exynos_g2d_event event;
+};
+
+struct g2d_cmdlist_userptr {
+ struct list_head list;
+ dma_addr_t dma_addr;
+ unsigned long userptr;
+ unsigned long size;
+ struct page **pages;
+ unsigned int npages;
+ struct sg_table *sgt;
+ struct vm_area_struct *vma;
+ atomic_t refcount;
+ bool in_pool;
+ bool out_of_list;
+};
+
+struct g2d_cmdlist_node {
+ struct list_head list;
+ struct g2d_cmdlist *cmdlist;
+ dma_addr_t dma_addr;
+ struct g2d_buf_info buf_info;
+
+ struct drm_exynos_pending_g2d_event *event;
+};
+
+struct g2d_runqueue_node {
+ struct list_head list;
+ struct list_head run_cmdlist;
+ struct list_head event_list;
+ struct drm_file *filp;
+ pid_t pid;
+ struct completion complete;
+ int async;
+};
+
+struct g2d_data {
+ struct device *dev;
+ struct clk *gate_clk;
+ void __iomem *regs;
+ int irq;
+ struct workqueue_struct *g2d_workq;
+ struct work_struct runqueue_work;
+ struct exynos_drm_subdrv subdrv;
+ bool suspended;
+
+ /* cmdlist */
+ struct g2d_cmdlist_node *cmdlist_node;
+ struct list_head free_cmdlist;
+ struct mutex cmdlist_mutex;
+ dma_addr_t cmdlist_pool;
+ void *cmdlist_pool_virt;
+ struct dma_attrs cmdlist_dma_attrs;
+
+	/* runqueue */
+ struct g2d_runqueue_node *runqueue_node;
+ struct list_head runqueue;
+ struct mutex runqueue_mutex;
+ struct kmem_cache *runqueue_slab;
+
+ unsigned long current_pool;
+ unsigned long max_pool;
+};
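+
+/*
+ * Locking (as used below): cmdlist_mutex serializes allocation and
+ * release of command list nodes; runqueue_mutex serializes the runqueue
+ * list, the active runqueue_node and the suspended flag.
+ */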
+
+static int g2d_init_cmdlist(struct g2d_data *g2d)
+{
+ struct device *dev = g2d->dev;
+	struct g2d_cmdlist_node *node;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+ int nr;
+ int ret;
+ struct g2d_buf_info *buf_info;
+
+ init_dma_attrs(&g2d->cmdlist_dma_attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
+
+ g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
+ G2D_CMDLIST_POOL_SIZE,
+ &g2d->cmdlist_pool, GFP_KERNEL,
+ &g2d->cmdlist_dma_attrs);
+ if (!g2d->cmdlist_pool_virt) {
+ dev_err(dev, "failed to allocate dma memory\n");
+ return -ENOMEM;
+ }
+
+	g2d->cmdlist_node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node),
+				    GFP_KERNEL);
+	if (!g2d->cmdlist_node) {
+		dev_err(dev, "failed to allocate memory\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	node = g2d->cmdlist_node;
+
+ for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
+ unsigned int i;
+
+ node[nr].cmdlist =
+ g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
+ node[nr].dma_addr =
+ g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
+
+ buf_info = &node[nr].buf_info;
+ for (i = 0; i < MAX_REG_TYPE_NR; i++)
+ buf_info->reg_types[i] = REG_TYPE_NONE;
+
+ list_add_tail(&node[nr].list, &g2d->free_cmdlist);
+ }
+
+ return 0;
+
+err:
+ dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+ g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+ return ret;
+}
+
+static void g2d_fini_cmdlist(struct g2d_data *g2d)
+{
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+
+ kfree(g2d->cmdlist_node);
+ dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+ g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+}
+
+static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
+{
+ struct device *dev = g2d->dev;
+ struct g2d_cmdlist_node *node;
+
+ mutex_lock(&g2d->cmdlist_mutex);
+ if (list_empty(&g2d->free_cmdlist)) {
+ dev_err(dev, "there is no free cmdlist\n");
+ mutex_unlock(&g2d->cmdlist_mutex);
+ return NULL;
+ }
+
+ node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
+ list);
+ list_del_init(&node->list);
+ mutex_unlock(&g2d->cmdlist_mutex);
+
+ return node;
+}
+
+static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
+{
+ mutex_lock(&g2d->cmdlist_mutex);
+ list_move_tail(&node->list, &g2d->free_cmdlist);
+ mutex_unlock(&g2d->cmdlist_mutex);
+}
+
+static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
+ struct g2d_cmdlist_node *node)
+{
+ struct g2d_cmdlist_node *lnode;
+
+ if (list_empty(&g2d_priv->inuse_cmdlist))
+ goto add_to_list;
+
+ /* this links to base address of new cmdlist */
+ lnode = list_entry(g2d_priv->inuse_cmdlist.prev,
+ struct g2d_cmdlist_node, list);
+ lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
+
+add_to_list:
+ list_add_tail(&node->list, &g2d_priv->inuse_cmdlist);
+
+ if (node->event)
+ list_add_tail(&node->event->base.link, &g2d_priv->event_list);
+}
+
+static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
+ unsigned long obj,
+ bool force)
+{
+ struct g2d_cmdlist_userptr *g2d_userptr =
+ (struct g2d_cmdlist_userptr *)obj;
+
+ if (!obj)
+ return;
+
+ if (force)
+ goto out;
+
+	if (atomic_dec_return(&g2d_userptr->refcount) > 0)
+		return;
+
+ if (g2d_userptr->in_pool)
+ return;
+
+out:
+ exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
+ DMA_BIDIRECTIONAL);
+
+ exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+ g2d_userptr->npages,
+ g2d_userptr->vma);
+
+ exynos_gem_put_vma(g2d_userptr->vma);
+
+ if (!g2d_userptr->out_of_list)
+ list_del_init(&g2d_userptr->list);
+
+ sg_free_table(g2d_userptr->sgt);
+ kfree(g2d_userptr->sgt);
+
+ drm_free_large(g2d_userptr->pages);
+ kfree(g2d_userptr);
+}
+
+static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+ unsigned long userptr,
+ unsigned long size,
+ struct drm_file *filp,
+ unsigned long *obj)
+{
+ struct drm_exynos_file_private *file_priv = filp->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr;
+ struct g2d_data *g2d;
+ struct page **pages;
+ struct sg_table *sgt;
+ struct vm_area_struct *vma;
+ unsigned long start, end;
+ unsigned int npages, offset;
+ int ret;
+
+ if (!size) {
+ DRM_ERROR("invalid userptr size.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ g2d = dev_get_drvdata(g2d_priv->dev);
+
+ /* check if userptr already exists in userptr_list. */
+ list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+ if (g2d_userptr->userptr == userptr) {
+ /*
+ * also check size because there could be same address
+ * and different size.
+ */
+ if (g2d_userptr->size == size) {
+ atomic_inc(&g2d_userptr->refcount);
+ *obj = (unsigned long)g2d_userptr;
+
+ return &g2d_userptr->dma_addr;
+ }
+
+			/*
+			 * At this point the G2D DMA engine may still be
+			 * accessing this g2d_userptr memory region, so just
+			 * remove the object from userptr_list so it cannot
+			 * be looked up again, and exclude it from the
+			 * userptr pool so that it is released once the DMA
+			 * access completes.
+			 */
+ g2d_userptr->out_of_list = true;
+ g2d_userptr->in_pool = false;
+ list_del_init(&g2d_userptr->list);
+
+ break;
+ }
+ }
+
+ g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
+ if (!g2d_userptr)
+ return ERR_PTR(-ENOMEM);
+
+ atomic_set(&g2d_userptr->refcount, 1);
+
+ start = userptr & PAGE_MASK;
+ offset = userptr & ~PAGE_MASK;
+ end = PAGE_ALIGN(userptr + size);
+ npages = (end - start) >> PAGE_SHIFT;
+ g2d_userptr->npages = npages;
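+	/*
+	 * Example (hypothetical values): userptr = 0x10000234 and
+	 * size = 0x2000 give start = 0x10000000, offset = 0x234,
+	 * end = 0x10003000 and npages = 3, since the partially used
+	 * first and last pages must be covered as well.
+	 */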
+
+ pages = drm_calloc_large(npages, sizeof(struct page *));
+ if (!pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, userptr);
+ if (!vma) {
+ up_read(&current->mm->mmap_sem);
+ DRM_ERROR("failed to get vm region.\n");
+ ret = -EFAULT;
+ goto err_free_pages;
+ }
+
+ if (vma->vm_end < userptr + size) {
+ up_read(&current->mm->mmap_sem);
+ DRM_ERROR("vma is too small.\n");
+ ret = -EFAULT;
+ goto err_free_pages;
+ }
+
+ g2d_userptr->vma = exynos_gem_get_vma(vma);
+ if (!g2d_userptr->vma) {
+ up_read(&current->mm->mmap_sem);
+ DRM_ERROR("failed to copy vma.\n");
+ ret = -ENOMEM;
+ goto err_free_pages;
+ }
+
+ g2d_userptr->size = size;
+
+	ret = exynos_gem_get_pages_from_userptr(start, npages, pages, vma);
+ if (ret < 0) {
+ up_read(&current->mm->mmap_sem);
+ DRM_ERROR("failed to get user pages from userptr.\n");
+ goto err_put_vma;
+ }
+
+ up_read(&current->mm->mmap_sem);
+ g2d_userptr->pages = pages;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ ret = -ENOMEM;
+ goto err_free_userptr;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
+ size, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_ERROR("failed to get sgt from pages.\n");
+ goto err_free_sgt;
+ }
+
+ g2d_userptr->sgt = sgt;
+
+ ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
+ DMA_BIDIRECTIONAL);
+ if (ret < 0) {
+ DRM_ERROR("failed to map sgt with dma region.\n");
+ goto err_sg_free_table;
+ }
+
+ g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
+ g2d_userptr->userptr = userptr;
+
+ list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+
+ if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
+ g2d->current_pool += npages << PAGE_SHIFT;
+ g2d_userptr->in_pool = true;
+ }
+
+ *obj = (unsigned long)g2d_userptr;
+
+ return &g2d_userptr->dma_addr;
+
+err_sg_free_table:
+ sg_free_table(sgt);
+
+err_free_sgt:
+ kfree(sgt);
+
+err_free_userptr:
+ exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+ g2d_userptr->npages,
+ g2d_userptr->vma);
+
+err_put_vma:
+ exynos_gem_put_vma(g2d_userptr->vma);
+
+err_free_pages:
+ drm_free_large(pages);
+
+err_free:
+ kfree(g2d_userptr);
+
+ return ERR_PTR(ret);
+}
+
+static void g2d_userptr_free_all(struct drm_device *drm_dev,
+ struct g2d_data *g2d,
+ struct drm_file *filp)
+{
+ struct drm_exynos_file_private *file_priv = filp->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr, *n;
+
+ list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+ if (g2d_userptr->in_pool)
+ g2d_userptr_put_dma_addr(drm_dev,
+ (unsigned long)g2d_userptr,
+ true);
+
+ g2d->current_pool = 0;
+}
+
+static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
+{
+ enum g2d_reg_type reg_type;
+
+ switch (reg_offset) {
+ case G2D_SRC_BASE_ADDR:
+ case G2D_SRC_COLOR_MODE:
+ case G2D_SRC_LEFT_TOP:
+ case G2D_SRC_RIGHT_BOTTOM:
+ reg_type = REG_TYPE_SRC;
+ break;
+ case G2D_SRC_PLANE2_BASE_ADDR:
+ reg_type = REG_TYPE_SRC_PLANE2;
+ break;
+ case G2D_DST_BASE_ADDR:
+ case G2D_DST_COLOR_MODE:
+ case G2D_DST_LEFT_TOP:
+ case G2D_DST_RIGHT_BOTTOM:
+ reg_type = REG_TYPE_DST;
+ break;
+ case G2D_DST_PLANE2_BASE_ADDR:
+ reg_type = REG_TYPE_DST_PLANE2;
+ break;
+ case G2D_PAT_BASE_ADDR:
+ reg_type = REG_TYPE_PAT;
+ break;
+ case G2D_MSK_BASE_ADDR:
+ reg_type = REG_TYPE_MSK;
+ break;
+ default:
+ reg_type = REG_TYPE_NONE;
+		DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
+ break;
+ }
+
+ return reg_type;
+}
+
+static unsigned long g2d_get_buf_bpp(unsigned int format)
+{
+ unsigned long bpp;
+
+ switch (format) {
+ case G2D_FMT_XRGB8888:
+ case G2D_FMT_ARGB8888:
+ bpp = 4;
+ break;
+ case G2D_FMT_RGB565:
+ case G2D_FMT_XRGB1555:
+ case G2D_FMT_ARGB1555:
+ case G2D_FMT_XRGB4444:
+ case G2D_FMT_ARGB4444:
+ bpp = 2;
+ break;
+ case G2D_FMT_PACKED_RGB888:
+ bpp = 3;
+ break;
+ default:
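+		/* remaining formats (e.g. G2D_FMT_A8, G2D_FMT_L8) use 1 byte per pixel */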
+ bpp = 1;
+ break;
+ }
+
+ return bpp;
+}
+
+static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
+ enum g2d_reg_type reg_type,
+ unsigned long size)
+{
+ unsigned int width, height;
+ unsigned long area;
+
+	/*
+	 * Only source and destination buffers are checked here;
+	 * the other buffer types are considered always valid.
+	 */
+ if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
+ return true;
+
+ width = buf_desc->right_x - buf_desc->left_x;
+ if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
+ DRM_ERROR("width[%u] is out of range!\n", width);
+ return false;
+ }
+
+ height = buf_desc->bottom_y - buf_desc->top_y;
+ if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
+ DRM_ERROR("height[%u] is out of range!\n", height);
+ return false;
+ }
+
+ area = (unsigned long)width * (unsigned long)height *
+ g2d_get_buf_bpp(buf_desc->format);
+ if (area > size) {
+ DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size);
+ return false;
+ }
+
+ return true;
+}
+
+static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
+ struct g2d_cmdlist_node *node,
+ struct drm_device *drm_dev,
+ struct drm_file *file)
+{
+ struct g2d_cmdlist *cmdlist = node->cmdlist;
+ struct g2d_buf_info *buf_info = &node->buf_info;
+ int offset;
+ int ret;
+ int i;
+
+ for (i = 0; i < buf_info->map_nr; i++) {
+ struct g2d_buf_desc *buf_desc;
+ enum g2d_reg_type reg_type;
+ int reg_pos;
+ unsigned long handle;
+ dma_addr_t *addr;
+
+ reg_pos = cmdlist->last - 2 * (i + 1);
+
+ offset = cmdlist->data[reg_pos];
+ handle = cmdlist->data[reg_pos + 1];
+
+ reg_type = g2d_get_reg_type(offset);
+ if (reg_type == REG_TYPE_NONE) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ buf_desc = &buf_info->descs[reg_type];
+
+ if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
+ unsigned long size;
+
+ size = exynos_drm_gem_get_size(drm_dev, handle, file);
+ if (!size) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
+ size)) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
+ file);
+ if (IS_ERR(addr)) {
+ ret = -EFAULT;
+ goto err;
+ }
+ } else {
+ struct drm_exynos_g2d_userptr g2d_userptr;
+
+ if (copy_from_user(&g2d_userptr, (void __user *)handle,
+ sizeof(struct drm_exynos_g2d_userptr))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
+ g2d_userptr.size)) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ addr = g2d_userptr_get_dma_addr(drm_dev,
+ g2d_userptr.userptr,
+ g2d_userptr.size,
+ file,
+ &handle);
+ if (IS_ERR(addr)) {
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+
+ cmdlist->data[reg_pos + 1] = *addr;
+ buf_info->reg_types[i] = reg_type;
+ buf_info->handles[reg_type] = handle;
+ }
+
+ return 0;
+
+err:
+ buf_info->map_nr = i;
+ return ret;
+}
+
+static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
+ struct g2d_cmdlist_node *node,
+ struct drm_file *filp)
+{
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+ struct g2d_buf_info *buf_info = &node->buf_info;
+ int i;
+
+ for (i = 0; i < buf_info->map_nr; i++) {
+ struct g2d_buf_desc *buf_desc;
+ enum g2d_reg_type reg_type;
+ unsigned long handle;
+
+ reg_type = buf_info->reg_types[i];
+
+ buf_desc = &buf_info->descs[reg_type];
+ handle = buf_info->handles[reg_type];
+
+ if (buf_info->types[reg_type] == BUF_TYPE_GEM)
+ exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
+ filp);
+ else
+ g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
+ false);
+
+ buf_info->reg_types[i] = REG_TYPE_NONE;
+ buf_info->handles[reg_type] = 0;
+ buf_info->types[reg_type] = 0;
+ memset(buf_desc, 0x00, sizeof(*buf_desc));
+ }
+
+ buf_info->map_nr = 0;
+}
+
+static void g2d_dma_start(struct g2d_data *g2d,
+ struct g2d_runqueue_node *runqueue_node)
+{
+ struct g2d_cmdlist_node *node =
+ list_first_entry(&runqueue_node->run_cmdlist,
+ struct g2d_cmdlist_node, list);
+ int ret;
+
+ ret = pm_runtime_get_sync(g2d->dev);
+ if (ret < 0)
+ return;
+
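+	/* point the DMA engine at the first cmdlist and start execution */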
+ writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
+ writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
+}
+
+static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
+{
+ struct g2d_runqueue_node *runqueue_node;
+
+ if (list_empty(&g2d->runqueue))
+ return NULL;
+
+ runqueue_node = list_first_entry(&g2d->runqueue,
+ struct g2d_runqueue_node, list);
+ list_del_init(&runqueue_node->list);
+ return runqueue_node;
+}
+
+static void g2d_free_runqueue_node(struct g2d_data *g2d,
+ struct g2d_runqueue_node *runqueue_node)
+{
+ struct g2d_cmdlist_node *node;
+
+ if (!runqueue_node)
+ return;
+
+ mutex_lock(&g2d->cmdlist_mutex);
+	/*
+	 * The commands in run_cmdlist have completed, so unmap all gem
+	 * objects in each command node to drop their references.
+	 */
+ list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
+ g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
+ list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
+ mutex_unlock(&g2d->cmdlist_mutex);
+
+ kmem_cache_free(g2d->runqueue_slab, runqueue_node);
+}
+
+static void g2d_exec_runqueue(struct g2d_data *g2d)
+{
+ g2d->runqueue_node = g2d_get_runqueue_node(g2d);
+ if (g2d->runqueue_node)
+ g2d_dma_start(g2d, g2d->runqueue_node);
+}
+
+static void g2d_runqueue_worker(struct work_struct *work)
+{
+ struct g2d_data *g2d = container_of(work, struct g2d_data,
+ runqueue_work);
+
+ mutex_lock(&g2d->runqueue_mutex);
+ pm_runtime_put_sync(g2d->dev);
+
+ complete(&g2d->runqueue_node->complete);
+ if (g2d->runqueue_node->async)
+ g2d_free_runqueue_node(g2d, g2d->runqueue_node);
+
+ if (g2d->suspended)
+ g2d->runqueue_node = NULL;
+ else
+ g2d_exec_runqueue(g2d);
+ mutex_unlock(&g2d->runqueue_mutex);
+}
+
+static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
+{
+ struct drm_device *drm_dev = g2d->subdrv.drm_dev;
+ struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
+ struct drm_exynos_pending_g2d_event *e;
+ struct timeval now;
+ unsigned long flags;
+
+ if (list_empty(&runqueue_node->event_list))
+ return;
+
+ e = list_first_entry(&runqueue_node->event_list,
+ struct drm_exynos_pending_g2d_event, base.link);
+
+ do_gettimeofday(&now);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ e->event.cmdlist_no = cmdlist_no;
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+}
+
+static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
+{
+ struct g2d_data *g2d = dev_id;
+ u32 pending;
+
+ pending = readl_relaxed(g2d->regs + G2D_INTC_PEND);
+ if (pending)
+ writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);
+
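+	/* a held command list finished: signal its event and release the hold */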
+ if (pending & G2D_INTP_GCMD_FIN) {
+ u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);
+
+ cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
+ G2D_DMA_LIST_DONE_COUNT_OFFSET;
+
+ g2d_finish_event(g2d, cmdlist_no);
+
+ writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
+ if (!(pending & G2D_INTP_ACMD_FIN)) {
+ writel_relaxed(G2D_DMA_CONTINUE,
+ g2d->regs + G2D_DMA_COMMAND);
+ }
+ }
+
+ if (pending & G2D_INTP_ACMD_FIN)
+ queue_work(g2d->g2d_workq, &g2d->runqueue_work);
+
+ return IRQ_HANDLED;
+}
+
+static int g2d_check_reg_offset(struct device *dev,
+ struct g2d_cmdlist_node *node,
+ int nr, bool for_addr)
+{
+ struct g2d_cmdlist *cmdlist = node->cmdlist;
+ int reg_offset;
+ int index;
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ struct g2d_buf_info *buf_info = &node->buf_info;
+ struct g2d_buf_desc *buf_desc;
+ enum g2d_reg_type reg_type;
+ unsigned long value;
+
+ index = cmdlist->last - 2 * (i + 1);
+
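+		/* the low 12 bits hold the SFR offset; bit 31 may carry G2D_BUF_USERPTR */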
+ reg_offset = cmdlist->data[index] & ~0xfffff000;
+ if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
+ goto err;
+ if (reg_offset % 4)
+ goto err;
+
+ switch (reg_offset) {
+ case G2D_SRC_BASE_ADDR:
+ case G2D_SRC_PLANE2_BASE_ADDR:
+ case G2D_DST_BASE_ADDR:
+ case G2D_DST_PLANE2_BASE_ADDR:
+ case G2D_PAT_BASE_ADDR:
+ case G2D_MSK_BASE_ADDR:
+ if (!for_addr)
+ goto err;
+
+ reg_type = g2d_get_reg_type(reg_offset);
+ if (reg_type == REG_TYPE_NONE)
+ goto err;
+
+ /* check userptr buffer type. */
+ if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
+ buf_info->types[reg_type] = BUF_TYPE_USERPTR;
+ cmdlist->data[index] &= ~G2D_BUF_USERPTR;
+ } else
+ buf_info->types[reg_type] = BUF_TYPE_GEM;
+ break;
+ case G2D_SRC_COLOR_MODE:
+ case G2D_DST_COLOR_MODE:
+ if (for_addr)
+ goto err;
+
+ reg_type = g2d_get_reg_type(reg_offset);
+ if (reg_type == REG_TYPE_NONE)
+ goto err;
+
+ buf_desc = &buf_info->descs[reg_type];
+ value = cmdlist->data[index + 1];
+
+ buf_desc->format = value & 0xf;
+ break;
+ case G2D_SRC_LEFT_TOP:
+ case G2D_DST_LEFT_TOP:
+ if (for_addr)
+ goto err;
+
+ reg_type = g2d_get_reg_type(reg_offset);
+ if (reg_type == REG_TYPE_NONE)
+ goto err;
+
+ buf_desc = &buf_info->descs[reg_type];
+ value = cmdlist->data[index + 1];
+
+ buf_desc->left_x = value & 0x1fff;
+ buf_desc->top_y = (value & 0x1fff0000) >> 16;
+ break;
+ case G2D_SRC_RIGHT_BOTTOM:
+ case G2D_DST_RIGHT_BOTTOM:
+ if (for_addr)
+ goto err;
+
+ reg_type = g2d_get_reg_type(reg_offset);
+ if (reg_type == REG_TYPE_NONE)
+ goto err;
+
+ buf_desc = &buf_info->descs[reg_type];
+ value = cmdlist->data[index + 1];
+
+ buf_desc->right_x = value & 0x1fff;
+ buf_desc->bottom_y = (value & 0x1fff0000) >> 16;
+ break;
+ default:
+ if (for_addr)
+ goto err;
+ break;
+ }
+ }
+
+ return 0;
+
+err:
+ dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
+ return -EINVAL;
+}
+
+/* ioctl functions */
+int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_g2d_get_ver *ver = data;
+
+ ver->major = G2D_HW_MAJOR_VER;
+ ver->minor = G2D_HW_MINOR_VER;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
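+
+/*
+ * A minimal userspace sketch of reaching the ioctl above through libdrm
+ * (request name taken from the exynos_drm UAPI header; error handling
+ * omitted):
+ *
+ *	struct drm_exynos_g2d_get_ver ver;
+ *
+ *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_G2D_GET_VER, &ver) == 0)
+ *		printf("G2D hw version %d.%d\n", ver.major, ver.minor);
+ */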
+
+int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct device *dev = g2d_priv->dev;
+ struct g2d_data *g2d;
+ struct drm_exynos_g2d_set_cmdlist *req = data;
+ struct drm_exynos_g2d_cmd *cmd;
+ struct drm_exynos_pending_g2d_event *e;
+ struct g2d_cmdlist_node *node;
+ struct g2d_cmdlist *cmdlist;
+ unsigned long flags;
+ int size;
+ int ret;
+
+ if (!dev)
+ return -ENODEV;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
+ node = g2d_get_cmdlist(g2d);
+ if (!node)
+ return -ENOMEM;
+
+ node->event = NULL;
+
+ if (req->event_type != G2D_EVENT_NOT) {
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ if (file->event_space < sizeof(e->event)) {
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ ret = -ENOMEM;
+ goto err;
+ }
+ file->event_space -= sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ e = kzalloc(sizeof(*node->event), GFP_KERNEL);
+ if (!e) {
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ file->event_space += sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ e->event.base.type = DRM_EXYNOS_G2D_EVENT;
+ e->event.base.length = sizeof(e->event);
+ e->event.user_data = req->user_data;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file;
+ e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+ node->event = e;
+ }
+
+ cmdlist = node->cmdlist;
+
+ cmdlist->last = 0;
+
+	/*
+	 * If the SFR registers are not cleared, the cmdlist is affected by
+	 * the register values of the previous cmdlist. The G2D hardware
+	 * executes the SFR clear command and the next command at the same
+	 * time, which would make it skip that next command, so a dummy
+	 * command is inserted right after the SFR clear command.
+	 */
+ cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
+ cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR;
+ cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
+ cmdlist->data[cmdlist->last++] = 0;
+
+	/*
+	 * If the user wants a G2D interrupt event once the current command
+	 * list has finished, the 'LIST_HOLD' command must be written to the
+	 * DMA_HOLD_CMD register and the GCF bit set in the INTEN register.
+	 * Otherwise only the ACF bit is set in INTEN, so that a single
+	 * interrupt occurs after all command lists have completed.
+	 */
+ if (node->event) {
+ cmdlist->data[cmdlist->last++] = G2D_INTEN;
+ cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF;
+ cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
+ cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
+ } else {
+ cmdlist->data[cmdlist->last++] = G2D_INTEN;
+ cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
+ }
+
+	/* Check the cmdlist size; the final 2 entries are reserved for G2D_BITBLT_START */
+ size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
+ if (size > G2D_CMDLIST_DATA_NUM) {
+ dev_err(dev, "cmdlist size is too big\n");
+ ret = -EINVAL;
+ goto err_free_event;
+ }
+
+	cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd;
+
+ if (copy_from_user(cmdlist->data + cmdlist->last,
+ (void __user *)cmd,
+ sizeof(*cmd) * req->cmd_nr)) {
+ ret = -EFAULT;
+ goto err_free_event;
+ }
+ cmdlist->last += req->cmd_nr * 2;
+
+ ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
+ if (ret < 0)
+ goto err_free_event;
+
+ node->buf_info.map_nr = req->cmd_buf_nr;
+ if (req->cmd_buf_nr) {
+ struct drm_exynos_g2d_cmd *cmd_buf;
+
+		cmd_buf = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd_buf;
+
+ if (copy_from_user(cmdlist->data + cmdlist->last,
+ (void __user *)cmd_buf,
+ sizeof(*cmd_buf) * req->cmd_buf_nr)) {
+ ret = -EFAULT;
+ goto err_free_event;
+ }
+ cmdlist->last += req->cmd_buf_nr * 2;
+
+ ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
+ if (ret < 0)
+ goto err_free_event;
+
+ ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
+ if (ret < 0)
+ goto err_unmap;
+ }
+
+ cmdlist->data[cmdlist->last++] = G2D_BITBLT_START;
+ cmdlist->data[cmdlist->last++] = G2D_START_BITBLT;
+
+	/* head: the number of (register, value) command pairs */
+ cmdlist->head = cmdlist->last / 2;
+
+	/* tail: zero now; patched to the next cmdlist's dma_addr when chained */
+ cmdlist->data[cmdlist->last] = 0;
+
+ g2d_add_cmdlist_to_inuse(g2d_priv, node);
+
+ return 0;
+
+err_unmap:
+ g2d_unmap_cmdlist_gem(g2d, node, file);
+err_free_event:
+ if (node->event) {
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ file->event_space += sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ kfree(node->event);
+ }
+err:
+ g2d_put_cmdlist(g2d, node);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
+
+int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct device *dev = g2d_priv->dev;
+ struct g2d_data *g2d;
+ struct drm_exynos_g2d_exec *req = data;
+ struct g2d_runqueue_node *runqueue_node;
+ struct list_head *run_cmdlist;
+ struct list_head *event_list;
+
+ if (!dev)
+ return -ENODEV;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
+ runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
+ if (!runqueue_node) {
+ dev_err(dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+ run_cmdlist = &runqueue_node->run_cmdlist;
+ event_list = &runqueue_node->event_list;
+ INIT_LIST_HEAD(run_cmdlist);
+ INIT_LIST_HEAD(event_list);
+ init_completion(&runqueue_node->complete);
+ runqueue_node->async = req->async;
+
+ list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist);
+ list_splice_init(&g2d_priv->event_list, event_list);
+
+ if (list_empty(run_cmdlist)) {
+ dev_err(dev, "there is no inuse cmdlist\n");
+ kmem_cache_free(g2d->runqueue_slab, runqueue_node);
+ return -EPERM;
+ }
+
+ mutex_lock(&g2d->runqueue_mutex);
+ runqueue_node->pid = current->pid;
+ runqueue_node->filp = file;
+ list_add_tail(&runqueue_node->list, &g2d->runqueue);
+ if (!g2d->runqueue_node)
+ g2d_exec_runqueue(g2d);
+ mutex_unlock(&g2d->runqueue_mutex);
+
+ if (runqueue_node->async)
+ goto out;
+
+ wait_for_completion(&runqueue_node->complete);
+ g2d_free_runqueue_node(g2d, runqueue_node);
+
+out:
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
+
+static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ struct g2d_data *g2d;
+ int ret;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
+ /* allocate dma-aware cmdlist buffer. */
+ ret = g2d_init_cmdlist(g2d);
+ if (ret < 0) {
+ dev_err(dev, "cmdlist init failed\n");
+ return ret;
+ }
+
+ if (!is_drm_iommu_supported(drm_dev))
+ return 0;
+
+ ret = drm_iommu_attach_device(drm_dev, dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable iommu.\n");
+ g2d_fini_cmdlist(g2d);
+ }
+
+ return ret;
+
+}
+
+static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ if (!is_drm_iommu_supported(drm_dev))
+ return;
+
+ drm_iommu_detach_device(drm_dev, dev);
+}
+
+static int g2d_open(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv;
+
+ g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
+ if (!g2d_priv)
+ return -ENOMEM;
+
+ g2d_priv->dev = dev;
+ file_priv->g2d_priv = g2d_priv;
+
+ INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
+ INIT_LIST_HEAD(&g2d_priv->event_list);
+ INIT_LIST_HEAD(&g2d_priv->userptr_list);
+
+ return 0;
+}
+
+static void g2d_close(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_data *g2d;
+ struct g2d_cmdlist_node *node, *n;
+
+ if (!dev)
+ return;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return;
+
+ mutex_lock(&g2d->cmdlist_mutex);
+ list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+		/*
+		 * Unmap all gem objects that have not completed.
+		 *
+		 * P.S. if the current process was terminated forcibly,
+		 * there may still be some commands in inuse_cmdlist, so
+		 * unmap them too.
+		 */
+ g2d_unmap_cmdlist_gem(g2d, node, file);
+ list_move_tail(&node->list, &g2d->free_cmdlist);
+ }
+ mutex_unlock(&g2d->cmdlist_mutex);
+
+ /* release all g2d_userptr in pool. */
+ g2d_userptr_free_all(drm_dev, g2d, file);
+
+ kfree(file_priv->g2d_priv);
+}
+
+static int g2d_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct g2d_data *g2d;
+ struct exynos_drm_subdrv *subdrv;
+ int ret;
+
+ g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
+ if (!g2d)
+ return -ENOMEM;
+
+ g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
+ sizeof(struct g2d_runqueue_node), 0, 0, NULL);
+ if (!g2d->runqueue_slab)
+ return -ENOMEM;
+
+ g2d->dev = dev;
+
+ g2d->g2d_workq = create_singlethread_workqueue("g2d");
+ if (!g2d->g2d_workq) {
+ dev_err(dev, "failed to create workqueue\n");
+ ret = -EINVAL;
+ goto err_destroy_slab;
+ }
+
+ INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker);
+ INIT_LIST_HEAD(&g2d->free_cmdlist);
+ INIT_LIST_HEAD(&g2d->runqueue);
+
+ mutex_init(&g2d->cmdlist_mutex);
+ mutex_init(&g2d->runqueue_mutex);
+
+ g2d->gate_clk = devm_clk_get(dev, "fimg2d");
+ if (IS_ERR(g2d->gate_clk)) {
+ dev_err(dev, "failed to get gate clock\n");
+ ret = PTR_ERR(g2d->gate_clk);
+ goto err_destroy_workqueue;
+ }
+
+ pm_runtime_enable(dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ g2d->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(g2d->regs)) {
+ ret = PTR_ERR(g2d->regs);
+ goto err_put_clk;
+ }
+
+ g2d->irq = platform_get_irq(pdev, 0);
+ if (g2d->irq < 0) {
+ dev_err(dev, "failed to get irq\n");
+ ret = g2d->irq;
+ goto err_put_clk;
+ }
+
+ ret = devm_request_irq(dev, g2d->irq, g2d_irq_handler, 0,
+ "drm_g2d", g2d);
+ if (ret < 0) {
+ dev_err(dev, "irq request failed\n");
+ goto err_put_clk;
+ }
+
+ g2d->max_pool = MAX_POOL;
+
+ platform_set_drvdata(pdev, g2d);
+
+ subdrv = &g2d->subdrv;
+ subdrv->dev = dev;
+ subdrv->probe = g2d_subdrv_probe;
+ subdrv->remove = g2d_subdrv_remove;
+ subdrv->open = g2d_open;
+ subdrv->close = g2d_close;
+
+ ret = exynos_drm_subdrv_register(subdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm g2d device\n");
+ goto err_put_clk;
+ }
+
+	dev_info(dev, "The Exynos G2D (ver %d.%d) successfully probed\n",
+		 G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
+
+ return 0;
+
+err_put_clk:
+ pm_runtime_disable(dev);
+err_destroy_workqueue:
+ destroy_workqueue(g2d->g2d_workq);
+err_destroy_slab:
+ kmem_cache_destroy(g2d->runqueue_slab);
+ return ret;
+}
+
+static int g2d_remove(struct platform_device *pdev)
+{
+ struct g2d_data *g2d = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&g2d->runqueue_work);
+ exynos_drm_subdrv_unregister(&g2d->subdrv);
+
+ while (g2d->runqueue_node) {
+ g2d_free_runqueue_node(g2d, g2d->runqueue_node);
+ g2d->runqueue_node = g2d_get_runqueue_node(g2d);
+ }
+
+ pm_runtime_disable(&pdev->dev);
+
+ g2d_fini_cmdlist(g2d);
+ destroy_workqueue(g2d->g2d_workq);
+ kmem_cache_destroy(g2d->runqueue_slab);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int g2d_suspend(struct device *dev)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+
+ mutex_lock(&g2d->runqueue_mutex);
+ g2d->suspended = true;
+ mutex_unlock(&g2d->runqueue_mutex);
+
+ while (g2d->runqueue_node)
+ /* FIXME: good range? */
+ usleep_range(500, 1000);
+
+ flush_work(&g2d->runqueue_work);
+
+ return 0;
+}
+
+static int g2d_resume(struct device *dev)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+
+ g2d->suspended = false;
+ g2d_exec_runqueue(g2d);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int g2d_runtime_suspend(struct device *dev)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(g2d->gate_clk);
+
+ return 0;
+}
+
+static int g2d_runtime_resume(struct device *dev)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(g2d->gate_clk);
+ if (ret < 0)
+ dev_warn(dev, "failed to enable clock.\n");
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops g2d_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
+ SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
+};
+
+static const struct of_device_id exynos_g2d_match[] = {
+ { .compatible = "samsung,exynos5250-g2d" },
+ {},
+};
+
+struct platform_driver g2d_driver = {
+ .probe = g2d_probe,
+ .remove = g2d_remove,
+ .driver = {
+ .name = "s5p-g2d",
+ .owner = THIS_MODULE,
+ .pm = &g2d_pm_ops,
+ .of_match_table = exynos_g2d_match,
+ },
+};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
new file mode 100644
index 00000000000..1a9c7ca8c15
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_DRM_EXYNOS_G2D
+extern int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+#else
+static inline int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
new file mode 100644
index 00000000000..163a054922c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -0,0 +1,728 @@
+/* exynos_drm_gem.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
+
+#include <linux/shmem_fs.h>
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_buf.h"
+#include "exynos_drm_iommu.h"
+
+static unsigned int convert_to_vm_err_msg(int msg)
+{
+ unsigned int out_msg;
+
+ switch (msg) {
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ out_msg = VM_FAULT_NOPAGE;
+ break;
+
+ case -ENOMEM:
+ out_msg = VM_FAULT_OOM;
+ break;
+
+ default:
+ out_msg = VM_FAULT_SIGBUS;
+ break;
+ }
+
+ return out_msg;
+}
+
+static int check_gem_flags(unsigned int flags)
+{
+ if (flags & ~(EXYNOS_BO_MASK)) {
+ DRM_ERROR("invalid flags.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
+ struct vm_area_struct *vma)
+{
+ DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+
+	/* non-cacheable by default. */
+ if (obj->flags & EXYNOS_BO_CACHABLE)
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ else if (obj->flags & EXYNOS_BO_WC)
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ else
+ vma->vm_page_prot =
+ pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+}
+
+static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
+{
+ /* TODO */
+
+ return roundup(size, PAGE_SIZE);
+}
+
+static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
+ struct vm_area_struct *vma,
+ unsigned long f_vaddr,
+ pgoff_t page_offset)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+ struct scatterlist *sgl;
+ unsigned long pfn;
+ int i;
+
+ if (!buf->sgt)
+ return -EINTR;
+
+ if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+ DRM_ERROR("invalid page offset\n");
+ return -EINVAL;
+ }
+
+ sgl = buf->sgt->sgl;
+ for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+ if (page_offset < (sgl->length >> PAGE_SHIFT))
+ break;
+ page_offset -= (sgl->length >> PAGE_SHIFT);
+ }
+
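+	/* page_offset is now relative to sgl; convert it to an absolute pfn */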
+ pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
+
+ return vm_insert_mixed(vma, f_vaddr, pfn);
+}
+
+static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ int ret;
+
+	/*
+	 * Allocate an id in the idr table where the obj is registered;
+	 * the handle carries the id that userspace sees.
+	 */
+ ret = drm_gem_handle_create(file_priv, obj, handle);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return 0;
+}
+
+void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+ struct drm_gem_object *obj;
+ struct exynos_drm_gem_buf *buf;
+
+ obj = &exynos_gem_obj->base;
+ buf = exynos_gem_obj->buffer;
+
+ DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
+
+ /*
+ * do not release memory region from exporter.
+ *
+ * the region will be released by exporter
+ * once dmabuf's refcount becomes 0.
+ */
+ if (obj->import_attach)
+ goto out;
+
+ exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+
+out:
+ exynos_drm_fini_buf(obj->dev, buf);
+ exynos_gem_obj->buffer = NULL;
+
+ drm_gem_free_mmap_offset(obj);
+
+ /* release file pointer to gem object. */
+ drm_gem_object_release(obj);
+
+ kfree(exynos_gem_obj);
+ exynos_gem_obj = NULL;
+}
+
+unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
+ unsigned int gem_handle,
+ struct drm_file *file_priv)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_gem_object *obj;
+
+ obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ return 0;
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return exynos_gem_obj->buffer->size;
+}
+
+
+struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+ unsigned long size)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_gem_object *obj;
+ int ret;
+
+ exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
+ if (!exynos_gem_obj)
+ return NULL;
+
+ exynos_gem_obj->size = size;
+ obj = &exynos_gem_obj->base;
+
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret < 0) {
+ DRM_ERROR("failed to initialize gem object\n");
+ kfree(exynos_gem_obj);
+ return NULL;
+ }
+
+ DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
+
+ return exynos_gem_obj;
+}
+
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+ unsigned int flags,
+ unsigned long size)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_buf *buf;
+ int ret;
+
+ if (!size) {
+ DRM_ERROR("invalid size.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ size = roundup_gem_size(size, flags);
+
+ ret = check_gem_flags(flags);
+ if (ret)
+ return ERR_PTR(ret);
+
+ buf = exynos_drm_init_buf(dev, size);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ exynos_gem_obj = exynos_drm_gem_init(dev, size);
+ if (!exynos_gem_obj) {
+ ret = -ENOMEM;
+ goto err_fini_buf;
+ }
+
+ exynos_gem_obj->buffer = buf;
+
+ /* set memory type and cache attribute from user side. */
+ exynos_gem_obj->flags = flags;
+
+ ret = exynos_drm_alloc_buf(dev, buf, flags);
+ if (ret < 0)
+ goto err_gem_fini;
+
+ return exynos_gem_obj;
+
+err_gem_fini:
+ drm_gem_object_release(&exynos_gem_obj->base);
+ kfree(exynos_gem_obj);
+err_fini_buf:
+ exynos_drm_fini_buf(dev, buf);
+ return ERR_PTR(ret);
+}
+
+int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_exynos_gem_create *args = data;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ int ret;
+
+ exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
+ if (IS_ERR(exynos_gem_obj))
+ return PTR_ERR(exynos_gem_obj);
+
+ ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
+ &args->handle);
+ if (ret) {
+ exynos_drm_gem_destroy(exynos_gem_obj);
+ return ret;
+ }
+
+ return 0;
+}
+
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+ unsigned int gem_handle,
+ struct drm_file *filp)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_gem_object *obj;
+
+ obj = drm_gem_object_lookup(dev, filp, gem_handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ return &exynos_gem_obj->buffer->dma_addr;
+}
+
+void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
+ unsigned int gem_handle,
+ struct drm_file *filp)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_gem_object *obj;
+
+ obj = drm_gem_object_lookup(dev, filp, gem_handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ return;
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+	/*
+	 * decrease obj->refcount one more time because we have already
+	 * increased it at exynos_drm_gem_get_dma_addr().
+	 */
+ drm_gem_object_unreference_unlocked(obj);
+}
+
+int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_exynos_gem_map_off *args = data;
+
+ DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
+ args->handle, (unsigned long)args->offset);
+
+ if (!(dev->driver->driver_features & DRIVER_GEM)) {
+ DRM_ERROR("does not support GEM.\n");
+ return -ENODEV;
+ }
+
+ return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
+ &args->offset);
+}
+
+int exynos_drm_gem_mmap_buffer(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = filp->private_data;
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ struct drm_device *drm_dev = obj->dev;
+ struct exynos_drm_gem_buf *buffer;
+ unsigned long vm_size;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = obj;
+ vma->vm_ops = drm_dev->driver->gem_vm_ops;
+
+ update_vm_cache_attr(exynos_gem_obj, vma);
+
+ vm_size = vma->vm_end - vma->vm_start;
+
+	/*
+	 * a buffer describes the physically contiguous memory allocated
+	 * by user request or at framebuffer creation.
+	 */
+ buffer = exynos_gem_obj->buffer;
+
+ /* check if user-requested size is valid. */
+ if (vm_size > buffer->size)
+ return -EINVAL;
+
+ ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
+ buffer->dma_addr, buffer->size,
+ &buffer->dma_attrs);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
+
+ /*
+ * take a reference to this mapping of the object. And this reference
+ * is unreferenced by the corresponding vm_close call.
+ */
+ drm_gem_object_reference(obj);
+
+ drm_vm_open_locked(drm_dev, vma);
+
+ return 0;
+}
+
+int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_exynos_file_private *exynos_file_priv;
+ struct drm_exynos_gem_mmap *args = data;
+ struct drm_gem_object *obj;
+ struct file *anon_filp;
+ unsigned long addr;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM)) {
+ DRM_ERROR("does not support GEM.\n");
+ return -ENODEV;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ exynos_file_priv = file_priv->driver_priv;
+ anon_filp = exynos_file_priv->anon_filp;
+ anon_filp->private_data = obj;
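+	/*
+	 * The object is handed to the per-file anonymous file so that its
+	 * mmap handler (exynos_drm_gem_mmap_buffer() in this driver) can
+	 * pick it up from private_data.
+	 */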
+
+ addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, 0);
+
+ drm_gem_object_unreference(obj);
+
+ if (IS_ERR_VALUE(addr)) {
+ mutex_unlock(&dev->struct_mutex);
+ return (int)addr;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ args->mapped = addr;
+
+ DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
+
+ return 0;
+}
+
+int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_exynos_gem_info *args = data;
+ struct drm_gem_object *obj;
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ args->flags = exynos_gem_obj->flags;
+ args->size = exynos_gem_obj->size;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
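+/*
+ * Duplicate a VMA so that the userptr code can keep a reference to it
+ * after mmap_sem has been dropped; the copy pins vm_file and invokes
+ * the vm_ops->open hook, and is released with exynos_gem_put_vma().
+ */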
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
+{
+ struct vm_area_struct *vma_copy;
+
+ vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+ if (!vma_copy)
+ return NULL;
+
+ if (vma->vm_ops && vma->vm_ops->open)
+ vma->vm_ops->open(vma);
+
+ if (vma->vm_file)
+ get_file(vma->vm_file);
+
+ memcpy(vma_copy, vma, sizeof(*vma));
+
+ vma_copy->vm_mm = NULL;
+ vma_copy->vm_next = NULL;
+ vma_copy->vm_prev = NULL;
+
+ return vma_copy;
+}
+
+void exynos_gem_put_vma(struct vm_area_struct *vma)
+{
+ if (!vma)
+ return;
+
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+
+ if (vma->vm_file)
+ fput(vma->vm_file);
+
+ kfree(vma);
+}
+
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+ unsigned int npages,
+ struct page **pages,
+ struct vm_area_struct *vma)
+{
+ int get_npages;
+
+	/* the memory region was mmapped with VM_PFNMAP. */
+ if (vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
+ unsigned long pfn;
+ int ret = follow_pfn(vma, start, &pfn);
+ if (ret)
+ return ret;
+
+ pages[i] = pfn_to_page(pfn);
+ }
+
+ return 0;
+ }
+
+ get_npages = get_user_pages(current, current->mm, start,
+ npages, 1, 1, pages, NULL);
+ get_npages = max(get_npages, 0);
+ if (get_npages != npages) {
+ DRM_ERROR("failed to get user_pages.\n");
+ while (get_npages)
+ put_page(pages[--get_npages]);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+ unsigned int npages,
+ struct vm_area_struct *vma)
+{
+ if (!vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < npages; i++) {
+ set_page_dirty_lock(pages[i]);
+
+ /*
+ * undo the reference we took when populating
+ * the table.
+ */
+ put_page(pages[i]);
+ }
+ }
+}
+
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ int nents;
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with dma.\n");
+ mutex_unlock(&drm_dev->struct_mutex);
+ return nents;
+ }
+
+ mutex_unlock(&drm_dev->struct_mutex);
+ return 0;
+}
+
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+}
+
+void exynos_drm_gem_free_object(struct drm_gem_object *obj)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_buf *buf;
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+ buf = exynos_gem_obj->buffer;
+
+ if (obj->import_attach)
+ drm_prime_gem_destroy(obj, buf->sgt);
+
+ exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
+}
+
+int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ int ret;
+
+ /*
+ * allocate memory to be used for framebuffer.
+ * - this callback would be called by user application
+ * with DRM_IOCTL_MODE_CREATE_DUMB command.
+ */
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
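+	/*
+	 * e.g. (hypothetical) a 1920x1080 dumb buffer at 32 bpp:
+	 * pitch = 1920 * 4 = 7680 bytes, size = 7680 * 1080 = 8294400
+	 * bytes, rounded up to page size by the allocator.
+	 */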
+
+ if (is_drm_iommu_supported(dev)) {
+ exynos_gem_obj = exynos_drm_gem_create(dev,
+ EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
+ args->size);
+ } else {
+ exynos_gem_obj = exynos_drm_gem_create(dev,
+ EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
+ args->size);
+ }
+
+ if (IS_ERR(exynos_gem_obj)) {
+ dev_warn(dev->dev, "FB allocation failed.\n");
+ return PTR_ERR(exynos_gem_obj);
+ }
+
+ ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
+ &args->handle);
+ if (ret) {
+ exynos_drm_gem_destroy(exynos_gem_obj);
+ return ret;
+ }
+
+ return 0;
+}
+
+int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+
+ /*
+ * get offset of memory allocated for drm framebuffer.
+ * - this callback would be called by user application
+ * with DRM_IOCTL_MODE_MAP_DUMB command.
+ */
+
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+ DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
+
+out:
+ drm_gem_object_unreference(obj);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_device *dev = obj->dev;
+ unsigned long f_vaddr;
+ pgoff_t page_offset;
+ int ret;
+
+ page_offset = ((unsigned long)vmf->virtual_address -
+ vma->vm_start) >> PAGE_SHIFT;
+ f_vaddr = (unsigned long)vmf->virtual_address;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
+ if (ret < 0)
+ DRM_ERROR("failed to map a buffer with user.\n");
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return convert_to_vm_err_msg(ret);
+}
+
+int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_gem_object *obj;
+ int ret;
+
+ /* set vm_area_struct. */
+ ret = drm_gem_mmap(filp, vma);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
+
+ obj = vma->vm_private_data;
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_gem_flags(exynos_gem_obj->flags);
+ if (ret) {
+ drm_gem_vm_close(vma);
+ drm_gem_free_mmap_offset(obj);
+ return ret;
+ }
+
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
+
+ update_vm_cache_attr(exynos_gem_obj, vma);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
new file mode 100644
index 00000000000..1592c0ba7de
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -0,0 +1,192 @@
+/* exynos_drm_gem.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authoer: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_GEM_H_
+#define _EXYNOS_DRM_GEM_H_
+
+#define to_exynos_gem_obj(x) container_of(x,\
+ struct exynos_drm_gem_obj, base)
+
+#define IS_NONCONTIG_BUFFER(f) (f & EXYNOS_BO_NONCONTIG)
+
+/*
+ * exynos drm gem buffer structure.
+ *
+ * @kvaddr: kernel virtual address to allocated memory region.
+ * @userptr: user space address.
+ * @dma_addr: bus address(accessed by dma) to allocated memory region.
+ * - this address could be physical address without IOMMU and
+ * device address with IOMMU.
+ * @write: whether pages will be written to by the caller.
+ * @pages: Array of backing pages.
+ * @sgt: sg table to transfer page data.
+ * @size: size of allocated memory region.
+ * @pfnmap: indicate whether memory region from userptr is mmaped with
+ * VM_PFNMAP or not.
+ */
+struct exynos_drm_gem_buf {
+ void __iomem *kvaddr;
+ unsigned long userptr;
+ dma_addr_t dma_addr;
+ struct dma_attrs dma_attrs;
+ unsigned int write;
+ struct page **pages;
+ struct sg_table *sgt;
+ unsigned long size;
+ bool pfnmap;
+};
+
+/*
+ * exynos drm buffer structure.
+ *
+ * @base: a gem object.
+ * - a new handle to this gem object would be created
+ * by drm_gem_handle_create().
+ * @buffer: a pointer to exynos_drm_gem_buffer object.
+ *	- contains the information of the memory region allocated
+ *	by user request or at framebuffer creation.
+ * @size: size requested from user, in bytes; this size is aligned
+ *	in page units.
+ * @vma: a pointer to vm_area.
+ * @flags: indicate memory type of allocated buffer and cache attribute.
+ *
+ * P.S. this object would be transferred to user as kms_bo.handle so
+ * user can access the buffer through kms_bo.handle.
+ */
+struct exynos_drm_gem_obj {
+ struct drm_gem_object base;
+ struct exynos_drm_gem_buf *buffer;
+ unsigned long size;
+ struct vm_area_struct *vma;
+ unsigned int flags;
+};
+
+struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+
+/* destroy a buffer with gem object */
+void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
+
+/* create a private gem object and initialize it. */
+struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+ unsigned long size);
+
+/* create a new buffer with gem object */
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+ unsigned int flags,
+ unsigned long size);
+
+/*
+ * request gem object creation and buffer allocation with the size
+ * calculated from framebuffer information such as width, height
+ * and bpp.
+ */
+int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/*
+ * get dma address from gem handle and this function could be used for
+ * other drivers such as 2d/3d acceleration drivers.
+ * with this function call, gem object reference count would be increased.
+ */
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+ unsigned int gem_handle,
+ struct drm_file *filp);
+
+/*
+ * put dma address from gem handle and this function could be used for
+ * other drivers such as 2d/3d acceleration drivers.
+ * with this function call, gem object reference count would be decreased.
+ */
+void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
+ unsigned int gem_handle,
+ struct drm_file *filp);
+
+/* get buffer offset to map to user space. */
+int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/*
+ * mmap the physically contiguous memory that a gem object contains
+ * to user space.
+ */
+int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+int exynos_drm_gem_mmap_buffer(struct file *filp,
+ struct vm_area_struct *vma);
+
+/* map user space allocated by malloc to pages. */
+int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* get buffer information to memory region allocated by gem. */
+int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* get buffer size to gem handle. */
+unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
+ unsigned int gem_handle,
+ struct drm_file *file_priv);
+
+/* free gem object. */
+void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
+
+/* create memory region for drm framebuffer. */
+int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+/* map memory region for drm framebuffer to user space. */
+int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
+
+/* page fault handler: map the faulting virtual address to physical memory. */
+int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+/* set vm_flags; the vm attributes can also be changed here. */
+int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
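+
+/*
+ * Note: a VM_IO or VM_PFNMAP region maps raw page frames with no
+ * backing struct page, so such memory cannot be pinned with
+ * get_user_pages(); the userptr paths use this check to decide how
+ * the pages have to be looked up.
+ */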
+
+/* get a copy of a virtual memory region. */
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
+
+/* release a userspace virtual memory area. */
+void exynos_gem_put_vma(struct vm_area_struct *vma);
+
+/* get pages from user space. */
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+ unsigned int npages,
+ struct page **pages,
+ struct vm_area_struct *vma);
+
+/* drop the reference to pages. */
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+ unsigned int npages,
+ struct vm_area_struct *vma);
+
+/* map sgt with dma region. */
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+
+/* unmap sgt from dma region. */
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
new file mode 100644
index 00000000000..9e3ff167296
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -0,0 +1,1802 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-gsc.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_gsc.h"
+
+/*
+ * GSC stands for General SCaler and
+ * supports image scaler/rotator and input/output DMA operations.
+ * input DMA reads image data from memory.
+ * output DMA writes image data to memory.
+ * GSC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> GSC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> GSC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> GSC H/W ----> FIMD, Mixer.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size (width, height).
+ * 4. add a check_prepare api for correct register setup.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define GSC_MAX_DEVS 4
+#define GSC_MAX_SRC 4
+#define GSC_MAX_DST 16
+#define GSC_RESET_TIMEOUT 50
+#define GSC_BUF_STOP 1
+#define GSC_BUF_START 2
+#define GSC_REG_SZ 16
+#define GSC_WIDTH_ITU_709 1280
+#define GSC_SC_UP_MAX_RATIO 65536
+#define GSC_SC_DOWN_RATIO_7_8 74898
+#define GSC_SC_DOWN_RATIO_6_8 87381
+#define GSC_SC_DOWN_RATIO_5_8 104857
+#define GSC_SC_DOWN_RATIO_4_8 131072
+#define GSC_SC_DOWN_RATIO_3_8 174762
+#define GSC_SC_DOWN_RATIO_2_8 262144
+#define GSC_REFRESH_MIN 12
+#define GSC_REFRESH_MAX 60
+#define GSC_CROP_MAX 8192
+#define GSC_CROP_MIN 32
+#define GSC_SCALE_MAX 4224
+#define GSC_SCALE_MIN 32
+#define GSC_COEF_RATIO 7
+#define GSC_COEF_PHASE 9
+#define GSC_COEF_ATTR 16
+#define GSC_COEF_H_8T 8
+#define GSC_COEF_V_4T 4
+#define GSC_COEF_DEPTH 3
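+
+/*
+ * Note: the GSC_SC_* values above are scale ratios in 16.16 fixed
+ * point, i.e. ratio = (source size / destination size) << 16.  For
+ * example, an 8:7 downscale gives 65536 * 8 / 7 ~= 74898, which is
+ * GSC_SC_DOWN_RATIO_7_8.
+ */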
+
+#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+		struct gsc_context, ippdrv)
+#define gsc_read(offset) readl(ctx->regs + (offset))
+#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @pre_shfactor: pre scaler shift factor.
+ * @pre_hratio: horizontal ratio of the prescaler.
+ * @pre_vratio: vertical ratio of the prescaler.
+ * @main_hratio: the main scaler's horizontal ratio.
+ * @main_vratio: the main scaler's vertical ratio.
+ */
+struct gsc_scaler {
+ bool range;
+ u32 pre_shfactor;
+ u32 pre_hratio;
+ u32 pre_vratio;
+ unsigned long main_hratio;
+ unsigned long main_vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see section 49.2 (features) of the user manual.
+ * @tile_w: tile mode or rotation width.
+ * @tile_h: tile mode or rotation height.
+ * @w: other cases width.
+ * @h: other cases height.
+ */
+struct gsc_capability {
+ /* tile or rotation */
+ u32 tile_w;
+ u32 tile_h;
+ /* other cases */
+ u32 w;
+ u32 h;
+};
+
+/*
+ * A structure of gsc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @gsc_clk: gsc gate clock.
+ * @sc: scaler information.
+ * @id: gsc id.
+ * @irq: irq number.
+ * @rotation: whether the source is rotated by 90 or 270 degrees.
+ * @suspended: whether the device is suspended (clock gated).
+ */
+struct gsc_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct mutex lock;
+ struct clk *gsc_clk;
+ struct gsc_scaler sc;
+ int id;
+ int irq;
+ bool rotation;
+ bool suspended;
+};
+
+/* 8-tap Filter Coefficient */
+static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
+ { /* Ratio <= 65536 (~8:8) */
+ { 0, 0, 0, 128, 0, 0, 0, 0 },
+ { -1, 2, -6, 127, 7, -2, 1, 0 },
+ { -1, 4, -12, 125, 16, -5, 1, 0 },
+ { -1, 5, -15, 120, 25, -8, 2, 0 },
+ { -1, 6, -18, 114, 35, -10, 3, -1 },
+ { -1, 6, -20, 107, 46, -13, 4, -1 },
+ { -2, 7, -21, 99, 57, -16, 5, -1 },
+ { -1, 6, -20, 89, 68, -18, 5, -1 },
+ { -1, 6, -20, 79, 79, -20, 6, -1 },
+ { -1, 5, -18, 68, 89, -20, 6, -1 },
+ { -1, 5, -16, 57, 99, -21, 7, -2 },
+ { -1, 4, -13, 46, 107, -20, 6, -1 },
+ { -1, 3, -10, 35, 114, -18, 6, -1 },
+ { 0, 2, -8, 25, 120, -15, 5, -1 },
+ { 0, 1, -5, 16, 125, -12, 4, -1 },
+ { 0, 1, -2, 7, 127, -6, 2, -1 }
+ }, { /* 65536 < Ratio <= 74898 (~8:7) */
+ { 3, -8, 14, 111, 13, -8, 3, 0 },
+ { 2, -6, 7, 112, 21, -10, 3, -1 },
+ { 2, -4, 1, 110, 28, -12, 4, -1 },
+ { 1, -2, -3, 106, 36, -13, 4, -1 },
+ { 1, -1, -7, 103, 44, -15, 4, -1 },
+ { 1, 1, -11, 97, 53, -16, 4, -1 },
+ { 0, 2, -13, 91, 61, -16, 4, -1 },
+ { 0, 3, -15, 85, 69, -17, 4, -1 },
+ { 0, 3, -16, 77, 77, -16, 3, 0 },
+ { -1, 4, -17, 69, 85, -15, 3, 0 },
+ { -1, 4, -16, 61, 91, -13, 2, 0 },
+ { -1, 4, -16, 53, 97, -11, 1, 1 },
+ { -1, 4, -15, 44, 103, -7, -1, 1 },
+ { -1, 4, -13, 36, 106, -3, -2, 1 },
+ { -1, 4, -12, 28, 110, 1, -4, 2 },
+ { -1, 3, -10, 21, 112, 7, -6, 2 }
+ }, { /* 74898 < Ratio <= 87381 (~8:6) */
+ { 2, -11, 25, 96, 25, -11, 2, 0 },
+ { 2, -10, 19, 96, 31, -12, 2, 0 },
+ { 2, -9, 14, 94, 37, -12, 2, 0 },
+ { 2, -8, 10, 92, 43, -12, 1, 0 },
+ { 2, -7, 5, 90, 49, -12, 1, 0 },
+ { 2, -5, 1, 86, 55, -12, 0, 1 },
+ { 2, -4, -2, 82, 61, -11, -1, 1 },
+ { 1, -3, -5, 77, 67, -9, -1, 1 },
+ { 1, -2, -7, 72, 72, -7, -2, 1 },
+ { 1, -1, -9, 67, 77, -5, -3, 1 },
+ { 1, -1, -11, 61, 82, -2, -4, 2 },
+ { 1, 0, -12, 55, 86, 1, -5, 2 },
+ { 0, 1, -12, 49, 90, 5, -7, 2 },
+ { 0, 1, -12, 43, 92, 10, -8, 2 },
+ { 0, 2, -12, 37, 94, 14, -9, 2 },
+ { 0, 2, -12, 31, 96, 19, -10, 2 }
+ }, { /* 87381 < Ratio <= 104857 (~8:5) */
+ { -1, -8, 33, 80, 33, -8, -1, 0 },
+ { -1, -8, 28, 80, 37, -7, -2, 1 },
+ { 0, -8, 24, 79, 41, -7, -2, 1 },
+ { 0, -8, 20, 78, 46, -6, -3, 1 },
+ { 0, -8, 16, 76, 50, -4, -3, 1 },
+ { 0, -7, 13, 74, 54, -3, -4, 1 },
+ { 1, -7, 10, 71, 58, -1, -5, 1 },
+ { 1, -6, 6, 68, 62, 1, -5, 1 },
+ { 1, -6, 4, 65, 65, 4, -6, 1 },
+ { 1, -5, 1, 62, 68, 6, -6, 1 },
+ { 1, -5, -1, 58, 71, 10, -7, 1 },
+ { 1, -4, -3, 54, 74, 13, -7, 0 },
+ { 1, -3, -4, 50, 76, 16, -8, 0 },
+ { 1, -3, -6, 46, 78, 20, -8, 0 },
+ { 1, -2, -7, 41, 79, 24, -8, 0 },
+ { 1, -2, -7, 37, 80, 28, -8, -1 }
+ }, { /* 104857 < Ratio <= 131072 (~8:4) */
+ { -3, 0, 35, 64, 35, 0, -3, 0 },
+ { -3, -1, 32, 64, 38, 1, -3, 0 },
+ { -2, -2, 29, 63, 41, 2, -3, 0 },
+ { -2, -3, 27, 63, 43, 4, -4, 0 },
+ { -2, -3, 24, 61, 46, 6, -4, 0 },
+ { -2, -3, 21, 60, 49, 7, -4, 0 },
+ { -1, -4, 19, 59, 51, 9, -4, -1 },
+ { -1, -4, 16, 57, 53, 12, -4, -1 },
+ { -1, -4, 14, 55, 55, 14, -4, -1 },
+ { -1, -4, 12, 53, 57, 16, -4, -1 },
+ { -1, -4, 9, 51, 59, 19, -4, -1 },
+ { 0, -4, 7, 49, 60, 21, -3, -2 },
+ { 0, -4, 6, 46, 61, 24, -3, -2 },
+ { 0, -4, 4, 43, 63, 27, -3, -2 },
+ { 0, -3, 2, 41, 63, 29, -2, -2 },
+ { 0, -3, 1, 38, 64, 32, -1, -3 }
+ }, { /* 131072 < Ratio <= 174762 (~8:3) */
+ { -1, 8, 33, 48, 33, 8, -1, 0 },
+ { -1, 7, 31, 49, 35, 9, -1, -1 },
+ { -1, 6, 30, 49, 36, 10, -1, -1 },
+ { -1, 5, 28, 48, 38, 12, -1, -1 },
+ { -1, 4, 26, 48, 39, 13, 0, -1 },
+ { -1, 3, 24, 47, 41, 15, 0, -1 },
+ { -1, 2, 23, 47, 42, 16, 0, -1 },
+ { -1, 2, 21, 45, 43, 18, 1, -1 },
+ { -1, 1, 19, 45, 45, 19, 1, -1 },
+ { -1, 1, 18, 43, 45, 21, 2, -1 },
+ { -1, 0, 16, 42, 47, 23, 2, -1 },
+ { -1, 0, 15, 41, 47, 24, 3, -1 },
+ { -1, 0, 13, 39, 48, 26, 4, -1 },
+ { -1, -1, 12, 38, 48, 28, 5, -1 },
+ { -1, -1, 10, 36, 49, 30, 6, -1 },
+ { -1, -1, 9, 35, 49, 31, 7, -1 }
+ }, { /* 174762 < Ratio <= 262144 (~8:2) */
+ { 2, 13, 30, 38, 30, 13, 2, 0 },
+ { 2, 12, 29, 38, 30, 14, 3, 0 },
+ { 2, 11, 28, 38, 31, 15, 3, 0 },
+ { 2, 10, 26, 38, 32, 16, 4, 0 },
+ { 1, 10, 26, 37, 33, 17, 4, 0 },
+ { 1, 9, 24, 37, 34, 18, 5, 0 },
+ { 1, 8, 24, 37, 34, 19, 5, 0 },
+ { 1, 7, 22, 36, 35, 20, 6, 1 },
+ { 1, 6, 21, 36, 36, 21, 6, 1 },
+ { 1, 6, 20, 35, 36, 22, 7, 1 },
+ { 0, 5, 19, 34, 37, 24, 8, 1 },
+ { 0, 5, 18, 34, 37, 24, 9, 1 },
+ { 0, 4, 17, 33, 37, 26, 10, 1 },
+ { 0, 4, 16, 32, 38, 26, 10, 2 },
+ { 0, 3, 15, 31, 38, 28, 11, 2 },
+ { 0, 3, 14, 30, 38, 29, 12, 2 }
+ }
+};
+
+/* 4-tap Filter Coefficient */
+static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
+ { /* Ratio <= 65536 (~8:8) */
+ { 0, 128, 0, 0 },
+ { -4, 127, 5, 0 },
+ { -6, 124, 11, -1 },
+ { -8, 118, 19, -1 },
+ { -8, 111, 27, -2 },
+ { -8, 102, 37, -3 },
+ { -8, 92, 48, -4 },
+ { -7, 81, 59, -5 },
+ { -6, 70, 70, -6 },
+ { -5, 59, 81, -7 },
+ { -4, 48, 92, -8 },
+ { -3, 37, 102, -8 },
+ { -2, 27, 111, -8 },
+ { -1, 19, 118, -8 },
+ { -1, 11, 124, -6 },
+ { 0, 5, 127, -4 }
+ }, { /* 65536 < Ratio <= 74898 (~8:7) */
+ { 8, 112, 8, 0 },
+ { 4, 111, 14, -1 },
+ { 1, 109, 20, -2 },
+ { -2, 105, 27, -2 },
+ { -3, 100, 34, -3 },
+ { -5, 93, 43, -3 },
+ { -5, 86, 51, -4 },
+ { -5, 77, 60, -4 },
+ { -5, 69, 69, -5 },
+ { -4, 60, 77, -5 },
+ { -4, 51, 86, -5 },
+ { -3, 43, 93, -5 },
+ { -3, 34, 100, -3 },
+ { -2, 27, 105, -2 },
+ { -2, 20, 109, 1 },
+ { -1, 14, 111, 4 }
+ }, { /* 74898 < Ratio <= 87381 (~8:6) */
+ { 16, 96, 16, 0 },
+ { 12, 97, 21, -2 },
+ { 8, 96, 26, -2 },
+ { 5, 93, 32, -2 },
+ { 2, 89, 39, -2 },
+ { 0, 84, 46, -2 },
+ { -1, 79, 53, -3 },
+ { -2, 73, 59, -2 },
+ { -2, 66, 66, -2 },
+ { -2, 59, 73, -2 },
+ { -3, 53, 79, -1 },
+ { -2, 46, 84, 0 },
+ { -2, 39, 89, 2 },
+ { -2, 32, 93, 5 },
+ { -2, 26, 96, 8 },
+ { -2, 21, 97, 12 }
+ }, { /* 87381 < Ratio <= 104857 (~8:5) */
+ { 22, 84, 22, 0 },
+ { 18, 85, 26, -1 },
+ { 14, 84, 31, -1 },
+ { 11, 82, 36, -1 },
+ { 8, 79, 42, -1 },
+ { 6, 76, 47, -1 },
+ { 4, 72, 52, 0 },
+ { 2, 68, 58, 0 },
+ { 1, 63, 63, 1 },
+ { 0, 58, 68, 2 },
+ { 0, 52, 72, 4 },
+ { -1, 47, 76, 6 },
+ { -1, 42, 79, 8 },
+ { -1, 36, 82, 11 },
+ { -1, 31, 84, 14 },
+ { -1, 26, 85, 18 }
+ }, { /* 104857 < Ratio <= 131072 (~8:4) */
+ { 26, 76, 26, 0 },
+ { 22, 76, 30, 0 },
+ { 19, 75, 34, 0 },
+ { 16, 73, 38, 1 },
+ { 13, 71, 43, 1 },
+ { 10, 69, 47, 2 },
+ { 8, 66, 51, 3 },
+ { 6, 63, 55, 4 },
+ { 5, 59, 59, 5 },
+ { 4, 55, 63, 6 },
+ { 3, 51, 66, 8 },
+ { 2, 47, 69, 10 },
+ { 1, 43, 71, 13 },
+ { 1, 38, 73, 16 },
+ { 0, 34, 75, 19 },
+ { 0, 30, 76, 22 }
+ }, { /* 131072 < Ratio <= 174762 (~8:3) */
+ { 29, 70, 29, 0 },
+ { 26, 68, 32, 2 },
+ { 23, 67, 36, 2 },
+ { 20, 66, 39, 3 },
+ { 17, 65, 43, 3 },
+ { 15, 63, 46, 4 },
+ { 12, 61, 50, 5 },
+ { 10, 58, 53, 7 },
+ { 8, 56, 56, 8 },
+ { 7, 53, 58, 10 },
+ { 5, 50, 61, 12 },
+ { 4, 46, 63, 15 },
+ { 3, 43, 65, 17 },
+ { 3, 39, 66, 20 },
+ { 2, 36, 67, 23 },
+ { 2, 32, 68, 26 }
+ }, { /* 174762 < Ratio <= 262144 (~8:2) */
+ { 32, 64, 32, 0 },
+ { 28, 63, 34, 3 },
+ { 25, 62, 37, 4 },
+ { 22, 62, 40, 4 },
+ { 19, 61, 43, 5 },
+ { 17, 59, 46, 6 },
+ { 15, 58, 48, 7 },
+ { 13, 55, 51, 9 },
+ { 11, 53, 53, 11 },
+ { 9, 51, 55, 13 },
+ { 7, 48, 58, 15 },
+ { 6, 46, 59, 17 },
+ { 5, 43, 61, 19 },
+ { 4, 40, 62, 22 },
+ { 4, 37, 62, 25 },
+ { 3, 34, 63, 28 }
+ }
+};
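+
+/*
+ * Each row of the two coefficient tables above holds the filter taps
+ * for one interpolation phase; the taps of a row sum to 128 (unity
+ * gain with 7 fractional bits), e.g. -1 + 2 - 6 + 127 + 7 - 2 + 1 + 0
+ * = 128 in the second 8-tap row.
+ */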
+
+static int gsc_sw_reset(struct gsc_context *ctx)
+{
+ u32 cfg;
+ int count = GSC_RESET_TIMEOUT;
+
+ /* s/w reset */
+ cfg = (GSC_SW_RESET_SRESET);
+ gsc_write(cfg, GSC_SW_RESET);
+
+ /* wait s/w reset complete */
+ while (count--) {
+ cfg = gsc_read(GSC_SW_RESET);
+ if (!cfg)
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ if (cfg) {
+ DRM_ERROR("failed to reset gsc h/w.\n");
+ return -EBUSY;
+ }
+
+ /* reset sequence */
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+ cfg |= (GSC_IN_BASE_ADDR_MASK |
+ GSC_IN_BASE_ADDR_PINGPONG(0));
+ gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+ cfg |= (GSC_OUT_BASE_ADDR_MASK |
+ GSC_OUT_BASE_ADDR_PINGPONG(0));
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ return 0;
+}
+
+static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
+{
+ u32 gscblk_cfg;
+
+ gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
+
+ if (enable)
+ gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
+ GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
+ GSC_BLK_SW_RESET_WB_DEST(ctx->id);
+ else
+ gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
+
+ writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
+}
+
+static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
+ bool overflow, bool done)
+{
+ u32 cfg;
+
+	DRM_DEBUG_KMS("enable[%d]overflow[%d]done[%d]\n",
+		enable, overflow, done);
+
+ cfg = gsc_read(GSC_IRQ);
+ cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
+
+ if (enable)
+ cfg |= GSC_IRQ_ENABLE;
+ else
+ cfg &= ~GSC_IRQ_ENABLE;
+
+ if (overflow)
+ cfg &= ~GSC_IRQ_OR_MASK;
+ else
+ cfg |= GSC_IRQ_OR_MASK;
+
+ if (done)
+ cfg &= ~GSC_IRQ_FRMDONE_MASK;
+ else
+ cfg |= GSC_IRQ_FRMDONE_MASK;
+
+ gsc_write(cfg, GSC_IRQ);
+}
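+
+/*
+ * Note: in the GSC_IRQ register the *_MASK bits disable the
+ * corresponding interrupt source, so gsc_handle_irq() enables the
+ * frame-done interrupt by clearing GSC_IRQ_FRMDONE_MASK, not by
+ * setting it.
+ */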
+
+static int gsc_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
+ GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
+ GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
+ GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= GSC_IN_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= GSC_IN_XRGB8888;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_ORDER_LSB_Y |
+ GSC_IN_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_ORDER_LSB_Y |
+ GSC_IN_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_OEDER_LSB_C |
+ GSC_IN_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_OEDER_LSB_C |
+ GSC_IN_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
+ GSC_IN_YUV420_2P);
+ break;
+ case DRM_FORMAT_YUV422:
+ cfg |= GSC_IN_YUV422_3P;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= GSC_IN_YUV420_3P;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
+ GSC_IN_YUV420_2P);
+ break;
+ case DRM_FORMAT_NV12MT:
+ cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
+ break;
+ default:
+		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ return 0;
+}
+
+static int gsc_src_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_YFLIP;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_90_XFLIP;
+ else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_90_YFLIP;
+ else
+ cfg |= GSC_IN_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= GSC_IN_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= GSC_IN_ROT_270;
+ break;
+ default:
+		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+	ctx->rotation = (cfg &
+		(GSC_IN_ROT_90 | GSC_IN_ROT_270)) ? 1 : 0;
+ *swap = ctx->rotation;
+
+ return 0;
+}
+
+static int gsc_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct gsc_scaler *sc = &ctx->sc;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+ swap, pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ }
+
+ /* pixel offset */
+ cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
+ GSC_SRCIMG_OFFSET_Y(img_pos.y));
+ gsc_write(cfg, GSC_SRCIMG_OFFSET);
+
+ /* cropped size */
+ cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
+ GSC_CROPPED_HEIGHT(img_pos.h));
+ gsc_write(cfg, GSC_CROPPED_SIZE);
+
+ DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = gsc_read(GSC_SRCIMG_SIZE);
+ cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
+ GSC_SRCIMG_WIDTH_MASK);
+
+ cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
+ GSC_SRCIMG_HEIGHT(sz->vsize));
+
+ gsc_write(cfg, GSC_SRCIMG_SIZE);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_RGB_TYPE_MASK;
+
+ DRM_DEBUG_KMS("width[%d]range[%d]\n", pos->w, sc->range);
+
+	if (pos->w >= GSC_WIDTH_ITU_709) {
+		if (sc->range)
+			cfg |= GSC_IN_RGB_HD_WIDE;
+		else
+			cfg |= GSC_IN_RGB_HD_NARROW;
+	} else {
+		if (sc->range)
+			cfg |= GSC_IN_RGB_SD_WIDE;
+		else
+			cfg |= GSC_IN_RGB_SD_NARROW;
+	}
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ return 0;
+}
+
+static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool masked;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+
+ DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
+
+ /* mask register set */
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ masked = false;
+ break;
+ case IPP_BUF_DEQUEUE:
+ masked = true;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ return -EINVAL;
+ }
+
+ /* sequence id */
+ cfg &= ~mask;
+ cfg |= masked << buf_id;
+ gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+ return 0;
+}
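+
+/*
+ * Note: in the base-address mask registers a set bit masks the slot
+ * out, so enqueueing a buffer clears its bit and dequeueing sets it;
+ * gsc_src_set_buf_seq() above only toggles the one bit for buf_id.
+ */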
+
+static int gsc_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+
+ DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > GSC_MAX_SRC) {
+		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+ return -EINVAL;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ GSC_IN_BASE_ADDR_Y(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ GSC_IN_BASE_ADDR_CB(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ GSC_IN_BASE_ADDR_CR(buf_id));
+ break;
+ case IPP_BUF_DEQUEUE:
+ gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
+ gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
+ gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_src_ops = {
+ .set_fmt = gsc_src_set_fmt,
+ .set_transf = gsc_src_set_transf,
+ .set_size = gsc_src_set_size,
+ .set_addr = gsc_src_set_addr,
+};
+
+static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
+ GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
+ GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
+ GSC_OUT_GLOBAL_ALPHA_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= GSC_OUT_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= GSC_OUT_XRGB8888;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+ GSC_OUT_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+ GSC_OUT_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_OEDER_LSB_C |
+ GSC_OUT_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_OEDER_LSB_C |
+ GSC_OUT_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= GSC_OUT_YUV420_3P;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
+ GSC_OUT_YUV420_2P);
+ break;
+ case DRM_FORMAT_NV12MT:
+ cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
+ break;
+ default:
+		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_OUT_CON);
+
+ return 0;
+}
+
+static int gsc_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_YFLIP;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_90_XFLIP;
+ else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_90_YFLIP;
+ else
+ cfg |= GSC_IN_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= GSC_IN_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= GSC_IN_ROT_270;
+ break;
+ default:
+		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+	ctx->rotation = (cfg &
+		(GSC_IN_ROT_90 | GSC_IN_ROT_270)) ? 1 : 0;
+ *swap = ctx->rotation;
+
+ return 0;
+}
+
+static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
+{
+ DRM_DEBUG_KMS("src[%d]dst[%d]\n", src, dst);
+
+	if (src >= dst * 8) {
+		DRM_ERROR("failed to make ratio and shift.\n");
+		return -EINVAL;
+	} else if (src >= dst * 4) {
+		*ratio = 4;
+	} else if (src >= dst * 2) {
+		*ratio = 2;
+	} else {
+		*ratio = 1;
+	}
+
+ return 0;
+}
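+
+/*
+ * The prescaler can only divide by 1, 2 or 4, so the helper above
+ * picks the largest pre-division that does not overshoot the target
+ * size; total downscales of 8:1 or more are rejected as unsupported.
+ */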
+
+static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
+{
+ if (hratio == 4 && vratio == 4)
+ *shfactor = 4;
+ else if ((hratio == 4 && vratio == 2) ||
+ (hratio == 2 && vratio == 4))
+ *shfactor = 3;
+ else if ((hratio == 4 && vratio == 1) ||
+ (hratio == 1 && vratio == 4) ||
+ (hratio == 2 && vratio == 2))
+ *shfactor = 2;
+ else if (hratio == 1 && vratio == 1)
+ *shfactor = 0;
+ else
+ *shfactor = 1;
+}
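+
+/*
+ * The shift factor chosen above is effectively log2(hratio * vratio):
+ * 4x4 pre-scaling needs shfactor 4, 4x2 or 2x4 needs 3, 4x1, 1x4 and
+ * 2x2 need 2, 1x1 needs 0, and the remaining 2x1/1x2 cases need 1.
+ */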
+
+static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
+ struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+ u32 src_w, src_h, dst_w, dst_h;
+ int ret = 0;
+
+ src_w = src->w;
+ src_h = src->h;
+
+ if (ctx->rotation) {
+ dst_w = dst->h;
+ dst_h = dst->w;
+ } else {
+ dst_w = dst->w;
+ dst_h = dst->h;
+ }
+
+ ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+ return ret;
+ }
+
+ ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("pre_hratio[%d]pre_vratio[%d]\n",
+ sc->pre_hratio, sc->pre_vratio);
+
+ sc->main_hratio = (src_w << 16) / dst_w;
+ sc->main_vratio = (src_h << 16) / dst_h;
+
+ DRM_DEBUG_KMS("main_hratio[%ld]main_vratio[%ld]\n",
+ sc->main_hratio, sc->main_vratio);
+
+ gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
+ &sc->pre_shfactor);
+
+ DRM_DEBUG_KMS("pre_shfactor[%d]\n", sc->pre_shfactor);
+
+ cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
+ GSC_PRESC_H_RATIO(sc->pre_hratio) |
+ GSC_PRESC_V_RATIO(sc->pre_vratio));
+ gsc_write(cfg, GSC_PRE_SCALE_RATIO);
+
+ return ret;
+}
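+
+/*
+ * main_hratio and main_vratio computed above are 16.16 fixed-point
+ * ratios: scaling 1920 pixels down to 1280, for instance, gives
+ * (1920 << 16) / 1280 = 98304, i.e. 1.5.
+ */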
+
+static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
+{
+ int i, j, k, sc_ratio;
+
+ if (main_hratio <= GSC_SC_UP_MAX_RATIO)
+ sc_ratio = 0;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
+ sc_ratio = 1;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
+ sc_ratio = 2;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
+ sc_ratio = 3;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
+ sc_ratio = 4;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
+ sc_ratio = 5;
+ else
+ sc_ratio = 6;
+
+ for (i = 0; i < GSC_COEF_PHASE; i++)
+ for (j = 0; j < GSC_COEF_H_8T; j++)
+ for (k = 0; k < GSC_COEF_DEPTH; k++)
+ gsc_write(h_coef_8t[sc_ratio][i][j],
+ GSC_HCOEF(i, j, k));
+}
+
+static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
+{
+ int i, j, k, sc_ratio;
+
+ if (main_vratio <= GSC_SC_UP_MAX_RATIO)
+ sc_ratio = 0;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
+ sc_ratio = 1;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
+ sc_ratio = 2;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
+ sc_ratio = 3;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
+ sc_ratio = 4;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
+ sc_ratio = 5;
+ else
+ sc_ratio = 6;
+
+ for (i = 0; i < GSC_COEF_PHASE; i++)
+ for (j = 0; j < GSC_COEF_V_4T; j++)
+ for (k = 0; k < GSC_COEF_DEPTH; k++)
+ gsc_write(v_coef_4t[sc_ratio][i][j],
+ GSC_VCOEF(i, j, k));
+}
+
+static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("main_hratio[%ld]main_vratio[%ld]\n",
+ sc->main_hratio, sc->main_vratio);
+
+ gsc_set_h_coef(ctx, sc->main_hratio);
+ cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
+ gsc_write(cfg, GSC_MAIN_H_RATIO);
+
+ gsc_set_v_coef(ctx, sc->main_vratio);
+ cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
+ gsc_write(cfg, GSC_MAIN_V_RATIO);
+}
+
+static int gsc_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct gsc_scaler *sc = &ctx->sc;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+ swap, pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ }
+
+ /* pixel offset */
+ cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
+ GSC_DSTIMG_OFFSET_Y(pos->y));
+ gsc_write(cfg, GSC_DSTIMG_OFFSET);
+
+ /* scaled size */
+ cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
+ gsc_write(cfg, GSC_SCALED_SIZE);
+
+ DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = gsc_read(GSC_DSTIMG_SIZE);
+ cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
+ GSC_DSTIMG_WIDTH_MASK);
+ cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
+ GSC_DSTIMG_HEIGHT(sz->vsize));
+ gsc_write(cfg, GSC_DSTIMG_SIZE);
+
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg &= ~GSC_OUT_RGB_TYPE_MASK;
+
+ DRM_DEBUG_KMS("width[%d]range[%d]\n", pos->w, sc->range);
+
+	if (pos->w >= GSC_WIDTH_ITU_709) {
+		if (sc->range)
+			cfg |= GSC_OUT_RGB_HD_WIDE;
+		else
+			cfg |= GSC_OUT_RGB_HD_NARROW;
+	} else {
+		if (sc->range)
+			cfg |= GSC_OUT_RGB_SD_WIDE;
+		else
+			cfg |= GSC_OUT_RGB_SD_NARROW;
+	}
+
+ gsc_write(cfg, GSC_OUT_CON);
+
+ return 0;
+}
+
+static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
+{
+ u32 cfg, i, buf_num = GSC_REG_SZ;
+ u32 mask = 0x00000001;
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+ for (i = 0; i < GSC_REG_SZ; i++)
+ if (cfg & (mask << i))
+ buf_num--;
+
+ DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
+
+ return buf_num;
+}
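+
+/*
+ * A set bit in GSC_OUT_BASE_ADDR_Y_MASK marks a dequeued slot, so the
+ * clear bits counted above give the number of destination buffers
+ * currently enqueued; the result drives the interrupt enable/disable
+ * decisions in gsc_dst_set_buf_seq() below.
+ */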
+
+static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool masked;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
+
+ mutex_lock(&ctx->lock);
+
+ /* mask register set */
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ masked = false;
+ break;
+ case IPP_BUF_DEQUEUE:
+ masked = true;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* sequence id */
+ cfg &= ~mask;
+ cfg |= masked << buf_id;
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ /* interrupt enable */
+ if (buf_type == IPP_BUF_ENQUEUE &&
+ gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
+ gsc_handle_irq(ctx, true, false, true);
+
+ /* interrupt disable */
+ if (buf_type == IPP_BUF_DEQUEUE &&
+ gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
+ gsc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+ mutex_unlock(&ctx->lock);
+ return ret;
+}
+
+static int gsc_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+
+ DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > GSC_MAX_DST) {
+		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+ return -EINVAL;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ GSC_OUT_BASE_ADDR_Y(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ GSC_OUT_BASE_ADDR_CB(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ GSC_OUT_BASE_ADDR_CR(buf_id));
+ break;
+ case IPP_BUF_DEQUEUE:
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_dst_ops = {
+ .set_fmt = gsc_dst_set_fmt,
+ .set_transf = gsc_dst_set_transf,
+ .set_size = gsc_dst_set_size,
+ .set_addr = gsc_dst_set_addr,
+};
+
+static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("enable[%d]\n", enable);
+
+ if (enable) {
+ clk_enable(ctx->gsc_clk);
+ ctx->suspended = false;
+ } else {
+ clk_disable(ctx->gsc_clk);
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
+static int gsc_get_src_buf_index(struct gsc_context *ctx)
+{
+ u32 cfg, curr_index, i;
+ u32 buf_id = GSC_MAX_SRC;
+ int ret;
+
+ DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
+
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+ curr_index = GSC_IN_CURR_GET_INDEX(cfg);
+
+ for (i = curr_index; i < GSC_MAX_SRC; i++) {
+ if (!((cfg >> i) & 0x1)) {
+ buf_id = i;
+ break;
+ }
+ }
+
+ if (buf_id == GSC_MAX_SRC) {
+ DRM_ERROR("failed to get in buffer index.\n");
+ return -EINVAL;
+ }
+
+ ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+ if (ret < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
+ curr_index, buf_id);
+
+ return buf_id;
+}
+
+static int gsc_get_dst_buf_index(struct gsc_context *ctx)
+{
+ u32 cfg, curr_index, i;
+ u32 buf_id = GSC_MAX_DST;
+ int ret;
+
+ DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+ curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
+
+ for (i = curr_index; i < GSC_MAX_DST; i++) {
+ if (!((cfg >> i) & 0x1)) {
+ buf_id = i;
+ break;
+ }
+ }
+
+ if (buf_id == GSC_MAX_DST) {
+ DRM_ERROR("failed to get out buffer index.\n");
+ return -EINVAL;
+ }
+
+ ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+ if (ret < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
+ curr_index, buf_id);
+
+ return buf_id;
+}
+
+static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
+{
+ struct gsc_context *ctx = dev_id;
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_event_work *event_work =
+ c_node->event_work;
+ u32 status;
+ int buf_id[EXYNOS_DRM_OPS_MAX];
+
+ DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
+
+ status = gsc_read(GSC_IRQ);
+ if (status & GSC_IRQ_STATUS_OR_IRQ) {
+		dev_err(ippdrv->dev, "overflow occurred at %d, status 0x%x.\n",
+ ctx->id, status);
+ return IRQ_NONE;
+ }
+
+ if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
+		dev_dbg(ippdrv->dev, "frame done occurred at %d, status 0x%x.\n",
+ ctx->id, status);
+
+ buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
+ if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
+ return IRQ_HANDLED;
+
+ buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
+ if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
+ return IRQ_HANDLED;
+
+ DRM_DEBUG_KMS("buf_id_src[%d]buf_id_dst[%d]\n",
+ buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
+
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
+ buf_id[EXYNOS_DRM_OPS_SRC];
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+ buf_id[EXYNOS_DRM_OPS_DST];
+ queue_work(ippdrv->event_workq,
+ (struct work_struct *)event_work);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
+
+ prop_list->version = 1;
+ prop_list->writeback = 1;
+ prop_list->refresh_min = GSC_REFRESH_MIN;
+ prop_list->refresh_max = GSC_REFRESH_MAX;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 1;
+ prop_list->crop = 1;
+ prop_list->crop_max.hsize = GSC_CROP_MAX;
+ prop_list->crop_max.vsize = GSC_CROP_MAX;
+ prop_list->crop_min.hsize = GSC_CROP_MIN;
+ prop_list->crop_min.vsize = GSC_CROP_MIN;
+ prop_list->scale = 1;
+ prop_list->scale_max.hsize = GSC_SCALE_MAX;
+ prop_list->scale_max.vsize = GSC_SCALE_MAX;
+ prop_list->scale_min.hsize = GSC_SCALE_MIN;
+ prop_list->scale_min.vsize = GSC_SCALE_MIN;
+
+ return 0;
+}
+
+static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ case EXYNOS_DRM_FLIP_BOTH:
+ return true;
+ default:
+ DRM_DEBUG_KMS("invalid flip\n");
+ return false;
+ }
+}
+
+static int gsc_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos *pos;
+ struct drm_exynos_sz *sz;
+ bool swap;
+ int i;
+
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ /* check for flip */
+ if (!gsc_check_drm_flip(config->flip)) {
+ DRM_ERROR("invalid flip.\n");
+ goto err_property;
+ }
+
+ /* check for degree */
+ switch (config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
+ break;
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ swap = false;
+ break;
+ default:
+ DRM_ERROR("invalid degree.\n");
+ goto err_property;
+ }
+
+ /* check for buffer bound */
+ if ((pos->x + pos->w > sz->hsize) ||
+ (pos->y + pos->h > sz->vsize)) {
+ DRM_ERROR("out of buf bound.\n");
+ goto err_property;
+ }
+
+ /* check for crop */
+ if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+ if (swap) {
+ if ((pos->h < pp->crop_min.hsize) ||
+ (sz->vsize > pp->crop_max.hsize) ||
+ (pos->w < pp->crop_min.vsize) ||
+ (sz->hsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->crop_min.hsize) ||
+ (sz->hsize > pp->crop_max.hsize) ||
+ (pos->h < pp->crop_min.vsize) ||
+ (sz->vsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ }
+ }
+
+ /* check for scale */
+ if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+ if (swap) {
+ if ((pos->h < pp->scale_min.hsize) ||
+ (sz->vsize > pp->scale_max.hsize) ||
+ (pos->w < pp->scale_min.vsize) ||
+ (sz->hsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->scale_min.hsize) ||
+ (sz->hsize > pp->scale_max.hsize) ||
+ (pos->h < pp->scale_min.vsize) ||
+ (sz->vsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ }
+ }
+ }
+
+ return 0;
+
+err_property:
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+ i ? "dst" : "src", config->flip, config->degree,
+ pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize);
+ }
+
+ return -EINVAL;
+}
+
+static int gsc_ippdrv_reset(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct gsc_scaler *sc = &ctx->sc;
+ int ret;
+
+ /* reset h/w block */
+ ret = gsc_sw_reset(ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to reset hardware.\n");
+ return ret;
+ }
+
+ /* scaler setting */
+ memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+ sc->range = true;
+
+ return 0;
+}
+
+static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
+ struct drm_exynos_ipp_set_wb set_wb;
+ u32 cfg;
+ int ret, i;
+
+ DRM_DEBUG_KMS("cmd[%d]\n", cmd);
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+
+ gsc_handle_irq(ctx, true, false, true);
+
+ for_each_ipp_ops(i) {
+ config = &property->config[i];
+ img_pos[i] = config->pos;
+ }
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* enable one shot */
+ cfg = gsc_read(GSC_ENABLE);
+ cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
+ GSC_ENABLE_CLK_GATE_MODE_MASK);
+ cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
+ gsc_write(cfg, GSC_ENABLE);
+
+ /* src dma memory */
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= GSC_IN_PATH_MEMORY;
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst dma memory */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ case IPP_CMD_WB:
+ set_wb.enable = 1;
+ set_wb.refresh = property->refresh_rate;
+ gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+
+ /* src local path */
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst dma memory */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ case IPP_CMD_OUTPUT:
+ /* src dma memory */
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= GSC_IN_PATH_MEMORY;
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst local path */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ default:
+ ret = -EINVAL;
+		dev_err(dev, "invalid operation.\n");
+ return ret;
+ }
+
+ ret = gsc_set_prescaler(ctx, &ctx->sc,
+ &img_pos[EXYNOS_DRM_OPS_SRC],
+ &img_pos[EXYNOS_DRM_OPS_DST]);
+ if (ret) {
+		dev_err(dev, "failed to set prescaler.\n");
+ return ret;
+ }
+
+ gsc_set_scaler(ctx, &ctx->sc);
+
+ cfg = gsc_read(GSC_ENABLE);
+ cfg |= GSC_ENABLE_ON;
+ gsc_write(cfg, GSC_ENABLE);
+
+ return 0;
+}
+
+static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+ u32 cfg;
+
+ DRM_DEBUG_KMS("cmd[%d]\n", cmd);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* bypass */
+ break;
+ case IPP_CMD_WB:
+ gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+		dev_err(dev, "invalid operation.\n");
+ break;
+ }
+
+ gsc_handle_irq(ctx, false, false, true);
+
+ /* reset sequence */
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ cfg = gsc_read(GSC_ENABLE);
+ cfg &= ~GSC_ENABLE_ON;
+ gsc_write(cfg, GSC_ENABLE);
+}
+
+static int gsc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gsc_context *ctx;
+ struct resource *res;
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ /* clock control */
+ ctx->gsc_clk = devm_clk_get(dev, "gscl");
+ if (IS_ERR(ctx->gsc_clk)) {
+ dev_err(dev, "failed to get gsc clock.\n");
+ return PTR_ERR(ctx->gsc_clk);
+ }
+
+ /* resource memory */
+ ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
+ if (IS_ERR(ctx->regs))
+ return PTR_ERR(ctx->regs);
+
+ /* resource irq */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to request irq resource.\n");
+ return -ENOENT;
+ }
+
+ ctx->irq = res->start;
+ ret = devm_request_threaded_irq(dev, ctx->irq, NULL, gsc_irq_handler,
+ IRQF_ONESHOT, "drm_gsc", ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq.\n");
+ return ret;
+ }
+
+	/* context initialization */
+ ctx->id = pdev->id;
+
+ ippdrv = &ctx->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
+ ippdrv->check_property = gsc_ippdrv_check_property;
+ ippdrv->reset = gsc_ippdrv_reset;
+ ippdrv->start = gsc_ippdrv_start;
+ ippdrv->stop = gsc_ippdrv_stop;
+ ret = gsc_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ return ret;
+ }
+
+	DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
+
+ mutex_init(&ctx->lock);
+ platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm gsc device.\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(dev, "drm gsc registered successfully.\n");
+
+ return 0;
+
+err_ippdrv_register:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int gsc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+ exynos_drm_ippdrv_unregister(ippdrv);
+ mutex_destroy(&ctx->lock);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gsc_suspend(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_resume(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+
+ if (!pm_runtime_suspended(dev))
+ return gsc_clk_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int gsc_runtime_suspend(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+
+ return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_runtime_resume(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+
+ return gsc_clk_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops gsc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+ SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
+};
+
+struct platform_driver gsc_driver = {
+ .probe = gsc_probe,
+ .remove = gsc_remove,
+ .driver = {
+ .name = "exynos-drm-gsc",
+ .owner = THIS_MODULE,
+ .pm = &gsc_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
new file mode 100644
index 00000000000..29ec1c5efcf
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_GSC_H_
+#define _EXYNOS_DRM_GSC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ * Mixer output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
new file mode 100644
index 00000000000..b32b291f88f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -0,0 +1,143 @@
+/* exynos_drm_iommu.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+
+#include <asm/dma-iommu.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * drm_create_iommu_mapping - create a mapping structure
+ *
+ * @drm_dev: DRM device
+ */
+int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+ struct dma_iommu_mapping *mapping = NULL;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct device *dev = drm_dev->dev;
+
+ if (!priv->da_start)
+ priv->da_start = EXYNOS_DEV_ADDR_START;
+ if (!priv->da_space_size)
+ priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
+
+ mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
+ priv->da_space_size);
+
+ if (IS_ERR(mapping))
+ return PTR_ERR(mapping);
+
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ if (!dev->dma_parms)
+ goto error;
+
+ dma_set_max_seg_size(dev, 0xffffffffu);
+ dev->archdata.mapping = mapping;
+
+ return 0;
+error:
+ arm_iommu_release_mapping(mapping);
+ return -ENOMEM;
+}
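+
+/*
+ * The mapping created above reserves the device address range
+ * [da_start, da_start + da_space_size), by default 0x20000000 to
+ * 0x60000000, from which the ARM DMA-IOMMU layer allocates device
+ * virtual addresses for buffers.
+ */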
+
+/*
+ * drm_release_iommu_mapping - release iommu mapping structure
+ *
+ * @drm_dev: DRM device
+ *
+ * if mapping->kref becomes 0 then all things related to iommu mapping
+ * will be released
+ */
+void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+ struct device *dev = drm_dev->dev;
+
+ arm_iommu_release_mapping(dev->archdata.mapping);
+}
+
+/*
+ * drm_iommu_attach_device - attach device to iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attached
+ *
+ * This function should be called by sub drivers to attach their device
+ * to the iommu mapping.
+ */
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct device *dev = drm_dev->dev;
+ int ret;
+
+ if (!dev->archdata.mapping) {
+ DRM_ERROR("iommu_mapping is null.\n");
+ return -EFAULT;
+ }
+
+ subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
+ sizeof(*subdrv_dev->dma_parms),
+ GFP_KERNEL);
+ if (!subdrv_dev->dma_parms)
+ return -ENOMEM;
+
+ dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
+
+ ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("failed iommu attach.\n");
+ return ret;
+ }
+
+ /*
+ * Set dma_ops to drm_device just one time.
+ *
+	 * The dma mapping api needs a device object and is used to
+	 * allocate physical memory and map it through the iommu table.
+	 * If the iommu attach succeeded, the sub driver has dma_ops for
+	 * the iommu, and all sub drivers share the same dma_ops.
+ */
+ if (!dev->archdata.dma_ops)
+ dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
+
+ return 0;
+}
+
+/*
+ * drm_iommu_detach_device - detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach their device
+ * from the iommu mapping.
+ */
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct device *dev = drm_dev->dev;
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ if (!mapping || !mapping->domain)
+ return;
+
+ iommu_detach_device(mapping->domain, subdrv_dev);
+ drm_release_iommu_mapping(drm_dev);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
new file mode 100644
index 00000000000..72376d41c51
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -0,0 +1,70 @@
+/* exynos_drm_iommu.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_IOMMU_H_
+#define _EXYNOS_DRM_IOMMU_H_
+
+#define EXYNOS_DEV_ADDR_START 0x20000000
+#define EXYNOS_DEV_ADDR_SIZE 0x40000000
+
+#ifdef CONFIG_DRM_EXYNOS_IOMMU
+
+int drm_create_iommu_mapping(struct drm_device *drm_dev);
+
+void drm_release_iommu_mapping(struct drm_device *drm_dev);
+
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev);
+
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev);
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ struct device *dev = drm_dev->dev;
+
+ return dev->archdata.mapping ? true : false;
+#else
+ return false;
+#endif
+}
+
+#else
+
+struct dma_iommu_mapping;
+static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+ return 0;
+}
+
+static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+}
+
+static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ return 0;
+}
+
+static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+}
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+ return false;
+}
+
+#endif
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644
index 00000000000..a1888e128f1
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -0,0 +1,1989 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * IPP stands for Image Post Processing and
+ * supports image scaler/rotator and input/output DMA operations
+ * using FIMC, GSC, Rotator, and so on.
+ * IPP is an integrating device driver for h/w blocks of the same kind.
+ */
+
+/*
+ * TODO
+ * 1. expand command control id.
+ * 2. integrate property and config.
+ * 3. remove send_event id check routine.
+ * 4. compare send_event id if needed.
+ * 5. free subdrv_remove notifier callback list if needed.
+ * 6. need to check subdrv_open about multi-open.
+ * 7. need to implement power_on for power and sysmmu control.
+ */
+
+#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define ipp_is_m2m_cmd(c)	((c) == IPP_CMD_M2M)
+
+/* platform device pointer for ipp device. */
+static struct platform_device *exynos_drm_ipp_pdev;
+
+/*
+ * A structure of event.
+ *
+ * @base: base of event.
+ * @event: ipp event.
+ */
+struct drm_exynos_ipp_send_event {
+ struct drm_pending_event base;
+ struct drm_exynos_ipp_event event;
+};
+
+/*
+ * A structure of memory node.
+ *
+ * @list: list head to memory queue information.
+ * @ops_id: id of operations.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @buf_info: gem objects and dma address, size.
+ * @filp: a pointer to drm_file.
+ */
+struct drm_exynos_ipp_mem_node {
+ struct list_head list;
+ enum drm_exynos_ops_id ops_id;
+ u32 prop_id;
+ u32 buf_id;
+ struct drm_exynos_ipp_buf_info buf_info;
+ struct drm_file *filp;
+};
+
+/*
+ * A structure of ipp context.
+ *
+ * @subdrv: prepare initialization using subdrv.
+ * @ipp_lock: lock for synchronization of access to ipp_idr.
+ * @prop_lock: lock for synchronization of access to prop_idr.
+ * @ipp_idr: ipp driver idr.
+ * @prop_idr: property idr.
+ * @event_workq: event work queue.
+ * @cmd_workq: command work queue.
+ */
+struct ipp_context {
+ struct exynos_drm_subdrv subdrv;
+ struct mutex ipp_lock;
+ struct mutex prop_lock;
+ struct idr ipp_idr;
+ struct idr prop_idr;
+ struct workqueue_struct *event_workq;
+ struct workqueue_struct *cmd_workq;
+};
+
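+/* global list/lock of registered ipp drivers and the ipp notifier chain */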
+static LIST_HEAD(exynos_drm_ippdrv_list);
+static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
+static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
+
+int exynos_platform_device_ipp_register(void)
+{
+ struct platform_device *pdev;
+
+ if (exynos_drm_ipp_pdev)
+ return -EEXIST;
+
+ pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ exynos_drm_ipp_pdev = pdev;
+
+ return 0;
+}
+
+void exynos_platform_device_ipp_unregister(void)
+{
+ if (exynos_drm_ipp_pdev) {
+ platform_device_unregister(exynos_drm_ipp_pdev);
+ exynos_drm_ipp_pdev = NULL;
+ }
+}
+
+int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+ if (!ippdrv)
+ return -EINVAL;
+
+ mutex_lock(&exynos_drm_ippdrv_lock);
+ list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
+ mutex_unlock(&exynos_drm_ippdrv_lock);
+
+ return 0;
+}
+
+int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+ if (!ippdrv)
+ return -EINVAL;
+
+ mutex_lock(&exynos_drm_ippdrv_lock);
+ list_del(&ippdrv->drv_list);
+ mutex_unlock(&exynos_drm_ippdrv_lock);
+
+ return 0;
+}
+
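+/*
+ * idr helpers: objects (ipp drivers and command nodes) are allocated,
+ * looked up and removed under the matching mutex. ids start at 1 so
+ * that 0 can mean "not set" in the ioctl interface.
+ */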
+static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
+ u32 *idp)
+{
+ int ret;
+
+	/* do the allocation under our mutex */
+ mutex_lock(lock);
+ ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
+ mutex_unlock(lock);
+ if (ret < 0)
+ return ret;
+
+ *idp = ret;
+ return 0;
+}
+
+static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
+{
+ mutex_lock(lock);
+ idr_remove(id_idr, id);
+ mutex_unlock(lock);
+}
+
+static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
+{
+ void *obj;
+
+ DRM_DEBUG_KMS("id[%d]\n", id);
+
+ mutex_lock(lock);
+
+ /* find object using handle */
+ obj = idr_find(id_idr, id);
+ if (!obj) {
+ DRM_ERROR("failed to find object.\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_unlock(lock);
+
+ return obj;
+}
+
+static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
+ enum drm_exynos_ipp_cmd cmd)
+{
+	/*
+	 * The driver is busy if it is marked dedicated, or if it runs a
+	 * WB/OUTPUT operation and the device is still powered on.
+	 */
+ if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
+ !pm_runtime_suspended(ippdrv->dev)))
+ return true;
+
+ return false;
+}
+
+static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
+ struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ u32 ipp_id = property->ipp_id;
+
+ DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);
+
+ if (ipp_id) {
+ /* find ipp driver using idr */
+ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+ ipp_id);
+ if (IS_ERR(ippdrv)) {
+			DRM_ERROR("ipp%d driver not found.\n", ipp_id);
+ return ippdrv;
+ }
+
+		/*
+		 * WB and OUTPUT operations do not support concurrent use,
+		 * so the set property ioctl marks the driver dedicated
+		 * and the flag is cleared when the operation finishes.
+		 */
+		if (ipp_check_dedicated(ippdrv, property->cmd)) {
+			DRM_ERROR("device already in use.\n");
+			return ERR_PTR(-EBUSY);
+		}
+
+		/*
+		 * ipp drivers have different capabilities, so check that
+		 * this driver supports the requested property.
+		 */
+		if (ippdrv->check_property &&
+		    ippdrv->check_property(ippdrv->dev, property)) {
+			DRM_ERROR("property not supported.\n");
+			return ERR_PTR(-EINVAL);
+		}
+
+ return ippdrv;
+ } else {
+		/*
+		 * The user application did not set an ipp_id, so search
+		 * the driver list for a suitable ipp driver.
+		 */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ if (ipp_check_dedicated(ippdrv, property->cmd)) {
+				DRM_DEBUG_KMS("device already in use.\n");
+ continue;
+ }
+
+ if (ippdrv->check_property &&
+ ippdrv->check_property(ippdrv->dev, property)) {
+				DRM_DEBUG_KMS("property not supported.\n");
+ continue;
+ }
+
+ return ippdrv;
+ }
+
+		DRM_ERROR("no ipp driver supports the requested operations.\n");
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int count = 0;
+
+ DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
+
+	/*
+	 * Look up the ipp driver that owns the given prop_id handle.
+	 * This is used e.g. for the PAUSE state, queue buf and command
+	 * control.
+	 */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);
+
+ mutex_lock(&ippdrv->cmd_lock);
+ list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
+ if (c_node->property.prop_id == prop_id) {
+ mutex_unlock(&ippdrv->cmd_lock);
+ return ippdrv;
+ }
+ }
+ mutex_unlock(&ippdrv->cmd_lock);
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_prop_list *prop_list = data;
+ struct exynos_drm_ippdrv *ippdrv;
+ int count = 0;
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!prop_list) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);
+
+ if (!prop_list->ipp_id) {
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+ count++;
+
+		/*
+		 * Report the number of registered ipp drivers. The user
+		 * application first queries this count and then queries
+		 * each driver's capabilities by ipp_id.
+		 */
+ prop_list->count = count;
+ } else {
+		/*
+		 * Report the capabilities of the driver selected by
+		 * ipp_id. Some devices do not support the wb or output
+		 * interfaces, so the user application uses this ioctl
+		 * to find a suitable ipp driver.
+		 */
+ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+ prop_list->ipp_id);
+ if (IS_ERR(ippdrv)) {
+			DRM_ERROR("ipp%d driver not found.\n",
+				prop_list->ipp_id);
+ return PTR_ERR(ippdrv);
+ }
+
+ *prop_list = ippdrv->prop_list;
+ }
+
+ return 0;
+}
+
+static void ipp_print_property(struct drm_exynos_ipp_property *property,
+ int idx)
+{
+ struct drm_exynos_ipp_config *config = &property->config[idx];
+ struct drm_exynos_pos *pos = &config->pos;
+ struct drm_exynos_sz *sz = &config->sz;
+
+ DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
+ property->prop_id, idx ? "dst" : "src", config->fmt);
+
+ DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
+ pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize, config->flip, config->degree);
+}
+
+static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ u32 prop_id = property->prop_id;
+
+ DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
+
+ ippdrv = ipp_find_drv_by_handle(prop_id);
+ if (IS_ERR(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EINVAL;
+ }
+
+	/*
+	 * Find the stopped command node with the given prop_id in the
+	 * ippdrv command list and update its property information.
+	 */
+ mutex_lock(&ippdrv->cmd_lock);
+ list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
+ if ((c_node->property.prop_id == prop_id) &&
+ (c_node->state == IPP_STATE_STOP)) {
+ mutex_unlock(&ippdrv->cmd_lock);
+ DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
+ property->cmd, (int)ippdrv);
+
+ c_node->property = *property;
+ return 0;
+ }
+ }
+ mutex_unlock(&ippdrv->cmd_lock);
+
+ DRM_ERROR("failed to search property.\n");
+
+ return -EINVAL;
+}
+
+static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
+{
+ struct drm_exynos_ipp_cmd_work *cmd_work;
+
+ cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
+ if (!cmd_work)
+ return ERR_PTR(-ENOMEM);
+
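+	/*
+	 * struct work_struct is the first member of the cmd work, so
+	 * the cast below is equivalent to taking &cmd_work->work.
+	 */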
+ INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
+
+ return cmd_work;
+}
+
+static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
+{
+ struct drm_exynos_ipp_event_work *event_work;
+
+ event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
+ if (!event_work)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
+
+ return event_work;
+}
+
+int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_property *property = data;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int ret, i;
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!property) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+	/* log the property the user application passed in */
+ for_each_ipp_ops(i)
+ ipp_print_property(property, i);
+
+	/*
+	 * The set property ioctl normally generates a new prop_id, but
+	 * if a prop_id was already assigned by an earlier call (e.g. in
+	 * the PAUSE state), find that command node and reuse it instead
+	 * of allocating a new one.
+	 */
+ if (property->prop_id) {
+ DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
+ return ipp_find_and_set_property(property);
+ }
+
+ /* find ipp driver using ipp id */
+ ippdrv = ipp_find_driver(ctx, property);
+ if (IS_ERR(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EINVAL;
+ }
+
+ /* allocate command node */
+ c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
+ if (!c_node)
+ return -ENOMEM;
+
+ /* create property id */
+ ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
+ &property->prop_id);
+ if (ret) {
+ DRM_ERROR("failed to create id.\n");
+ goto err_clear;
+ }
+
+ DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
+ property->prop_id, property->cmd, (int)ippdrv);
+
+ /* stored property information and ippdrv in private data */
+ c_node->priv = priv;
+ c_node->property = *property;
+ c_node->state = IPP_STATE_IDLE;
+
+	c_node->start_work = ipp_create_cmd_work();
+	if (IS_ERR(c_node->start_work)) {
+		DRM_ERROR("failed to create start work.\n");
+		ret = PTR_ERR(c_node->start_work);
+		goto err_remove_id;
+	}
+
+	c_node->stop_work = ipp_create_cmd_work();
+	if (IS_ERR(c_node->stop_work)) {
+		DRM_ERROR("failed to create stop work.\n");
+		ret = PTR_ERR(c_node->stop_work);
+		goto err_free_start;
+	}
+
+	c_node->event_work = ipp_create_event_work();
+	if (IS_ERR(c_node->event_work)) {
+		DRM_ERROR("failed to create event work.\n");
+		ret = PTR_ERR(c_node->event_work);
+		goto err_free_stop;
+	}
+
+ mutex_init(&c_node->lock);
+ mutex_init(&c_node->mem_lock);
+ mutex_init(&c_node->event_lock);
+
+ init_completion(&c_node->start_complete);
+ init_completion(&c_node->stop_complete);
+
+ for_each_ipp_ops(i)
+ INIT_LIST_HEAD(&c_node->mem_list[i]);
+
+ INIT_LIST_HEAD(&c_node->event_list);
+ list_splice_init(&priv->event_list, &c_node->event_list);
+ mutex_lock(&ippdrv->cmd_lock);
+ list_add_tail(&c_node->list, &ippdrv->cmd_list);
+ mutex_unlock(&ippdrv->cmd_lock);
+
+	/* mark the driver dedicated for non-m2m commands */
+ if (!ipp_is_m2m_cmd(property->cmd))
+ ippdrv->dedicated = true;
+
+ return 0;
+
+err_free_stop:
+ kfree(c_node->stop_work);
+err_free_start:
+ kfree(c_node->start_work);
+err_remove_id:
+ ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
+err_clear:
+ kfree(c_node);
+ return ret;
+}
+
+static void ipp_clean_cmd_node(struct ipp_context *ctx,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ /* delete list */
+ list_del(&c_node->list);
+
+ ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
+ c_node->property.prop_id);
+
+ /* destroy mutex */
+ mutex_destroy(&c_node->lock);
+ mutex_destroy(&c_node->mem_lock);
+ mutex_destroy(&c_node->event_lock);
+
+ /* free command node */
+ kfree(c_node->start_work);
+ kfree(c_node->stop_work);
+ kfree(c_node->event_work);
+ kfree(c_node);
+}
+
+static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct list_head *head;
+ int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
+
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ /* find memory node entry */
+ list_for_each_entry(m_node, head, list) {
+ DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n",
+ i ? "dst" : "src", count[i], (int)m_node);
+ count[i]++;
+ }
+ }
+
+ DRM_DEBUG_KMS("min[%d]max[%d]\n",
+ min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
+ max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
+
+	/*
+	 * M2M operations need paired src/dst buffers, so use the
+	 * minimum of the two counts. Other operations do not use
+	 * paired buffers, so use the maximum count.
+	 */
+ if (ipp_is_m2m_cmd(property->cmd))
+ ret = min(count[EXYNOS_DRM_OPS_SRC],
+ count[EXYNOS_DRM_OPS_DST]);
+ else
+ ret = max(count[EXYNOS_DRM_OPS_SRC],
+ count[EXYNOS_DRM_OPS_DST]);
+
+ return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+ *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct list_head *head;
+ int count = 0;
+
+ DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
+
+ /* source/destination memory list */
+ head = &c_node->mem_list[qbuf->ops_id];
+
+ /* find memory node from memory list */
+ list_for_each_entry(m_node, head, list) {
+ DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
+
+ /* compare buffer id */
+ if (m_node->buf_id == qbuf->buf_id)
+ return m_node;
+ }
+
+ return NULL;
+}
+
+static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node)
+{
+ struct exynos_drm_ipp_ops *ops = NULL;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
+
+ if (!m_node) {
+ DRM_ERROR("invalid queue node.\n");
+ return -EFAULT;
+ }
+
+ DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
+
+ /* get operations callback */
+ ops = ippdrv->ops[m_node->ops_id];
+ if (!ops) {
+ DRM_ERROR("not support ops.\n");
+ return -EFAULT;
+ }
+
+ /* set address and enable irq */
+ if (ops->set_addr) {
+ ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
+ m_node->buf_id, IPP_BUF_ENQUEUE);
+ if (ret) {
+ DRM_ERROR("failed to set addr.\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+ *ipp_get_mem_node(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_buf_info buf_info;
+ void *addr;
+ int i;
+
+ m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
+ if (!m_node)
+ return ERR_PTR(-ENOMEM);
+
+ /* clear base address for error handling */
+ memset(&buf_info, 0x0, sizeof(buf_info));
+
+ /* operations, buffer id */
+ m_node->ops_id = qbuf->ops_id;
+ m_node->prop_id = qbuf->prop_id;
+ m_node->buf_id = qbuf->buf_id;
+
+ DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
+ DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
+
+ for_each_ipp_planar(i) {
+ DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);
+
+ /* get dma address by handle */
+ if (qbuf->handle[i]) {
+ addr = exynos_drm_gem_get_dma_addr(drm_dev,
+ qbuf->handle[i], file);
+ if (IS_ERR(addr)) {
+ DRM_ERROR("failed to get addr.\n");
+ goto err_clear;
+ }
+
+ buf_info.handles[i] = qbuf->handle[i];
+ buf_info.base[i] = *(dma_addr_t *) addr;
+ DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%x]\n",
+ i, buf_info.base[i], (int)buf_info.handles[i]);
+ }
+ }
+
+ m_node->filp = file;
+ m_node->buf_info = buf_info;
+ mutex_lock(&c_node->mem_lock);
+ list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
+ mutex_unlock(&c_node->mem_lock);
+
+ return m_node;
+
+err_clear:
+ kfree(m_node);
+ return ERR_PTR(-EFAULT);
+}
+
+static int ipp_put_mem_node(struct drm_device *drm_dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node)
+{
+ int i;
+
+ DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
+
+ if (!m_node) {
+ DRM_ERROR("invalid dequeue node.\n");
+ return -EFAULT;
+ }
+
+ DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
+
+ /* put gem buffer */
+ for_each_ipp_planar(i) {
+ unsigned long handle = m_node->buf_info.handles[i];
+ if (handle)
+ exynos_drm_gem_put_dma_addr(drm_dev, handle,
+ m_node->filp);
+ }
+
+ /* delete list in queue */
+ list_del(&m_node->list);
+ kfree(m_node);
+
+ return 0;
+}
+
+static void ipp_free_event(struct drm_pending_event *event)
+{
+ kfree(event);
+}
+
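+/*
+ * Events are allocated when a destination buffer is enqueued and kept
+ * on the command node; ipp_send_event() later fills in the result and
+ * moves the event to the file's event list for delivery to userspace.
+ */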
+static int ipp_get_event(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_send_event *e;
+
+ DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (!e)
+		return -ENOMEM;
+
+ /* make event */
+ e->event.base.type = DRM_EXYNOS_IPP_EVENT;
+ e->event.base.length = sizeof(e->event);
+ e->event.user_data = qbuf->user_data;
+ e->event.prop_id = qbuf->prop_id;
+ e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file;
+ e->base.destroy = ipp_free_event;
+ mutex_lock(&c_node->event_lock);
+ list_add_tail(&e->base.link, &c_node->event_list);
+ mutex_unlock(&c_node->event_lock);
+
+ return 0;
+}
+
+static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_send_event *e, *te;
+ int count = 0;
+
+ mutex_lock(&c_node->event_lock);
+ list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
+ DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
+
+		/*
+		 * A NULL qbuf means delete all events; stop operations
+		 * flush the whole event list. Otherwise delete only the
+		 * events with a matching buffer id.
+		 */
+ if (!qbuf) {
+ /* delete list */
+ list_del(&e->base.link);
+ kfree(e);
+ }
+
+ /* compare buffer id */
+ if (qbuf && (qbuf->buf_id ==
+ e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
+ /* delete list */
+ list_del(&e->base.link);
+ kfree(e);
+ goto out_unlock;
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&c_node->event_lock);
+ return;
+}
+
+static void ipp_handle_cmd_work(struct device *dev,
+ struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_work *cmd_work,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ cmd_work->ippdrv = ippdrv;
+ cmd_work->c_node = c_node;
+ queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
+}
+
+static int ipp_queue_buf_with_run(struct device *dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_property *property;
+ struct exynos_drm_ipp_ops *ops;
+ int ret;
+
+ ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
+ if (IS_ERR(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EFAULT;
+ }
+
+ ops = ippdrv->ops[qbuf->ops_id];
+ if (!ops) {
+ DRM_ERROR("failed to get ops.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+
+ if (c_node->state != IPP_STATE_START) {
+ DRM_DEBUG_KMS("bypass for invalid state.\n");
+ return 0;
+ }
+
+ mutex_lock(&c_node->mem_lock);
+ if (!ipp_check_mem_list(c_node)) {
+ mutex_unlock(&c_node->mem_lock);
+ DRM_DEBUG_KMS("empty memory.\n");
+ return 0;
+ }
+
+	/*
+	 * For m2m operations, enqueueing a destination buffer kicks off
+	 * the operation, so schedule the start work from queue_buf.
+	 */
+ if (ipp_is_m2m_cmd(property->cmd)) {
+ struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
+
+ cmd_work->ctrl = IPP_CTRL_PLAY;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ } else {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ mutex_unlock(&c_node->mem_lock);
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ mutex_unlock(&c_node->mem_lock);
+
+ return 0;
+}
+
+static void ipp_clean_queue_buf(struct drm_device *drm_dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+
+ /* delete list */
+ mutex_lock(&c_node->mem_lock);
+ list_for_each_entry_safe(m_node, tm_node,
+ &c_node->mem_list[qbuf->ops_id], list) {
+ if (m_node->buf_id == qbuf->buf_id &&
+ m_node->ops_id == qbuf->ops_id)
+ ipp_put_mem_node(drm_dev, c_node, m_node);
+ }
+ mutex_unlock(&c_node->mem_lock);
+}
+
+int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_queue_buf *qbuf = data;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_mem_node *m_node;
+ int ret;
+
+ if (!qbuf) {
+ DRM_ERROR("invalid buf parameter.\n");
+ return -EINVAL;
+ }
+
+ if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
+ DRM_ERROR("invalid ops parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
+ qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
+ qbuf->buf_id, qbuf->buf_type);
+
+ /* find command node */
+ c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+ qbuf->prop_id);
+ if (IS_ERR(c_node)) {
+ DRM_ERROR("failed to get command node.\n");
+ return PTR_ERR(c_node);
+ }
+
+ /* buffer control */
+ switch (qbuf->buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* get memory node */
+ m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
+ if (IS_ERR(m_node)) {
+ DRM_ERROR("failed to get m_node.\n");
+ return PTR_ERR(m_node);
+ }
+
+		/*
+		 * First get an event for the destination buffer; then,
+		 * in the M2M case, run the operation with that buffer
+		 * if needed.
+		 */
+ if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
+ /* get event for destination buffer */
+ ret = ipp_get_event(drm_dev, file, c_node, qbuf);
+ if (ret) {
+ DRM_ERROR("failed to get event.\n");
+ goto err_clean_node;
+ }
+
+			/*
+			 * In the M2M case this runs the play control for
+			 * streaming; other cases just set the address and
+			 * wait.
+			 */
+ ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
+ if (ret) {
+ DRM_ERROR("failed to run command.\n");
+ goto err_clean_node;
+ }
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ mutex_lock(&c_node->lock);
+
+ /* put event for destination buffer */
+ if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
+ ipp_put_event(c_node, qbuf);
+
+ ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+
+ mutex_unlock(&c_node->lock);
+ break;
+ default:
+ DRM_ERROR("invalid buffer control.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+
+err_clean_node:
+ DRM_ERROR("clean memory nodes.\n");
+
+ ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+ return ret;
+}
+
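+/*
+ * Valid transitions, as checked below: PLAY takes IDLE to START, STOP
+ * takes any state but STOP to STOP, PAUSE takes START to STOP and
+ * RESUME takes STOP to START. Everything except PLAY also requires
+ * the device not to be runtime suspended.
+ */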
+static bool exynos_drm_ipp_check_valid(struct device *dev,
+ enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
+{
+ if (ctrl != IPP_CTRL_PLAY) {
+ if (pm_runtime_suspended(dev)) {
+ DRM_ERROR("pm:runtime_suspended.\n");
+ goto err_status;
+ }
+ }
+
+ switch (ctrl) {
+ case IPP_CTRL_PLAY:
+ if (state != IPP_STATE_IDLE)
+ goto err_status;
+ break;
+ case IPP_CTRL_STOP:
+ if (state == IPP_STATE_STOP)
+ goto err_status;
+ break;
+ case IPP_CTRL_PAUSE:
+ if (state != IPP_STATE_START)
+ goto err_status;
+ break;
+ case IPP_CTRL_RESUME:
+ if (state != IPP_STATE_STOP)
+ goto err_status;
+ break;
+ default:
+ DRM_ERROR("invalid state.\n");
+ goto err_status;
+ }
+
+ return true;
+
+err_status:
+ DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
+ return false;
+}
+
+int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct exynos_drm_ippdrv *ippdrv = NULL;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
+ struct drm_exynos_ipp_cmd_work *cmd_work;
+ struct drm_exynos_ipp_cmd_node *c_node;
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!cmd_ctrl) {
+ DRM_ERROR("invalid control parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
+ cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+ ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
+ if (IS_ERR(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return PTR_ERR(ippdrv);
+ }
+
+ c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+ cmd_ctrl->prop_id);
+ if (IS_ERR(c_node)) {
+ DRM_ERROR("invalid command node list.\n");
+ return PTR_ERR(c_node);
+ }
+
+ if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
+ c_node->state)) {
+ DRM_ERROR("invalid state.\n");
+ return -EINVAL;
+ }
+
+ switch (cmd_ctrl->ctrl) {
+ case IPP_CTRL_PLAY:
+ if (pm_runtime_suspended(ippdrv->dev))
+ pm_runtime_get_sync(ippdrv->dev);
+
+ c_node->state = IPP_STATE_START;
+
+ cmd_work = c_node->start_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ break;
+ case IPP_CTRL_STOP:
+ cmd_work = c_node->stop_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+ if (!wait_for_completion_timeout(&c_node->stop_complete,
+ msecs_to_jiffies(300))) {
+ DRM_ERROR("timeout stop:prop_id[%d]\n",
+ c_node->property.prop_id);
+ }
+
+ c_node->state = IPP_STATE_STOP;
+ ippdrv->dedicated = false;
+ mutex_lock(&ippdrv->cmd_lock);
+ ipp_clean_cmd_node(ctx, c_node);
+
+ if (list_empty(&ippdrv->cmd_list))
+ pm_runtime_put_sync(ippdrv->dev);
+ mutex_unlock(&ippdrv->cmd_lock);
+ break;
+ case IPP_CTRL_PAUSE:
+ cmd_work = c_node->stop_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+ if (!wait_for_completion_timeout(&c_node->stop_complete,
+ msecs_to_jiffies(200))) {
+ DRM_ERROR("timeout stop:prop_id[%d]\n",
+ c_node->property.prop_id);
+ }
+
+ c_node->state = IPP_STATE_STOP;
+ break;
+ case IPP_CTRL_RESUME:
+ c_node->state = IPP_STATE_START;
+ cmd_work = c_node->start_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ break;
+ default:
+		DRM_ERROR("invalid control type.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
+ cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+ return 0;
+}
+
+int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(
+ &exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(
+ &exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+ return blocking_notifier_call_chain(
+ &exynos_drm_ippnb_list, val, v);
+}
+
+static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ipp_ops *ops = NULL;
+ bool swap = false;
+ int ret, i;
+
+ if (!property) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
+
+ /* reset h/w block */
+ if (ippdrv->reset &&
+ ippdrv->reset(ippdrv->dev)) {
+ DRM_ERROR("failed to reset.\n");
+ return -EINVAL;
+ }
+
+ /* set source,destination operations */
+ for_each_ipp_ops(i) {
+ struct drm_exynos_ipp_config *config =
+ &property->config[i];
+
+ ops = ippdrv->ops[i];
+ if (!ops || !config) {
+			DRM_ERROR("missing ops or config.\n");
+ return -EINVAL;
+ }
+
+ /* set format */
+ if (ops->set_fmt) {
+ ret = ops->set_fmt(ippdrv->dev, config->fmt);
+ if (ret) {
+				DRM_ERROR("failed to set format.\n");
+ return ret;
+ }
+ }
+
+ /* set transform for rotation, flip */
+ if (ops->set_transf) {
+ ret = ops->set_transf(ippdrv->dev, config->degree,
+ config->flip, &swap);
+ if (ret) {
+				DRM_ERROR("failed to set transform.\n");
+ return -EINVAL;
+ }
+ }
+
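+		/*
+		 * set_transf() may have set 'swap' (presumably for 90 or
+		 * 270 degree rotations); it is passed to set_size() so
+		 * the driver can exchange width and height.
+		 */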
+ /* set size */
+ if (ops->set_size) {
+ ret = ops->set_size(ippdrv->dev, swap, &config->pos,
+ &config->sz);
+ if (ret) {
+				DRM_ERROR("failed to set size.\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct list_head *head;
+ int ret, i;
+
+ DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
+
+ /* store command info in ippdrv */
+ ippdrv->c_node = c_node;
+
+ mutex_lock(&c_node->mem_lock);
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("empty memory.\n");
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ /* set current property in ippdrv */
+ ret = ipp_set_property(ippdrv, property);
+ if (ret) {
+ DRM_ERROR("failed to set property.\n");
+ ippdrv->c_node = NULL;
+ goto err_unlock;
+ }
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("failed to get node.\n");
+ ret = -EFAULT;
+ goto err_unlock;
+ }
+
+ DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);
+
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ goto err_unlock;
+ }
+ }
+ break;
+ case IPP_CMD_WB:
+ /* destination memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+ list_for_each_entry(m_node, head, list) {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ goto err_unlock;
+ }
+ }
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ list_for_each_entry(m_node, head, list) {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ goto err_unlock;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ mutex_unlock(&c_node->mem_lock);
+
+ DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);
+
+ /* start operations */
+ if (ippdrv->start) {
+ ret = ippdrv->start(ippdrv->dev, property->cmd);
+ if (ret) {
+ DRM_ERROR("failed to start ops.\n");
+ ippdrv->c_node = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&c_node->mem_lock);
+ ippdrv->c_node = NULL;
+ return ret;
+}
+
+static int ipp_stop_property(struct drm_device *drm_dev,
+ struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct list_head *head;
+ int ret = 0, i;
+
+ DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
+
+ /* put event */
+ ipp_put_event(c_node, NULL);
+
+ mutex_lock(&c_node->mem_lock);
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ list_for_each_entry_safe(m_node, tm_node,
+ head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node,
+ m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ }
+ break;
+ case IPP_CMD_WB:
+ /* destination memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+ list_for_each_entry_safe(m_node, tm_node, head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ list_for_each_entry_safe(m_node, tm_node, head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ ret = -EINVAL;
+ goto err_clear;
+ }
+
+err_clear:
+ mutex_unlock(&c_node->mem_lock);
+
+ /* stop operations */
+ if (ippdrv->stop)
+ ippdrv->stop(ippdrv->dev, property->cmd);
+
+ return ret;
+}
+
+void ipp_sched_cmd(struct work_struct *work)
+{
+ struct drm_exynos_ipp_cmd_work *cmd_work =
+ (struct drm_exynos_ipp_cmd_work *)work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_property *property;
+ int ret;
+
+ ippdrv = cmd_work->ippdrv;
+ if (!ippdrv) {
+ DRM_ERROR("invalid ippdrv list.\n");
+ return;
+ }
+
+ c_node = cmd_work->c_node;
+ if (!c_node) {
+ DRM_ERROR("invalid command node list.\n");
+ return;
+ }
+
+ mutex_lock(&c_node->lock);
+
+ property = &c_node->property;
+
+ switch (cmd_work->ctrl) {
+ case IPP_CTRL_PLAY:
+ case IPP_CTRL_RESUME:
+ ret = ipp_start_property(ippdrv, c_node);
+ if (ret) {
+ DRM_ERROR("failed to start property:prop_id[%d]\n",
+ c_node->property.prop_id);
+ goto err_unlock;
+ }
+
+		/*
+		 * M2M operations run as a single unit over multiple
+		 * queued buffers, so wait for each data transfer to
+		 * complete.
+		 */
+ if (ipp_is_m2m_cmd(property->cmd)) {
+ if (!wait_for_completion_timeout
+ (&c_node->start_complete, msecs_to_jiffies(200))) {
+ DRM_ERROR("timeout event:prop_id[%d]\n",
+ c_node->property.prop_id);
+ goto err_unlock;
+ }
+ }
+ break;
+ case IPP_CTRL_STOP:
+ case IPP_CTRL_PAUSE:
+ ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
+ c_node);
+ if (ret) {
+ DRM_ERROR("failed to stop property.\n");
+ goto err_unlock;
+ }
+
+ complete(&c_node->stop_complete);
+ break;
+ default:
+ DRM_ERROR("unknown control type\n");
+ break;
+ }
+
+ DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);
+
+err_unlock:
+ mutex_unlock(&c_node->lock);
+}
+
+static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
+{
+ struct drm_device *drm_dev = ippdrv->drm_dev;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_queue_buf qbuf;
+ struct drm_exynos_ipp_send_event *e;
+ struct list_head *head;
+ struct timeval now;
+ unsigned long flags;
+ u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
+ int ret, i;
+
+ for_each_ipp_ops(i)
+ DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);
+
+ if (!drm_dev) {
+ DRM_ERROR("failed to get drm_dev.\n");
+ return -EINVAL;
+ }
+
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&c_node->event_lock);
+ if (list_empty(&c_node->event_list)) {
+ DRM_DEBUG_KMS("event list is empty.\n");
+ ret = 0;
+ goto err_event_unlock;
+ }
+
+ mutex_lock(&c_node->mem_lock);
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("empty memory.\n");
+ ret = 0;
+ goto err_mem_unlock;
+ }
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ ret = -ENOMEM;
+ goto err_mem_unlock;
+ }
+
+ tbuf_id[i] = m_node->buf_id;
+ DRM_DEBUG_KMS("%s buf_id[%d]\n",
+ i ? "dst" : "src", tbuf_id[i]);
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ }
+ break;
+ case IPP_CMD_WB:
+ /* clear buf for finding */
+ memset(&qbuf, 0x0, sizeof(qbuf));
+ qbuf.ops_id = EXYNOS_DRM_OPS_DST;
+ qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
+
+ /* get memory node entry */
+ m_node = ipp_find_mem_node(c_node, &qbuf);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ ret = -ENOMEM;
+ goto err_mem_unlock;
+ }
+
+ tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ ret = -ENOMEM;
+ goto err_mem_unlock;
+ }
+
+ tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ ret = -EINVAL;
+ goto err_mem_unlock;
+ }
+ mutex_unlock(&c_node->mem_lock);
+
+ if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
+ DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
+ tbuf_id[1], buf_id[1], property->prop_id);
+
+	/*
+	 * The command node keeps an event list for destination buffers.
+	 * An event is appended to the tail when a destination buffer is
+	 * enqueued, so the first event corresponds to the first
+	 * enqueued buffer.
+	 */
+ e = list_first_entry(&c_node->event_list,
+ struct drm_exynos_ipp_send_event, base.link);
+
+ do_gettimeofday(&now);
+ DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ e->event.prop_id = property->prop_id;
+
+ /* set buffer id about source destination */
+ for_each_ipp_ops(i)
+ e->event.buf_id[i] = tbuf_id[i];
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ mutex_unlock(&c_node->event_lock);
+
+ DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
+ property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
+
+ return 0;
+
+err_mem_unlock:
+ mutex_unlock(&c_node->mem_lock);
+err_event_unlock:
+ mutex_unlock(&c_node->event_lock);
+ return ret;
+}
+
+void ipp_sched_event(struct work_struct *work)
+{
+ struct drm_exynos_ipp_event_work *event_work =
+ (struct drm_exynos_ipp_event_work *)work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int ret;
+
+ if (!event_work) {
+ DRM_ERROR("failed to get event_work.\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);
+
+ ippdrv = event_work->ippdrv;
+ if (!ippdrv) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return;
+ }
+
+ c_node = ippdrv->c_node;
+ if (!c_node) {
+ DRM_ERROR("failed to get command node.\n");
+ return;
+ }
+
+	/*
+	 * The command thread and the event thread are synchronized. If
+	 * the command is no longer running (e.g. userland closed the
+	 * device), skip event delivery and just signal completion so
+	 * the command thread does not block.
+	 */
+ if (c_node->state != IPP_STATE_START) {
+ DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
+ c_node->state, c_node->property.prop_id);
+ goto err_completion;
+ }
+
+ ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
+ if (ret) {
+ DRM_ERROR("failed to send event.\n");
+ goto err_completion;
+ }
+
+err_completion:
+ if (ipp_is_m2m_cmd(c_node->property.cmd))
+ complete(&c_node->start_complete);
+}
+
+static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret, count = 0;
+
+ /* get ipp driver entry */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ u32 ipp_id;
+
+ ippdrv->drm_dev = drm_dev;
+
+ ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
+ &ipp_id);
+ if (ret || ipp_id == 0) {
+ DRM_ERROR("failed to create id.\n");
+ goto err;
+ }
+
+ DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
+ count++, (int)ippdrv, ipp_id);
+
+ ippdrv->prop_list.ipp_id = ipp_id;
+
+ /* store parent device for node */
+ ippdrv->parent_dev = dev;
+
+ /* store event work queue and handler */
+ ippdrv->event_workq = ctx->event_workq;
+ ippdrv->sched_event = ipp_sched_event;
+ INIT_LIST_HEAD(&ippdrv->cmd_list);
+ mutex_init(&ippdrv->cmd_lock);
+
+ if (is_drm_iommu_supported(drm_dev)) {
+ ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
+ if (ret) {
+ DRM_ERROR("failed to activate iommu\n");
+ goto err;
+ }
+ }
+ }
+
+ return 0;
+
+err:
+ /* get ipp driver entry */
+ list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
+ drv_list) {
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+ ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
+ ippdrv->prop_list.ipp_id);
+ }
+
+ return ret;
+}
+
+static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+	struct exynos_drm_ippdrv *ippdrv, *t;
+	struct ipp_context *ctx = get_ipp_context(dev);
+
+	/*
+	 * Use the _safe iterator: exynos_drm_ippdrv_unregister() removes
+	 * the current entry from the list being walked.
+	 */
+	list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
+		if (is_drm_iommu_supported(drm_dev))
+			drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
+				ippdrv->prop_list.ipp_id);
+
+		ippdrv->drm_dev = NULL;
+		exynos_drm_ippdrv_unregister(ippdrv);
+	}
+}
+
+static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+ file_priv->ipp_priv = priv;
+
+ INIT_LIST_HEAD(&priv->event_list);
+
+ DRM_DEBUG_KMS("done priv[0x%x]\n", (int)priv);
+
+ return 0;
+}
+
+static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct exynos_drm_ippdrv *ippdrv = NULL;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
+ int count = 0;
+
+ DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv);
+
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ mutex_lock(&ippdrv->cmd_lock);
+ list_for_each_entry_safe(c_node, tc_node,
+ &ippdrv->cmd_list, list) {
+ DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
+ count++, (int)ippdrv);
+
+ if (c_node->priv == priv) {
+				/*
+				 * If userland terminated abnormally (the
+				 * process was killed and the file closed),
+				 * the stop cmd ctrl was never issued, so
+				 * stop the operation here.
+				 */
+ if (c_node->state == IPP_STATE_START) {
+ ipp_stop_property(drm_dev, ippdrv,
+ c_node);
+ c_node->state = IPP_STATE_STOP;
+ }
+
+ ippdrv->dedicated = false;
+ ipp_clean_cmd_node(ctx, c_node);
+ if (list_empty(&ippdrv->cmd_list))
+ pm_runtime_put_sync(ippdrv->dev);
+ }
+ }
+ mutex_unlock(&ippdrv->cmd_lock);
+ }
+
+ kfree(priv);
+ return;
+}
+
+static int ipp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ipp_context *ctx;
+ struct exynos_drm_subdrv *subdrv;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mutex_init(&ctx->ipp_lock);
+ mutex_init(&ctx->prop_lock);
+
+ idr_init(&ctx->ipp_idr);
+ idr_init(&ctx->prop_idr);
+
+	/*
+	 * Create a single-threaded workqueue for ipp events. IPP
+	 * drivers queue event_work here, and the event thread delivers
+	 * events to the user process.
+	 */
+ ctx->event_workq = create_singlethread_workqueue("ipp_event");
+ if (!ctx->event_workq) {
+ dev_err(dev, "failed to create event workqueue\n");
+ return -EINVAL;
+ }
+
+	/*
+	 * Create a single-threaded workqueue for ipp commands. The user
+	 * process creates a command node via the set property ioctl and
+	 * queues start_work here; the command thread then starts the
+	 * property.
+	 */
+ ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
+ if (!ctx->cmd_workq) {
+ dev_err(dev, "failed to create cmd workqueue\n");
+ ret = -EINVAL;
+ goto err_event_workq;
+ }
+
+	/* set sub driver information */
+ subdrv = &ctx->subdrv;
+ subdrv->dev = dev;
+ subdrv->probe = ipp_subdrv_probe;
+ subdrv->remove = ipp_subdrv_remove;
+ subdrv->open = ipp_subdrv_open;
+ subdrv->close = ipp_subdrv_close;
+
+ platform_set_drvdata(pdev, ctx);
+
+ ret = exynos_drm_subdrv_register(subdrv);
+ if (ret < 0) {
+ DRM_ERROR("failed to register drm ipp device.\n");
+ goto err_cmd_workq;
+ }
+
+ dev_info(dev, "drm ipp registered successfully.\n");
+
+ return 0;
+
+err_cmd_workq:
+ destroy_workqueue(ctx->cmd_workq);
+err_event_workq:
+ destroy_workqueue(ctx->event_workq);
+ return ret;
+}
+
+static int ipp_remove(struct platform_device *pdev)
+{
+ struct ipp_context *ctx = platform_get_drvdata(pdev);
+
+ /* unregister sub driver */
+ exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+	/* destroy the ipp and property idrs */
+ idr_destroy(&ctx->ipp_idr);
+ idr_destroy(&ctx->prop_idr);
+
+ mutex_destroy(&ctx->ipp_lock);
+ mutex_destroy(&ctx->prop_lock);
+
+ /* destroy command, event work queue */
+ destroy_workqueue(ctx->cmd_workq);
+ destroy_workqueue(ctx->event_workq);
+
+ return 0;
+}
+
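+/* stub: actual power/sysmmu control is not implemented yet (see TODO above) */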
+static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("enable[%d]\n", enable);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ipp_suspend(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_resume(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ if (!pm_runtime_suspended(dev))
+ return ipp_power_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int ipp_runtime_suspend(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_runtime_resume(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ return ipp_power_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops ipp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
+ SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
+};
+
+struct platform_driver ipp_driver = {
+ .probe = ipp_probe,
+ .remove = ipp_remove,
+ .driver = {
+ .name = "exynos-drm-ipp",
+ .owner = THIS_MODULE,
+ .pm = &ipp_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644
index 00000000000..7aaeaae757c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_IPP_H_
+#define _EXYNOS_DRM_IPP_H_
+
+#define for_each_ipp_ops(pos) \
+ for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
+#define for_each_ipp_planar(pos) \
+ for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
+
+#define IPP_GET_LCD_WIDTH _IOR('F', 302, int)
+#define IPP_GET_LCD_HEIGHT _IOR('F', 303, int)
+#define IPP_SET_WRITEBACK _IOW('F', 304, u32)
+
+/* definition of state */
+enum drm_exynos_ipp_state {
+ IPP_STATE_IDLE,
+ IPP_STATE_START,
+ IPP_STATE_STOP,
+};
+
+/*
+ * A structure of command work information.
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @c_node: command node information.
+ * @ctrl: command control.
+ */
+struct drm_exynos_ipp_cmd_work {
+ struct work_struct work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ enum drm_exynos_ipp_ctrl ctrl;
+};
+
+/*
+ * A structure of command node.
+ *
+ * @priv: IPP private information.
+ * @list: list head to command queue information.
+ * @event_list: list head of event.
+ * @mem_list: list head to source/destination memory queue information.
+ * @lock: lock for synchronization of access to ioctl.
+ * @mem_lock: lock for synchronization of access to memory nodes.
+ * @event_lock: lock for synchronization of access to scheduled event.
+ * @start_complete: completion of start of command.
+ * @stop_complete: completion of stop of command.
+ * @property: property information.
+ * @start_work: start command work structure.
+ * @stop_work: stop command work structure.
+ * @event_work: event work structure.
+ * @state: state of command node.
+ */
+struct drm_exynos_ipp_cmd_node {
+ struct exynos_drm_ipp_private *priv;
+ struct list_head list;
+ struct list_head event_list;
+ struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
+ struct mutex lock;
+ struct mutex mem_lock;
+ struct mutex event_lock;
+ struct completion start_complete;
+ struct completion stop_complete;
+ struct drm_exynos_ipp_property property;
+ struct drm_exynos_ipp_cmd_work *start_work;
+ struct drm_exynos_ipp_cmd_work *stop_work;
+ struct drm_exynos_ipp_event_work *event_work;
+ enum drm_exynos_ipp_state state;
+};
+
+/*
+ * A structure of buffer information.
+ *
+ * @handles: Y, Cb, Cr each gem object handle.
+ * @base: Y, Cb, Cr each planar address.
+ */
+struct drm_exynos_ipp_buf_info {
+ unsigned long handles[EXYNOS_DRM_PLANAR_MAX];
+ dma_addr_t base[EXYNOS_DRM_PLANAR_MAX];
+};
+
+/*
+ * A structure of wb setting information.
+ *
+ * @enable: enable flag for wb.
+ * @refresh: HZ of the refresh rate.
+ */
+struct drm_exynos_ipp_set_wb {
+ __u32 enable;
+ __u32 refresh;
+};
+
+/*
+ * A structure of event work information.
+ *
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @buf_id: id of src, dst buffer.
+ */
+struct drm_exynos_ipp_event_work {
+ struct work_struct work;
+ struct exynos_drm_ippdrv *ippdrv;
+ u32 buf_id[EXYNOS_DRM_OPS_MAX];
+};
+
+/*
+ * A structure of source/destination operations.
+ *
+ * @set_fmt: set format of image.
+ * @set_transf: set transform(rotations, flip).
+ * @set_size: set size of region.
+ * @set_addr: set address for dma.
+ */
+struct exynos_drm_ipp_ops {
+ int (*set_fmt)(struct device *dev, u32 fmt);
+ int (*set_transf)(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap);
+ int (*set_size)(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
+ int (*set_addr)(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type);
+};
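+
+/*
+ * Hardware drivers (e.g. FIMC, GSC, Rotator) fill in one ops table per
+ * direction and register themselves with exynos_drm_ippdrv_register().
+ */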
+
+/*
+ * A structure of ipp driver.
+ *
+ * @drv_list: list head for registered sub driver information.
+ * @parent_dev: parent device information.
+ * @dev: platform device.
+ * @drm_dev: drm device.
+ * @dedicated: dedicated ipp device.
+ * @ops: source, destination operations.
+ * @event_workq: event work queue.
+ * @c_node: current command information.
+ * @cmd_list: list head for command information.
+ * @cmd_lock: lock for synchronization of access to cmd_list.
+ * @prop_list: property information of the current ipp driver.
+ * @check_property: check property about format, size, buffer.
+ * @reset: reset ipp block.
+ * @start: ipp each device start.
+ * @stop: ipp each device stop.
+ * @sched_event: work schedule handler.
+ */
+struct exynos_drm_ippdrv {
+ struct list_head drv_list;
+ struct device *parent_dev;
+ struct device *dev;
+ struct drm_device *drm_dev;
+ bool dedicated;
+ struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
+ struct workqueue_struct *event_workq;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct list_head cmd_list;
+ struct mutex cmd_lock;
+ struct drm_exynos_ipp_prop_list prop_list;
+
+ int (*check_property)(struct device *dev,
+ struct drm_exynos_ipp_property *property);
+ int (*reset)(struct device *dev);
+ int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+ void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+ void (*sched_event)(struct work_struct *work);
+};
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ippnb_register(struct notifier_block *nb);
+extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
+extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
+extern void ipp_sched_cmd(struct work_struct *work);
+extern void ipp_sched_event(struct work_struct *work);
+
+#else
+static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+ return -ENOTTY;
+}
+#endif
+
+#endif /* _EXYNOS_DRM_IPP_H_ */
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
new file mode 100644
index 00000000000..8371cbd7631
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <drm/drmP.h>
+
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_plane.h"
+
+#define to_exynos_plane(x) container_of(x, struct exynos_plane, base)
+
+struct exynos_plane {
+ struct drm_plane base;
+ struct exynos_drm_overlay overlay;
+ bool enabled;
+};
+
+static const uint32_t formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV12MT,
+};
+
+/*
+ * This function is to get X or Y size shown via screen. This needs length and
+ * start position of CRTC.
+ *
+ * <--- length --->
+ * CRTC ----------------
+ * ^ start ^ end
+ *
+ * There are six cases from a to f.
+ *
+ * <----- SCREEN ----->
+ * 0 last
+ * ----------|------------------|----------
+ * CRTCs
+ * a -------
+ * b -------
+ * c --------------------------
+ * d --------
+ * e -------
+ * f -------
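+ *
+ * For example, with last = 1024: start = -100, length = 300 is case b
+ * and yields size = 200; start = 900, length = 300 is case e and
+ * yields size = 124.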
+ */
+static int exynos_plane_get_size(int start, unsigned length, unsigned last)
+{
+ int end = start + length;
+ int size = 0;
+
+ if (start <= 0) {
+ if (end > 0)
+ size = min_t(unsigned, end, last);
+ } else if (start <= last) {
+ size = min_t(unsigned, last - start, length);
+ }
+
+ return size;
+}
+
+int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+ struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+ unsigned int actual_w;
+ unsigned int actual_h;
+ int nr;
+ int i;
+
+ nr = exynos_drm_fb_get_buf_cnt(fb);
+ for (i = 0; i < nr; i++) {
+ struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i);
+
+ if (!buffer) {
+ DRM_DEBUG_KMS("buffer is null\n");
+ return -EFAULT;
+ }
+
+ overlay->dma_addr[i] = buffer->dma_addr;
+
+ DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
+ i, (unsigned long)overlay->dma_addr[i]);
+ }
+
+ actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
+ actual_h = exynos_plane_get_size(crtc_y, crtc_h, crtc->mode.vdisplay);
+
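+	/*
+	 * If the plane hangs off the left/top screen edge, clip it:
+	 * advance the source offset by the clipped amount and move the
+	 * plane to the screen origin.
+	 */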
+ if (crtc_x < 0) {
+ if (actual_w)
+ src_x -= crtc_x;
+ crtc_x = 0;
+ }
+
+ if (crtc_y < 0) {
+ if (actual_h)
+ src_y -= crtc_y;
+ crtc_y = 0;
+ }
+
+ /* set drm framebuffer data. */
+ overlay->fb_x = src_x;
+ overlay->fb_y = src_y;
+ overlay->fb_width = fb->width;
+ overlay->fb_height = fb->height;
+ overlay->src_width = src_w;
+ overlay->src_height = src_h;
+ overlay->bpp = fb->bits_per_pixel;
+ overlay->pitch = fb->pitches[0];
+ overlay->pixel_format = fb->pixel_format;
+
+ /* set overlay range to be displayed. */
+ overlay->crtc_x = crtc_x;
+ overlay->crtc_y = crtc_y;
+ overlay->crtc_width = actual_w;
+ overlay->crtc_height = actual_h;
+
+ /* set drm mode data. */
+ overlay->mode_width = crtc->mode.hdisplay;
+ overlay->mode_height = crtc->mode.vdisplay;
+ overlay->refresh = crtc->mode.vrefresh;
+ overlay->scan_flag = crtc->mode.flags;
+
+	DRM_DEBUG_KMS("overlay: offset_x/y(%d,%d), width/height(%d,%d)\n",
+ overlay->crtc_x, overlay->crtc_y,
+ overlay->crtc_width, overlay->crtc_height);
+
+ exynos_drm_crtc_plane_mode_set(crtc, overlay);
+
+ return 0;
+}
+
+void exynos_plane_commit(struct drm_plane *plane)
+{
+ struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+ struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+
+ exynos_drm_crtc_plane_commit(plane->crtc, overlay->zpos);
+}
+
+void exynos_plane_dpms(struct drm_plane *plane, int mode)
+{
+ struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+ struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ if (exynos_plane->enabled)
+ return;
+
+ exynos_drm_crtc_plane_enable(plane->crtc, overlay->zpos);
+ exynos_plane->enabled = true;
+ } else {
+ if (!exynos_plane->enabled)
+ return;
+
+ exynos_drm_crtc_plane_disable(plane->crtc, overlay->zpos);
+ exynos_plane->enabled = false;
+ }
+}
+
+static int
+exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ int ret;
+
+ ret = exynos_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y,
+ crtc_w, crtc_h, src_x >> 16, src_y >> 16,
+ src_w >> 16, src_h >> 16);
+ if (ret < 0)
+ return ret;
+
+ plane->crtc = crtc;
+
+ exynos_plane_commit(plane);
+ exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
+
+ return 0;
+}
+
+static int exynos_disable_plane(struct drm_plane *plane)
+{
+ exynos_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+
+ return 0;
+}
+
+static void exynos_plane_destroy(struct drm_plane *plane)
+{
+ struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+
+ exynos_disable_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(exynos_plane);
+}
+
+static int exynos_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = plane->dev;
+ struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+
+ if (property == dev_priv->plane_zpos_property) {
+ exynos_plane->overlay.zpos = val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct drm_plane_funcs exynos_plane_funcs = {
+ .update_plane = exynos_update_plane,
+ .disable_plane = exynos_disable_plane,
+ .destroy = exynos_plane_destroy,
+ .set_property = exynos_plane_set_property,
+};
+
+static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+
+ prop = dev_priv->plane_zpos_property;
+ if (!prop) {
+ prop = drm_property_create_range(dev, 0, "zpos", 0,
+ MAX_PLANE - 1);
+ if (!prop)
+ return;
+
+ dev_priv->plane_zpos_property = prop;
+ }
+
+ drm_object_attach_property(&plane->base, prop, 0);
+}
+
+struct drm_plane *exynos_plane_init(struct drm_device *dev,
+ unsigned long possible_crtcs, bool priv)
+{
+ struct exynos_plane *exynos_plane;
+ int err;
+
+ exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
+ if (!exynos_plane)
+ return NULL;
+
+ err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
+ &exynos_plane_funcs, formats, ARRAY_SIZE(formats),
+ priv);
+ if (err) {
+ DRM_ERROR("failed to initialize plane\n");
+ kfree(exynos_plane);
+ return NULL;
+ }
+
+ if (priv)
+ exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+ else
+ exynos_plane_attach_zpos_property(&exynos_plane->base);
+
+ return &exynos_plane->base;
+}
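A minimal usage sketch, assuming a hypothetical caller that creates one private (primary) plane per CRTC pipe; the variable names are illustrative only:

/* Hypothetical caller: one private plane for CRTC pipe `nr`. */
struct drm_plane *primary = exynos_plane_init(drm_dev, 1 << nr, true);

if (!primary)
	return -ENOMEM;
/* With priv == true the plane keeps DEFAULT_ZPOS and exposes no "zpos" property. */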
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
new file mode 100644
index 00000000000..84d464c90d3
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+void exynos_plane_commit(struct drm_plane *plane);
+void exynos_plane_dpms(struct drm_plane *plane, int mode);
+struct drm_plane *exynos_plane_init(struct drm_device *dev,
+ unsigned long possible_crtcs, bool priv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
new file mode 100644
index 00000000000..f01fbb6dc1f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -0,0 +1,856 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * YoungJun Cho <yj44.cho@samsung.com>
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-rotator.h"
+#include "exynos_drm.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_ipp.h"
+
+/*
+ * The rotator supports image crop/rotation and input/output DMA operations.
+ * The input DMA reads image data from memory; the output DMA writes the
+ * rotated image data back to memory.
+ *
+ * M2M operation:
+ * Memory ----> Rotator H/W ----> Memory.
+ */
+
+/*
+ * TODO
+ * 1. check whether the suspend/resume API is needed.
+ * 2. check the platform_device_id use case.
+ * 3. check src/dst size (width, height).
+ * 4. add the supported format list to prop_list.
+ */
+
+#define get_rot_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+		struct rot_context, ippdrv)
+#define rot_read(offset) readl(rot->regs + (offset))
+#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset))
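Note that rot_read() and rot_write() implicitly reference a local variable named rot, so they are only usable inside functions that have a struct rot_context *rot in scope; a short hedged sketch:

/* Illustration only: the macros expand against the local `rot`. */
static void example_start_rotation(struct rot_context *rot)
{
	u32 val = rot_read(ROT_CONTROL);  /* readl(rot->regs + ROT_CONTROL)  */

	val |= ROT_CONTROL_START;
	rot_write(val, ROT_CONTROL);      /* writel(val, rot->regs + ROT_CONTROL) */
}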
+
+enum rot_irq_status {
+ ROT_IRQ_STATUS_COMPLETE = 8,
+ ROT_IRQ_STATUS_ILLEGAL = 9,
+};
+
+/*
+ * A structure of limitation.
+ *
+ * @min_w: minimum width.
+ * @min_h: minimum height.
+ * @max_w: maximum width.
+ * @max_h: maximum height.
+ * @align: align size.
+ */
+struct rot_limit {
+ u32 min_w;
+ u32 min_h;
+ u32 max_w;
+ u32 max_h;
+ u32 align;
+};
+
+/*
+ * A structure of limitation table.
+ *
+ * @ycbcr420_2p: case of YUV.
+ * @rgb888: case of RGB.
+ */
+struct rot_limit_table {
+ struct rot_limit ycbcr420_2p;
+ struct rot_limit rgb888;
+};
+
+/*
+ * A structure of rotator context.
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @clock: rotator gate clock.
+ * @limit_tbl: limitation of rotator.
+ * @irq: irq number.
+ * @cur_buf_id: current operation buffer id.
+ * @suspended: suspended state.
+ */
+struct rot_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct clk *clock;
+ struct rot_limit_table *limit_tbl;
+ int irq;
+ int cur_buf_id[EXYNOS_DRM_OPS_MAX];
+ bool suspended;
+};
+
+static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
+{
+ u32 val = rot_read(ROT_CONFIG);
+
+	if (enable)
+ val |= ROT_CONFIG_IRQ;
+ else
+ val &= ~ROT_CONFIG_IRQ;
+
+ rot_write(val, ROT_CONFIG);
+}
+
+static u32 rotator_reg_get_fmt(struct rot_context *rot)
+{
+ u32 val = rot_read(ROT_CONTROL);
+
+ val &= ROT_CONTROL_FMT_MASK;
+
+ return val;
+}
+
+static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
+{
+ u32 val = rot_read(ROT_STATUS);
+
+ val = ROT_STATUS_IRQ(val);
+
+ if (val == ROT_STATUS_IRQ_VAL_COMPLETE)
+ return ROT_IRQ_STATUS_COMPLETE;
+
+ return ROT_IRQ_STATUS_ILLEGAL;
+}
+
+static irqreturn_t rotator_irq_handler(int irq, void *arg)
+{
+ struct rot_context *rot = arg;
+ struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
+ enum rot_irq_status irq_status;
+ u32 val;
+
+ /* Get execution result */
+ irq_status = rotator_reg_get_irq_status(rot);
+
+ /* clear status */
+ val = rot_read(ROT_STATUS);
+ val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
+ rot_write(val, ROT_STATUS);
+
+ if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+ rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
+ queue_work(ippdrv->event_workq,
+ (struct work_struct *)event_work);
+ } else {
+ DRM_ERROR("the SFR is set illegally\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
+ u32 *vsize)
+{
+ struct rot_limit_table *limit_tbl = rot->limit_tbl;
+ struct rot_limit *limit;
+ u32 mask, val;
+
+ /* Get size limit */
+ if (fmt == ROT_CONTROL_FMT_RGB888)
+ limit = &limit_tbl->rgb888;
+ else
+ limit = &limit_tbl->ycbcr420_2p;
+
+ /* Get mask for rounding to nearest aligned val */
+ mask = ~((1 << limit->align) - 1);
+
+ /* Set aligned width */
+ val = ROT_ALIGN(*hsize, limit->align, mask);
+ if (val < limit->min_w)
+ *hsize = ROT_MIN(limit->min_w, mask);
+ else if (val > limit->max_w)
+ *hsize = ROT_MAX(limit->max_w, mask);
+ else
+ *hsize = val;
+
+ /* Set aligned height */
+ val = ROT_ALIGN(*vsize, limit->align, mask);
+ if (val < limit->min_h)
+ *vsize = ROT_MIN(limit->min_h, mask);
+ else if (val > limit->max_h)
+ *vsize = ROT_MAX(limit->max_h, mask);
+ else
+ *vsize = val;
+}
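ROT_ALIGN(), ROT_MIN() and ROT_MAX() come from regs-rotator.h, which is not part of this hunk; assuming ROT_ALIGN() rounds to a multiple of 2^align, a hypothetical walk-through for RGB888 on the exynos4210 table (align = 2, so 4-pixel alignment, min_w = 8, max_w = SZ_16K):

/*
 * Assumed semantics, for illustration only:
 *   mask = ~((1 << 2) - 1) = ~0x3
 *   *hsize = 1022  -> aligned to 1024, within [8, 16384], kept
 *   *hsize = 5     -> below min_w, clamped up to an aligned 8
 *   *hsize = 20000 -> above max_w, clamped down to an aligned 16384
 */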
+
+static int rotator_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ val = rot_read(ROT_CONTROL);
+ val &= ~ROT_CONTROL_FMT_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_NV12:
+ val |= ROT_CONTROL_FMT_YCBCR420_2P;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ val |= ROT_CONTROL_FMT_RGB888;
+ break;
+ default:
+ DRM_ERROR("invalid image format\n");
+ return -EINVAL;
+ }
+
+ rot_write(val, ROT_CONTROL);
+
+ return 0;
+}
+
+static inline bool rotator_check_reg_fmt(u32 fmt)
+{
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
+ (fmt == ROT_CONTROL_FMT_RGB888))
+ return true;
+
+ return false;
+}
+
+static int rotator_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos,
+ struct drm_exynos_sz *sz)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 fmt, hsize, vsize;
+ u32 val;
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("invalid format.\n");
+ return -EINVAL;
+ }
+
+ /* Align buffer size */
+ hsize = sz->hsize;
+ vsize = sz->vsize;
+ rotator_align_size(rot, fmt, &hsize, &vsize);
+
+ /* Set buffer size configuration */
+ val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+ rot_write(val, ROT_SRC_BUF_SIZE);
+
+ /* Set crop image position configuration */
+ val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+ rot_write(val, ROT_SRC_CROP_POS);
+ val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
+ rot_write(val, ROT_SRC_CROP_SIZE);
+
+ return 0;
+}
+
+static int rotator_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info,
+ u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+ u32 val, fmt, hsize, vsize;
+ int i;
+
+ /* Set current buf_id */
+ rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* Set address configuration */
+ for_each_ipp_planar(i)
+ addr[i] = buf_info->base[i];
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("invalid format.\n");
+ return -EINVAL;
+ }
+
+ /* Re-set cb planar for NV12 format */
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+ !addr[EXYNOS_DRM_PLANAR_CB]) {
+
+ val = rot_read(ROT_SRC_BUF_SIZE);
+ hsize = ROT_GET_BUF_SIZE_W(val);
+ vsize = ROT_GET_BUF_SIZE_H(val);
+
+ /* Set cb planar */
+ addr[EXYNOS_DRM_PLANAR_CB] =
+ addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+ }
+
+ for_each_ipp_planar(i)
+ rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
+ break;
+ case IPP_BUF_DEQUEUE:
+ for_each_ipp_planar(i)
+ rot_write(0x0, ROT_SRC_BUF_ADDR(i));
+ break;
+ default:
+ /* Nothing to do */
+ break;
+ }
+
+ return 0;
+}
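For two-plane NV12 the interleaved CB/CR plane immediately follows the Y plane in memory, so when userspace supplies only a luma base address the driver derives the chroma address from the programmed buffer size; a hedged numeric sketch:

/*
 * Hypothetical 1280x720 NV12 buffer passed with a single base address:
 *
 *   addr[EXYNOS_DRM_PLANAR_Y]  = base
 *   addr[EXYNOS_DRM_PLANAR_CB] = base + 1280 * 720   (hsize * vsize)
 *
 * The interleaved CB/CR samples start right after the last Y line.
 */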
+
+static int rotator_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ /* Set transform configuration */
+ val = rot_read(ROT_CONTROL);
+ val &= ~ROT_CONTROL_FLIP_MASK;
+
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ val |= ROT_CONTROL_FLIP_VERTICAL;
+ break;
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ val |= ROT_CONTROL_FLIP_HORIZONTAL;
+ break;
+ default:
+ /* Flip None */
+ break;
+ }
+
+ val &= ~ROT_CONTROL_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ val |= ROT_CONTROL_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ val |= ROT_CONTROL_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ val |= ROT_CONTROL_ROT_270;
+ break;
+ default:
+ /* Rotation 0 Degree */
+ break;
+ }
+
+ rot_write(val, ROT_CONTROL);
+
+ /* Check degree for setting buffer size swap */
+ if ((degree == EXYNOS_DRM_DEGREE_90) ||
+ (degree == EXYNOS_DRM_DEGREE_270))
+ *swap = true;
+ else
+ *swap = false;
+
+ return 0;
+}
+
+static int rotator_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos,
+ struct drm_exynos_sz *sz)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val, fmt, hsize, vsize;
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("invalid format.\n");
+ return -EINVAL;
+ }
+
+ /* Align buffer size */
+ hsize = sz->hsize;
+ vsize = sz->vsize;
+ rotator_align_size(rot, fmt, &hsize, &vsize);
+
+ /* Set buffer size configuration */
+ val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+ rot_write(val, ROT_DST_BUF_SIZE);
+
+ /* Set crop image position configuration */
+ val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+ rot_write(val, ROT_DST_CROP_POS);
+
+ return 0;
+}
+
+static int rotator_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info,
+ u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+ u32 val, fmt, hsize, vsize;
+ int i;
+
+ /* Set current buf_id */
+ rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* Set address configuration */
+ for_each_ipp_planar(i)
+ addr[i] = buf_info->base[i];
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("invalid format.\n");
+ return -EINVAL;
+ }
+
+ /* Re-set cb planar for NV12 format */
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+ !addr[EXYNOS_DRM_PLANAR_CB]) {
+ /* Get buf size */
+ val = rot_read(ROT_DST_BUF_SIZE);
+
+ hsize = ROT_GET_BUF_SIZE_W(val);
+ vsize = ROT_GET_BUF_SIZE_H(val);
+
+ /* Set cb planar */
+ addr[EXYNOS_DRM_PLANAR_CB] =
+ addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+ }
+
+ for_each_ipp_planar(i)
+ rot_write(addr[i], ROT_DST_BUF_ADDR(i));
+ break;
+ case IPP_BUF_DEQUEUE:
+ for_each_ipp_planar(i)
+ rot_write(0x0, ROT_DST_BUF_ADDR(i));
+ break;
+ default:
+ /* Nothing to do */
+ break;
+ }
+
+ return 0;
+}
+
+static struct exynos_drm_ipp_ops rot_src_ops = {
+ .set_fmt = rotator_src_set_fmt,
+ .set_size = rotator_src_set_size,
+ .set_addr = rotator_src_set_addr,
+};
+
+static struct exynos_drm_ipp_ops rot_dst_ops = {
+ .set_transf = rotator_dst_set_transf,
+ .set_size = rotator_dst_set_size,
+ .set_addr = rotator_dst_set_addr,
+};
+
+static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
+
+ prop_list->version = 1;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 0;
+ prop_list->crop = 0;
+ prop_list->scale = 0;
+
+ return 0;
+}
+
+static inline bool rotator_check_drm_fmt(u32 fmt)
+{
+ switch (fmt) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_NV12:
+ return true;
+ default:
+		DRM_DEBUG_KMS("unsupported format\n");
+ return false;
+ }
+}
+
+static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ case EXYNOS_DRM_FLIP_BOTH:
+ return true;
+ default:
+ DRM_DEBUG_KMS("invalid flip\n");
+ return false;
+ }
+}
+
+static int rotator_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct drm_exynos_ipp_config *src_config =
+ &property->config[EXYNOS_DRM_OPS_SRC];
+ struct drm_exynos_ipp_config *dst_config =
+ &property->config[EXYNOS_DRM_OPS_DST];
+ struct drm_exynos_pos *src_pos = &src_config->pos;
+ struct drm_exynos_pos *dst_pos = &dst_config->pos;
+ struct drm_exynos_sz *src_sz = &src_config->sz;
+ struct drm_exynos_sz *dst_sz = &dst_config->sz;
+ bool swap = false;
+
+ /* Check format configuration */
+ if (src_config->fmt != dst_config->fmt) {
+		DRM_DEBUG_KMS("csc (format conversion) is not supported\n");
+ return -EINVAL;
+ }
+
+ if (!rotator_check_drm_fmt(dst_config->fmt)) {
+ DRM_DEBUG_KMS("invalid format\n");
+ return -EINVAL;
+ }
+
+ /* Check transform configuration */
+ if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
+		DRM_DEBUG_KMS("source-side rotation is not supported\n");
+ return -EINVAL;
+ }
+
+ switch (dst_config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+		swap = true;
+		/* fall through */
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ /* No problem */
+ break;
+ default:
+ DRM_DEBUG_KMS("invalid degree\n");
+ return -EINVAL;
+ }
+
+ if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
+		DRM_DEBUG_KMS("source-side flip is not supported\n");
+ return -EINVAL;
+ }
+
+ if (!rotator_check_drm_flip(dst_config->flip)) {
+ DRM_DEBUG_KMS("invalid flip\n");
+ return -EINVAL;
+ }
+
+ /* Check size configuration */
+ if ((src_pos->x + src_pos->w > src_sz->hsize) ||
+ (src_pos->y + src_pos->h > src_sz->vsize)) {
+ DRM_DEBUG_KMS("out of source buffer bound\n");
+ return -EINVAL;
+ }
+
+ if (swap) {
+ if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
+ (dst_pos->y + dst_pos->w > dst_sz->hsize)) {
+ DRM_DEBUG_KMS("out of destination buffer bound\n");
+ return -EINVAL;
+ }
+
+ if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
+			DRM_DEBUG_KMS("scaling is not supported\n");
+ return -EINVAL;
+ }
+ } else {
+ if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
+ (dst_pos->y + dst_pos->h > dst_sz->vsize)) {
+ DRM_DEBUG_KMS("out of destination buffer bound\n");
+ return -EINVAL;
+ }
+
+ if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
+			DRM_DEBUG_KMS("scaling is not supported\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
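A request that passes these checks for a 90-degree rotation has to swap width and height on the destination side; a hypothetical example, with the field names assumed from the exynos_drm uapi:

/* Illustration only: rotate a full 1280x720 NV12 frame by 90 degrees. */
struct drm_exynos_ipp_property prop = {
	.config[EXYNOS_DRM_OPS_SRC] = {
		.fmt = DRM_FORMAT_NV12,
		.pos = { .x = 0, .y = 0, .w = 1280, .h = 720 },
		.sz  = { .hsize = 1280, .vsize = 720 },
	},
	.config[EXYNOS_DRM_OPS_DST] = {
		.fmt    = DRM_FORMAT_NV12,          /* must match src: no CSC */
		.degree = EXYNOS_DRM_DEGREE_90,     /* implies swap == true   */
		.pos = { .x = 0, .y = 0, .w = 720, .h = 1280 },
		.sz  = { .hsize = 720, .vsize = 1280 },
	},
};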
+
+static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ if (rot->suspended) {
+ DRM_ERROR("suspended state\n");
+ return -EPERM;
+ }
+
+ if (cmd != IPP_CMD_M2M) {
+		DRM_ERROR("unsupported cmd: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Set interrupt enable */
+ rotator_reg_set_irq(rot, true);
+
+ val = rot_read(ROT_CONTROL);
+ val |= ROT_CONTROL_START;
+
+ rot_write(val, ROT_CONTROL);
+
+ return 0;
+}
+
+static struct rot_limit_table rot_limit_tbl_4210 = {
+ .ycbcr420_2p = {
+ .min_w = 32,
+ .min_h = 32,
+ .max_w = SZ_64K,
+ .max_h = SZ_64K,
+ .align = 3,
+ },
+ .rgb888 = {
+ .min_w = 8,
+ .min_h = 8,
+ .max_w = SZ_16K,
+ .max_h = SZ_16K,
+ .align = 2,
+ },
+};
+
+static struct rot_limit_table rot_limit_tbl_4x12 = {
+ .ycbcr420_2p = {
+ .min_w = 32,
+ .min_h = 32,
+ .max_w = SZ_32K,
+ .max_h = SZ_32K,
+ .align = 3,
+ },
+ .rgb888 = {
+ .min_w = 8,
+ .min_h = 8,
+ .max_w = SZ_8K,
+ .max_h = SZ_8K,
+ .align = 2,
+ },
+};
+
+static struct rot_limit_table rot_limit_tbl_5250 = {
+ .ycbcr420_2p = {
+ .min_w = 32,
+ .min_h = 32,
+ .max_w = SZ_32K,
+ .max_h = SZ_32K,
+ .align = 3,
+ },
+ .rgb888 = {
+ .min_w = 8,
+ .min_h = 8,
+ .max_w = SZ_8K,
+ .max_h = SZ_8K,
+ .align = 1,
+ },
+};
+
+static const struct of_device_id exynos_rotator_match[] = {
+ {
+ .compatible = "samsung,exynos4210-rotator",
+ .data = &rot_limit_tbl_4210,
+ },
+ {
+ .compatible = "samsung,exynos4212-rotator",
+ .data = &rot_limit_tbl_4x12,
+ },
+ {
+ .compatible = "samsung,exynos5250-rotator",
+ .data = &rot_limit_tbl_5250,
+ },
+ {},
+};
+
+static int rotator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rot_context *rot;
+ struct exynos_drm_ippdrv *ippdrv;
+ const struct of_device_id *match;
+ int ret;
+
+ if (!dev->of_node) {
+ dev_err(dev, "cannot find of_node.\n");
+ return -ENODEV;
+ }
+
+ rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
+ if (!rot)
+ return -ENOMEM;
+
+ match = of_match_node(exynos_rotator_match, dev->of_node);
+ if (!match) {
+ dev_err(dev, "failed to match node\n");
+ return -ENODEV;
+ }
+ rot->limit_tbl = (struct rot_limit_table *)match->data;
+
+ rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rot->regs = devm_ioremap_resource(dev, rot->regs_res);
+ if (IS_ERR(rot->regs))
+ return PTR_ERR(rot->regs);
+
+ rot->irq = platform_get_irq(pdev, 0);
+ if (rot->irq < 0) {
+ dev_err(dev, "failed to get irq\n");
+ return rot->irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, rot->irq, NULL,
+ rotator_irq_handler, IRQF_ONESHOT, "drm_rotator", rot);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ rot->clock = devm_clk_get(dev, "rotator");
+ if (IS_ERR(rot->clock)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(rot->clock);
+ }
+
+ pm_runtime_enable(dev);
+
+ ippdrv = &rot->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
+ ippdrv->check_property = rotator_ippdrv_check_property;
+ ippdrv->start = rotator_ippdrv_start;
+ ret = rotator_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_ippdrv_register;
+ }
+
+	DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv);
+
+ platform_set_drvdata(pdev, rot);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm rotator device\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(dev, "The exynos rotator is probed successfully\n");
+
+ return 0;
+
+err_ippdrv_register:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int rotator_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rot_context *rot = dev_get_drvdata(dev);
+ struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+
+ exynos_drm_ippdrv_unregister(ippdrv);
+
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static int rotator_clk_ctrl(struct rot_context *rot, bool enable)
+{
+ if (enable) {
+ clk_enable(rot->clock);
+ rot->suspended = false;
+ } else {
+ clk_disable(rot->clock);
+ rot->suspended = true;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rotator_suspend(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+	return rotator_clk_ctrl(rot, false);
+}
+
+static int rotator_resume(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ if (!pm_runtime_suspended(dev))
+		return rotator_clk_ctrl(rot, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int rotator_runtime_suspend(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+	return rotator_clk_ctrl(rot, false);
+}
+
+static int rotator_runtime_resume(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+	return rotator_clk_ctrl(rot, true);
+}
+#endif
+
+static const struct dev_pm_ops rotator_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
+ SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
+ NULL)
+};
+
+struct platform_driver rotator_driver = {
+ .probe = rotator_probe,
+ .remove = rotator_remove,
+ .driver = {
+ .name = "exynos-rot",
+ .owner = THIS_MODULE,
+ .pm = &rotator_pm_ops,
+ .of_match_table = exynos_rotator_match,
+ },
+};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
new file mode 100644
index 00000000000..71a0b4c0c1e
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * YoungJun Cho <yj44.cho@samsung.com>
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_ROTATOR_H_
+#define _EXYNOS_DRM_ROTATOR_H_
+
+/* TODO */
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
new file mode 100644
index 00000000000..2fb8705d646
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -0,0 +1,702 @@
+/* exynos_drm_vidi.c
+ *
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <drm/drmP.h>
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <drm/exynos_drm.h>
+
+#include <drm/drm_edid.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_encoder.h"
+#include "exynos_drm_vidi.h"
+
+/* vidi has three virtual windows in total. */
+#define WINDOWS_NR 3
+
+#define get_vidi_mgr(dev) platform_get_drvdata(to_platform_device(dev))
+#define ctx_from_connector(c) container_of(c, struct vidi_context, \
+ connector)
+
+struct vidi_win_data {
+ unsigned int offset_x;
+ unsigned int offset_y;
+ unsigned int ovl_width;
+ unsigned int ovl_height;
+ unsigned int fb_width;
+ unsigned int fb_height;
+ unsigned int bpp;
+ dma_addr_t dma_addr;
+ unsigned int buf_offsize;
+ unsigned int line_size; /* bytes */
+ bool enabled;
+};
+
+struct vidi_context {
+ struct drm_device *drm_dev;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector connector;
+ struct exynos_drm_subdrv subdrv;
+ struct vidi_win_data win_data[WINDOWS_NR];
+ struct edid *raw_edid;
+ unsigned int clkdiv;
+ unsigned int default_win;
+ unsigned long irq_flags;
+ unsigned int connected;
+ bool vblank_on;
+ bool suspended;
+ bool direct_vblank;
+ struct work_struct work;
+ struct mutex lock;
+ int pipe;
+};
+
+static const char fake_edid_info[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x4c, 0x2d, 0x05, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x30, 0x12, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78,
+ 0x0a, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0xbd,
+ 0xee, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x66, 0x21, 0x50, 0xb0, 0x51, 0x00,
+ 0x1b, 0x30, 0x40, 0x70, 0x36, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e,
+ 0x01, 0x1d, 0x00, 0x72, 0x51, 0xd0, 0x1e, 0x20, 0x6e, 0x28, 0x55, 0x00,
+ 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18,
+ 0x4b, 0x1a, 0x44, 0x17, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x53, 0x41, 0x4d, 0x53, 0x55, 0x4e, 0x47,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0xbc, 0x02, 0x03, 0x1e, 0xf1,
+ 0x46, 0x84, 0x05, 0x03, 0x10, 0x20, 0x22, 0x23, 0x09, 0x07, 0x07, 0x83,
+ 0x01, 0x00, 0x00, 0xe2, 0x00, 0x0f, 0x67, 0x03, 0x0c, 0x00, 0x10, 0x00,
+ 0xb8, 0x2d, 0x01, 0x1d, 0x80, 0x18, 0x71, 0x1c, 0x16, 0x20, 0x58, 0x2c,
+ 0x25, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x9e, 0x8c, 0x0a, 0xd0, 0x8a,
+ 0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00, 0xa0, 0x5a, 0x00, 0x00,
+ 0x00, 0x18, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
+ 0x45, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x06
+};
+
+static void vidi_apply(struct exynos_drm_manager *mgr)
+{
+ struct vidi_context *ctx = mgr->ctx;
+ struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
+ struct vidi_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ if (win_data->enabled && (mgr_ops && mgr_ops->win_commit))
+ mgr_ops->win_commit(mgr, i);
+ }
+
+ if (mgr_ops && mgr_ops->commit)
+ mgr_ops->commit(mgr);
+}
+
+static void vidi_commit(struct exynos_drm_manager *mgr)
+{
+ struct vidi_context *ctx = mgr->ctx;
+
+ if (ctx->suspended)
+ return;
+}
+
+static int vidi_enable_vblank(struct exynos_drm_manager *mgr)
+{
+ struct vidi_context *ctx = mgr->ctx;
+
+ if (ctx->suspended)
+ return -EPERM;
+
+ if (!test_and_set_bit(0, &ctx->irq_flags))
+ ctx->vblank_on = true;
+
+ ctx->direct_vblank = true;
+
+ /*
+	 * In case of a page-flip request, vidi_finish_pageflip() is not
+	 * called here because direct_vblank is true; it is triggered by
+	 * the manager_ops->win_commit callback instead.
+ */
+ schedule_work(&ctx->work);
+
+ return 0;
+}
+
+static void vidi_disable_vblank(struct exynos_drm_manager *mgr)
+{
+ struct vidi_context *ctx = mgr->ctx;
+
+ if (ctx->suspended)
+ return;
+
+ if (test_and_clear_bit(0, &ctx->irq_flags))
+ ctx->vblank_on = false;
+}
+
+static void vidi_win_mode_set(struct exynos_drm_manager *mgr,
+ struct exynos_drm_overlay *overlay)
+{
+ struct vidi_context *ctx = mgr->ctx;
+ struct vidi_win_data *win_data;
+ int win;
+ unsigned long offset;
+
+ if (!overlay) {
+ DRM_ERROR("overlay is NULL\n");
+ return;
+ }
+
+ win = overlay->zpos;
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
+ if (win < 0 || win >= WINDOWS_NR)
+ return;
+
+ offset = overlay->fb_x * (overlay->bpp >> 3);
+ offset += overlay->fb_y * overlay->pitch;
+
+ DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
+
+ win_data = &ctx->win_data[win];
+
+ win_data->offset_x = overlay->crtc_x;
+ win_data->offset_y = overlay->crtc_y;
+ win_data->ovl_width = overlay->crtc_width;
+ win_data->ovl_height = overlay->crtc_height;
+ win_data->fb_width = overlay->fb_width;
+ win_data->fb_height = overlay->fb_height;
+ win_data->dma_addr = overlay->dma_addr[0] + offset;
+ win_data->bpp = overlay->bpp;
+ win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
+ (overlay->bpp >> 3);
+ win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
+
+ /*
+	 * Some parts of win_data should be transferred to the user side
+	 * through a specific ioctl.
+ */
+
+ DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
+ win_data->offset_x, win_data->offset_y);
+ DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
+ win_data->ovl_width, win_data->ovl_height);
+ DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
+ DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
+ overlay->fb_width, overlay->crtc_width);
+}
+
+static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
+{
+ struct vidi_context *ctx = mgr->ctx;
+ struct vidi_win_data *win_data;
+ int win = zpos;
+
+ if (ctx->suspended)
+ return;
+
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
+ if (win < 0 || win >= WINDOWS_NR)
+ return;
+
+ win_data = &ctx->win_data[win];
+
+ win_data->enabled = true;
+
+ DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr);
+
+ if (ctx->vblank_on)
+ schedule_work(&ctx->work);
+}
+
+static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos)
+{
+ struct vidi_context *ctx = mgr->ctx;
+ struct vidi_win_data *win_data;
+ int win = zpos;
+
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
+ if (win < 0 || win >= WINDOWS_NR)
+ return;
+
+ win_data = &ctx->win_data[win];
+ win_data->enabled = false;
+
+ /* TODO. */
+}
+
+static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable)
+{
+ struct vidi_context *ctx = mgr->ctx;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (enable) {
+ ctx->suspended = false;
+
+		/* if vblank was previously enabled, enable it again. */
+ if (test_and_clear_bit(0, &ctx->irq_flags))
+ vidi_enable_vblank(mgr);
+
+ vidi_apply(mgr);
+ } else {
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
+static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
+{
+ struct vidi_context *ctx = mgr->ctx;
+
+ DRM_DEBUG_KMS("%d\n", mode);
+
+ mutex_lock(&ctx->lock);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ vidi_power_on(mgr, true);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ vidi_power_on(mgr, false);
+ break;
+ default:
+ DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ break;
+ }
+
+ mutex_unlock(&ctx->lock);
+}
+
+static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
+ struct drm_device *drm_dev)
+{
+ struct vidi_context *ctx = mgr->ctx;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+
+ mgr->drm_dev = ctx->drm_dev = drm_dev;
+ mgr->pipe = ctx->pipe = priv->pipe++;
+
+ /*
+	 * Enable drm irq mode.
+	 * - with irq_enabled = 1, we can use the vblank feature.
+	 *
+	 * Note that we do not use the drm irq handler but the driver's
+	 * own one instead, because the drm framework supports only a
+	 * single irq handler.
+ */
+ drm_dev->irq_enabled = 1;
+
+ /*
+	 * With vblank_disable_allowed = 1, the vblank interrupt is disabled
+	 * by the drm timer once the current process gives up ownership of
+	 * the vblank event (i.e. after drm_vblank_put() is called).
+ */
+ drm_dev->vblank_disable_allowed = 1;
+
+ return 0;
+}
+
+static struct exynos_drm_manager_ops vidi_manager_ops = {
+ .dpms = vidi_dpms,
+ .commit = vidi_commit,
+ .enable_vblank = vidi_enable_vblank,
+ .disable_vblank = vidi_disable_vblank,
+ .win_mode_set = vidi_win_mode_set,
+ .win_commit = vidi_win_commit,
+ .win_disable = vidi_win_disable,
+};
+
+static struct exynos_drm_manager vidi_manager = {
+ .type = EXYNOS_DISPLAY_TYPE_VIDI,
+ .ops = &vidi_manager_ops,
+};
+
+static void vidi_fake_vblank_handler(struct work_struct *work)
+{
+ struct vidi_context *ctx = container_of(work, struct vidi_context,
+ work);
+
+ if (ctx->pipe < 0)
+ return;
+
+	/* emulate a refresh rate of roughly 50-60Hz. */
+ usleep_range(16000, 20000);
+
+ mutex_lock(&ctx->lock);
+
+ if (ctx->direct_vblank) {
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ ctx->direct_vblank = false;
+ mutex_unlock(&ctx->lock);
+ return;
+ }
+
+ mutex_unlock(&ctx->lock);
+
+ exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+}
+
+static ssize_t vidi_show_connection(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rc;
+ struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
+ struct vidi_context *ctx = mgr->ctx;
+
+ mutex_lock(&ctx->lock);
+
+	rc = sprintf(buf, "%u\n", ctx->connected);
+
+ mutex_unlock(&ctx->lock);
+
+ return rc;
+}
+
+static ssize_t vidi_store_connection(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
+ struct vidi_context *ctx = mgr->ctx;
+ int ret;
+
+	ret = kstrtouint(buf, 0, &ctx->connected);
+ if (ret)
+ return ret;
+
+ if (ctx->connected > 1)
+ return -EINVAL;
+
+ /* use fake edid data for test. */
+ if (!ctx->raw_edid)
+ ctx->raw_edid = (struct edid *)fake_edid_info;
+
+	/* if raw_edid is not the fake data, the connection cannot be tested. */
+ if (ctx->raw_edid != (struct edid *)fake_edid_info) {
+ DRM_DEBUG_KMS("edid data is not fake data.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("requested connection.\n");
+
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+
+ return len;
+}
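A userspace sketch for exercising this attribute; the sysfs path below is an assumption and may differ between platforms:

/* Hypothetical userspace test: report the fake display as connected. */
#include <fcntl.h>
#include <unistd.h>

static int vidi_fake_hotplug(void)
{
	int fd = open("/sys/devices/platform/exynos-drm-vidi/connection",
		      O_WRONLY);	/* path assumed */
	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}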
+
+static DEVICE_ATTR(connection, 0644, vidi_show_connection,
+ vidi_store_connection);
+
+int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vidi_context *ctx = NULL;
+ struct drm_encoder *encoder;
+ struct exynos_drm_display *display;
+ struct drm_exynos_vidi_connection *vidi = data;
+
+ if (!vidi) {
+ DRM_DEBUG_KMS("user data for vidi is null.\n");
+ return -EINVAL;
+ }
+
+ if (vidi->connection > 1) {
+ DRM_DEBUG_KMS("connection should be 0 or 1.\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list,
+ head) {
+ display = exynos_drm_get_display(encoder);
+
+ if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) {
+ ctx = display->ctx;
+ break;
+ }
+ }
+
+ if (!ctx) {
+		DRM_DEBUG_KMS("virtual device type encoder not found.\n");
+ return -EINVAL;
+ }
+
+ if (ctx->connected == vidi->connection) {
+ DRM_DEBUG_KMS("same connection request.\n");
+ return -EINVAL;
+ }
+
+ if (vidi->connection) {
+		struct edid *raw_edid = (struct edid *)(unsigned long)vidi->edid;
+
+ if (!drm_edid_is_valid(raw_edid)) {
+ DRM_DEBUG_KMS("edid data is invalid.\n");
+ return -EINVAL;
+ }
+ ctx->raw_edid = drm_edid_duplicate(raw_edid);
+ if (!ctx->raw_edid) {
+ DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
+ return -ENOMEM;
+ }
+ } else {
+ /*
+		 * With connection = 0, free raw_edid only if the raw EDID
+		 * data is not the fake data.
+ */
+ if (ctx->raw_edid && ctx->raw_edid !=
+ (struct edid *)fake_edid_info) {
+ kfree(ctx->raw_edid);
+ ctx->raw_edid = NULL;
+ }
+ }
+
+ ctx->connected = vidi->connection;
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+
+ return 0;
+}
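A hedged userspace sketch of the same ioctl path, assuming the struct layout and ioctl macro from the exynos_drm uapi header:

/* Illustration only: connect vidi with a caller-supplied EDID blob. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/exynos_drm.h>	/* header location assumed */

static int vidi_connect(int drm_fd, const void *edid)
{
	struct drm_exynos_vidi_connection conn = {
		.connection = 1,
		.edid = (uint64_t)(uintptr_t)edid,  /* validated by the driver */
	};

	return ioctl(drm_fd, DRM_IOCTL_EXYNOS_VIDI_CONNECTION, &conn);
}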
+
+static enum drm_connector_status vidi_detect(struct drm_connector *connector,
+ bool force)
+{
+ struct vidi_context *ctx = ctx_from_connector(connector);
+
+ /*
+	 * The connection request comes from the user side, which performs
+	 * hotplug through a specific ioctl.
+ */
+ return ctx->connected ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static void vidi_connector_destroy(struct drm_connector *connector)
+{
+}
+
+static struct drm_connector_funcs vidi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = vidi_detect,
+ .destroy = vidi_connector_destroy,
+};
+
+static int vidi_get_modes(struct drm_connector *connector)
+{
+ struct vidi_context *ctx = ctx_from_connector(connector);
+ struct edid *edid;
+ int edid_len;
+
+ /*
+	 * The EDID data comes from the user side and is assigned to
+	 * ctx->raw_edid through a specific ioctl.
+ */
+ if (!ctx->raw_edid) {
+ DRM_DEBUG_KMS("raw_edid is null.\n");
+ return -EFAULT;
+ }
+
+ edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
+ edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
+ if (!edid) {
+ DRM_DEBUG_KMS("failed to allocate edid\n");
+ return -ENOMEM;
+ }
+
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ return drm_add_edid_modes(connector, edid);
+}
+
+static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
+{
+ struct vidi_context *ctx = ctx_from_connector(connector);
+
+ return ctx->encoder;
+}
+
+static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
+ .get_modes = vidi_get_modes,
+ .best_encoder = vidi_best_encoder,
+};
+
+static int vidi_create_connector(struct exynos_drm_display *display,
+ struct drm_encoder *encoder)
+{
+ struct vidi_context *ctx = display->ctx;
+ struct drm_connector *connector = &ctx->connector;
+ int ret;
+
+ ctx->encoder = encoder;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(ctx->drm_dev, connector,
+ &vidi_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &vidi_connector_helper_funcs);
+ drm_sysfs_connector_add(connector);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+
+static struct exynos_drm_display_ops vidi_display_ops = {
+ .create_connector = vidi_create_connector,
+};
+
+static struct exynos_drm_display vidi_display = {
+ .type = EXYNOS_DISPLAY_TYPE_VIDI,
+ .ops = &vidi_display_ops,
+};
+
+static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
+ struct vidi_context *ctx = mgr->ctx;
+ struct drm_crtc *crtc = ctx->crtc;
+ int ret;
+
+ vidi_mgr_initialize(mgr, drm_dev);
+
+ ret = exynos_drm_crtc_create(&vidi_manager);
+ if (ret) {
+ DRM_ERROR("failed to create crtc.\n");
+ return ret;
+ }
+
+ ret = exynos_drm_create_enc_conn(drm_dev, &vidi_display);
+ if (ret) {
+ crtc->funcs->destroy(crtc);
+ DRM_ERROR("failed to create encoder and connector.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vidi_probe(struct platform_device *pdev)
+{
+ struct exynos_drm_subdrv *subdrv;
+ struct vidi_context *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->default_win = 0;
+
+ INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
+
+ vidi_manager.ctx = ctx;
+ vidi_display.ctx = ctx;
+
+ mutex_init(&ctx->lock);
+
+ platform_set_drvdata(pdev, &vidi_manager);
+
+ subdrv = &ctx->subdrv;
+ subdrv->dev = &pdev->dev;
+ subdrv->probe = vidi_subdrv_probe;
+
+ ret = exynos_drm_subdrv_register(subdrv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register drm vidi device\n");
+ return ret;
+ }
+
+ ret = device_create_file(&pdev->dev, &dev_attr_connection);
+ if (ret < 0) {
+ exynos_drm_subdrv_unregister(subdrv);
+		DRM_INFO("failed to create connection sysfs.\n");
+		return ret;
+	}
+
+ return 0;
+}
+
+static int vidi_remove(struct platform_device *pdev)
+{
+ struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
+ struct vidi_context *ctx = mgr->ctx;
+ struct drm_encoder *encoder = ctx->encoder;
+ struct drm_crtc *crtc = mgr->crtc;
+
+ if (ctx->raw_edid != (struct edid *)fake_edid_info) {
+ kfree(ctx->raw_edid);
+ ctx->raw_edid = NULL;
+
+ return -EINVAL;
+ }
+
+ crtc->funcs->destroy(crtc);
+ encoder->funcs->destroy(encoder);
+ drm_connector_cleanup(&ctx->connector);
+
+ return 0;
+}
+
+struct platform_driver vidi_driver = {
+ .probe = vidi_probe,
+ .remove = vidi_remove,
+ .driver = {
+ .name = "exynos-drm-vidi",
+ .owner = THIS_MODULE,
+ },
+};
+
+int exynos_drm_probe_vidi(void)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = platform_device_register_simple("exynos-drm-vidi", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ ret = platform_driver_register(&vidi_driver);
+ if (ret) {
+ platform_device_unregister(pdev);
+ return ret;
+ }
+
+ return ret;
+}
+
+void exynos_drm_remove_vidi(void)
+{
+ struct vidi_context *ctx = vidi_manager.ctx;
+ struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+ struct platform_device *pdev = to_platform_device(subdrv->dev);
+
+ platform_driver_unregister(&vidi_driver);
+ platform_device_unregister(pdev);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.h b/drivers/gpu/drm/exynos/exynos_drm_vidi.h
new file mode 100644
index 00000000000..1e5fdaa36cc
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.h
@@ -0,0 +1,22 @@
+/* exynos_drm_vidi.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_VIDI_H_
+#define _EXYNOS_DRM_VIDI_H_
+
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file_priv);
+#else
+#define vidi_connection_ioctl NULL
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
new file mode 100644
index 00000000000..aa259b0a873
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -0,0 +1,2515 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * Based on drivers/media/video/s5p-tv/hdmi_drv.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "regs-hdmi.h"
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/hdmi.h>
+#include <linux/component.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_mixer.h"
+
+#include <linux/gpio.h>
+#include <media/s5p_hdmi.h>
+
+#define get_hdmi_display(dev) platform_get_drvdata(to_platform_device(dev))
+#define ctx_from_connector(c) container_of(c, struct hdmi_context, connector)
+
+#define HOTPLUG_DEBOUNCE_MS 1100
+
+/* AVI header */
+#define HDMI_AVI_VERSION		0x02
+#define HDMI_AVI_LENGTH		0x0D
+
+/* AUI header info */
+#define HDMI_AUI_VERSION		0x01
+#define HDMI_AUI_LENGTH		0x0A
+
+/* AVI active aspect ratio codes */
+#define AVI_SAME_AS_PIC_ASPECT_RATIO	0x8
+#define AVI_4_3_CENTER_RATIO	0x9
+#define AVI_16_9_CENTER_RATIO	0xa
+
+enum hdmi_type {
+ HDMI_TYPE13,
+ HDMI_TYPE14,
+};
+
+struct hdmi_driver_data {
+ unsigned int type;
+ const struct hdmiphy_config *phy_confs;
+ unsigned int phy_conf_count;
+ unsigned int is_apb_phy:1;
+};
+
+struct hdmi_resources {
+ struct clk *hdmi;
+ struct clk *sclk_hdmi;
+ struct clk *sclk_pixel;
+ struct clk *sclk_hdmiphy;
+ struct clk *mout_hdmi;
+ struct regulator_bulk_data *regul_bulk;
+ int regul_count;
+};
+
+struct hdmi_tg_regs {
+ u8 cmd[1];
+ u8 h_fsz[2];
+ u8 hact_st[2];
+ u8 hact_sz[2];
+ u8 v_fsz[2];
+ u8 vsync[2];
+ u8 vsync2[2];
+ u8 vact_st[2];
+ u8 vact_sz[2];
+ u8 field_chg[2];
+ u8 vact_st2[2];
+ u8 vact_st3[2];
+ u8 vact_st4[2];
+ u8 vsync_top_hdmi[2];
+ u8 vsync_bot_hdmi[2];
+ u8 field_top_hdmi[2];
+ u8 field_bot_hdmi[2];
+ u8 tg_3d[1];
+};
+
+struct hdmi_v13_core_regs {
+ u8 h_blank[2];
+ u8 v_blank[3];
+ u8 h_v_line[3];
+ u8 vsync_pol[1];
+ u8 int_pro_mode[1];
+ u8 v_blank_f[3];
+ u8 h_sync_gen[3];
+ u8 v_sync_gen1[3];
+ u8 v_sync_gen2[3];
+ u8 v_sync_gen3[3];
+};
+
+struct hdmi_v14_core_regs {
+ u8 h_blank[2];
+ u8 v2_blank[2];
+ u8 v1_blank[2];
+ u8 v_line[2];
+ u8 h_line[2];
+ u8 hsync_pol[1];
+ u8 vsync_pol[1];
+ u8 int_pro_mode[1];
+ u8 v_blank_f0[2];
+ u8 v_blank_f1[2];
+ u8 h_sync_start[2];
+ u8 h_sync_end[2];
+ u8 v_sync_line_bef_2[2];
+ u8 v_sync_line_bef_1[2];
+ u8 v_sync_line_aft_2[2];
+ u8 v_sync_line_aft_1[2];
+ u8 v_sync_line_aft_pxl_2[2];
+ u8 v_sync_line_aft_pxl_1[2];
+ u8 v_blank_f2[2]; /* for 3D mode */
+ u8 v_blank_f3[2]; /* for 3D mode */
+ u8 v_blank_f4[2]; /* for 3D mode */
+ u8 v_blank_f5[2]; /* for 3D mode */
+ u8 v_sync_line_aft_3[2];
+ u8 v_sync_line_aft_4[2];
+ u8 v_sync_line_aft_5[2];
+ u8 v_sync_line_aft_6[2];
+ u8 v_sync_line_aft_pxl_3[2];
+ u8 v_sync_line_aft_pxl_4[2];
+ u8 v_sync_line_aft_pxl_5[2];
+ u8 v_sync_line_aft_pxl_6[2];
+ u8 vact_space_1[2];
+ u8 vact_space_2[2];
+ u8 vact_space_3[2];
+ u8 vact_space_4[2];
+ u8 vact_space_5[2];
+ u8 vact_space_6[2];
+};
+
+struct hdmi_v13_conf {
+ struct hdmi_v13_core_regs core;
+ struct hdmi_tg_regs tg;
+};
+
+struct hdmi_v14_conf {
+ struct hdmi_v14_core_regs core;
+ struct hdmi_tg_regs tg;
+};
+
+struct hdmi_conf_regs {
+ int pixel_clock;
+ int cea_video_id;
+ enum hdmi_picture_aspect aspect_ratio;
+ union {
+ struct hdmi_v13_conf v13_conf;
+ struct hdmi_v14_conf v14_conf;
+ } conf;
+};
+
+struct hdmi_context {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct drm_connector connector;
+ struct drm_encoder *encoder;
+ bool hpd;
+ bool powered;
+ bool dvi_mode;
+ struct mutex hdmi_mutex;
+
+ void __iomem *regs;
+ int irq;
+ struct delayed_work hotplug_work;
+
+ struct i2c_adapter *ddc_adpt;
+ struct i2c_client *hdmiphy_port;
+
+ /* current hdmiphy conf regs */
+ struct drm_display_mode current_mode;
+ struct hdmi_conf_regs mode_conf;
+
+ struct hdmi_resources res;
+
+ int hpd_gpio;
+ void __iomem *regs_hdmiphy;
+ const struct hdmiphy_config *phy_confs;
+ unsigned int phy_conf_count;
+
+ struct regmap *pmureg;
+ enum hdmi_type type;
+};
+
+struct hdmiphy_config {
+ int pixel_clock;
+ u8 conf[32];
+};
+
+/* list of phy config settings */
+static const struct hdmiphy_config hdmiphy_v13_configs[] = {
+ {
+ .pixel_clock = 27000000,
+ .conf = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+ },
+ },
+ {
+ .pixel_clock = 27027000,
+ .conf = {
+ 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+ 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+ },
+ },
+ {
+ .pixel_clock = 74176000,
+ .conf = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
+ 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
+ },
+ },
+ {
+ .pixel_clock = 74250000,
+ .conf = {
+ 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
+ 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
+ 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
+ },
+ },
+ {
+ .pixel_clock = 148500000,
+ .conf = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
+ 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
+ },
+ },
+};
+
+static const struct hdmiphy_config hdmiphy_v14_configs[] = {
+ {
+ .pixel_clock = 25200000,
+ .conf = {
+ 0x01, 0x51, 0x2A, 0x75, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0xfc, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xf4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 27000000,
+ .conf = {
+ 0x01, 0xd1, 0x22, 0x51, 0x40, 0x08, 0xfc, 0x20,
+ 0x98, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xe4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 27027000,
+ .conf = {
+ 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08,
+ 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ },
+ },
+ {
+ .pixel_clock = 36000000,
+ .conf = {
+ 0x01, 0x51, 0x2d, 0x55, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xab, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 40000000,
+ .conf = {
+ 0x01, 0x51, 0x32, 0x55, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0x2c, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x9a, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 65000000,
+ .conf = {
+ 0x01, 0xd1, 0x36, 0x34, 0x40, 0x1e, 0x0a, 0x08,
+ 0x82, 0xa0, 0x45, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xbd, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 71000000,
+ .conf = {
+ 0x01, 0xd1, 0x3b, 0x35, 0x40, 0x0c, 0x04, 0x08,
+ 0x85, 0xa0, 0x63, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xad, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 73250000,
+ .conf = {
+ 0x01, 0xd1, 0x3d, 0x35, 0x40, 0x18, 0x02, 0x08,
+ 0x83, 0xa0, 0x6e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa8, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 74176000,
+ .conf = {
+ 0x01, 0xd1, 0x3e, 0x35, 0x40, 0x5b, 0xde, 0x08,
+ 0x82, 0xa0, 0x73, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x56, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 74250000,
+ .conf = {
+ 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
+ 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ },
+ },
+ {
+ .pixel_clock = 83500000,
+ .conf = {
+ 0x01, 0xd1, 0x23, 0x11, 0x40, 0x0c, 0xfb, 0x08,
+ 0x85, 0xa0, 0xd1, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x93, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 106500000,
+ .conf = {
+ 0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08,
+ 0x84, 0xa0, 0x0a, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x73, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 108000000,
+ .conf = {
+ 0x01, 0x51, 0x2d, 0x15, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xc7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 115500000,
+ .conf = {
+ 0x01, 0xd1, 0x30, 0x12, 0x40, 0x40, 0x10, 0x08,
+ 0x80, 0x80, 0x21, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xaa, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 119000000,
+ .conf = {
+ 0x01, 0xd1, 0x32, 0x1a, 0x40, 0x30, 0xd8, 0x08,
+ 0x04, 0xa0, 0x2a, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x9d, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 146250000,
+ .conf = {
+ 0x01, 0xd1, 0x3d, 0x15, 0x40, 0x18, 0xfd, 0x08,
+ 0x83, 0xa0, 0x6e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x50, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 148500000,
+ .conf = {
+ 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08,
+ 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ },
+ },
+};
+
+static const struct hdmiphy_config hdmiphy_5420_configs[] = {
+ {
+ .pixel_clock = 25200000,
+ .conf = {
+ 0x01, 0x52, 0x3F, 0x55, 0x40, 0x01, 0x00, 0xC8,
+ 0x82, 0xC8, 0xBD, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x06, 0x80, 0x01, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0xF4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 27000000,
+ .conf = {
+ 0x01, 0xD1, 0x22, 0x51, 0x40, 0x08, 0xFC, 0xE0,
+ 0x98, 0xE8, 0xCB, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x06, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0xE4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 27027000,
+ .conf = {
+ 0x01, 0xD1, 0x2D, 0x72, 0x40, 0x64, 0x12, 0xC8,
+ 0x43, 0xE8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x26, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0xE3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 36000000,
+ .conf = {
+ 0x01, 0x51, 0x2D, 0x55, 0x40, 0x40, 0x00, 0xC8,
+ 0x02, 0xC8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0xAB, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 40000000,
+ .conf = {
+ 0x01, 0xD1, 0x21, 0x31, 0x40, 0x3C, 0x28, 0xC8,
+ 0x87, 0xE8, 0xC8, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0x9A, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 65000000,
+ .conf = {
+ 0x01, 0xD1, 0x36, 0x34, 0x40, 0x0C, 0x04, 0xC8,
+ 0x82, 0xE8, 0x45, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0xBD, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 71000000,
+ .conf = {
+ 0x01, 0xD1, 0x3B, 0x35, 0x40, 0x0C, 0x04, 0xC8,
+ 0x85, 0xE8, 0x63, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0x57, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 73250000,
+ .conf = {
+ 0x01, 0xD1, 0x1F, 0x10, 0x40, 0x78, 0x8D, 0xC8,
+ 0x81, 0xE8, 0xB7, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x56, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0xA8, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 74176000,
+ .conf = {
+ 0x01, 0xD1, 0x1F, 0x10, 0x40, 0x5B, 0xEF, 0xC8,
+ 0x81, 0xE8, 0xB9, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x56, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0xA6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 74250000,
+ .conf = {
+ 0x01, 0xD1, 0x1F, 0x10, 0x40, 0x40, 0xF8, 0x08,
+ 0x81, 0xE8, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x26, 0x80, 0x09, 0x84, 0x05, 0x22, 0x24, 0x66,
+ 0x54, 0xA5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 83500000,
+ .conf = {
+ 0x01, 0xD1, 0x23, 0x11, 0x40, 0x0C, 0xFB, 0xC8,
+ 0x85, 0xE8, 0xD1, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0x4A, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 88750000,
+ .conf = {
+ 0x01, 0xD1, 0x25, 0x11, 0x40, 0x18, 0xFF, 0xC8,
+ 0x83, 0xE8, 0xDE, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0x45, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 106500000,
+ .conf = {
+ 0x01, 0xD1, 0x2C, 0x12, 0x40, 0x0C, 0x09, 0xC8,
+ 0x84, 0xE8, 0x0A, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0x73, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 108000000,
+ .conf = {
+ 0x01, 0x51, 0x2D, 0x15, 0x40, 0x01, 0x00, 0xC8,
+ 0x82, 0xC8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0xC7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 115500000,
+ .conf = {
+ 0x01, 0xD1, 0x30, 0x14, 0x40, 0x0C, 0x03, 0xC8,
+ 0x88, 0xE8, 0x21, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0x6A, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 146250000,
+ .conf = {
+ 0x01, 0xD1, 0x3D, 0x15, 0x40, 0x18, 0xFD, 0xC8,
+ 0x83, 0xE8, 0x6E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0x54, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 148500000,
+ .conf = {
+ 0x01, 0xD1, 0x1F, 0x00, 0x40, 0x40, 0xF8, 0x08,
+ 0x81, 0xE8, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x26, 0x80, 0x09, 0x84, 0x05, 0x22, 0x24, 0x66,
+ 0x54, 0x4B, 0x25, 0x03, 0x00, 0x80, 0x01, 0x80,
+ },
+ },
+};
+
+static struct hdmi_driver_data exynos5420_hdmi_driver_data = {
+ .type = HDMI_TYPE14,
+ .phy_confs = hdmiphy_5420_configs,
+ .phy_conf_count = ARRAY_SIZE(hdmiphy_5420_configs),
+ .is_apb_phy = 1,
+};
+
+static struct hdmi_driver_data exynos4212_hdmi_driver_data = {
+ .type = HDMI_TYPE14,
+ .phy_confs = hdmiphy_v14_configs,
+ .phy_conf_count = ARRAY_SIZE(hdmiphy_v14_configs),
+ .is_apb_phy = 0,
+};
+
+static struct hdmi_driver_data exynos5_hdmi_driver_data = {
+ .type = HDMI_TYPE14,
+ .phy_confs = hdmiphy_v13_configs,
+ .phy_conf_count = ARRAY_SIZE(hdmiphy_v13_configs),
+ .is_apb_phy = 0,
+};
+
+static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
+{
+ return readl(hdata->regs + reg_id);
+}
+
+static inline void hdmi_reg_writeb(struct hdmi_context *hdata,
+ u32 reg_id, u8 value)
+{
+ writeb(value, hdata->regs + reg_id);
+}
+
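+/*
+ * Read-modify-write helper: only the bits selected by @mask are updated
+ * with the corresponding bits of @value; all other bits are preserved.
+ */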
+static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
+ u32 reg_id, u32 value, u32 mask)
+{
+	u32 old = readl(hdata->regs + reg_id);
+
+	value = (value & mask) | (old & ~mask);
+ writel(value, hdata->regs + reg_id);
+}
+
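+/*
+ * The HDMIPHY is reached over I2C when hdmiphy_port is set; on SoCs with
+ * an APB-attached PHY (is_apb_phy, e.g. Exynos5420) the PHY registers
+ * are memory mapped and word aligned, hence the "<< 2" offset scaling.
+ */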
+static int hdmiphy_reg_writeb(struct hdmi_context *hdata,
+ u32 reg_offset, u8 value)
+{
+ if (hdata->hdmiphy_port) {
+ u8 buffer[2];
+ int ret;
+
+ buffer[0] = reg_offset;
+ buffer[1] = value;
+
+ ret = i2c_master_send(hdata->hdmiphy_port, buffer, 2);
+ if (ret == 2)
+ return 0;
+ return ret;
+ } else {
+		writeb(value, hdata->regs_hdmiphy + (reg_offset << 2));
+ return 0;
+ }
+}
+
+static int hdmiphy_reg_write_buf(struct hdmi_context *hdata,
+ u32 reg_offset, const u8 *buf, u32 len)
+{
+ if ((reg_offset + len) > 32)
+ return -EINVAL;
+
+ if (hdata->hdmiphy_port) {
+ int ret;
+
+ ret = i2c_master_send(hdata->hdmiphy_port, buf, len);
+ if (ret == len)
+ return 0;
+ return ret;
+ } else {
+		int i;
+
+		for (i = 0; i < len; i++)
+			writeb(buf[i], hdata->regs_hdmiphy +
+			       ((reg_offset + i) << 2));
+ return 0;
+ }
+}
+
+static void hdmi_v13_regs_dump(struct hdmi_context *hdata, char *prefix)
+{
+#define DUMPREG(reg_id) \
+ DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
+ readl(hdata->regs + reg_id))
+ DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_INTC_FLAG);
+ DUMPREG(HDMI_INTC_CON);
+ DUMPREG(HDMI_HPD_STATUS);
+ DUMPREG(HDMI_V13_PHY_RSTOUT);
+ DUMPREG(HDMI_V13_PHY_VPLL);
+ DUMPREG(HDMI_V13_PHY_CMU);
+ DUMPREG(HDMI_V13_CORE_RSTOUT);
+
+ DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_CON_0);
+ DUMPREG(HDMI_CON_1);
+ DUMPREG(HDMI_CON_2);
+ DUMPREG(HDMI_SYS_STATUS);
+ DUMPREG(HDMI_V13_PHY_STATUS);
+ DUMPREG(HDMI_STATUS_EN);
+ DUMPREG(HDMI_HPD);
+ DUMPREG(HDMI_MODE_SEL);
+ DUMPREG(HDMI_V13_HPD_GEN);
+ DUMPREG(HDMI_V13_DC_CONTROL);
+ DUMPREG(HDMI_V13_VIDEO_PATTERN_GEN);
+
+ DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_H_BLANK_0);
+ DUMPREG(HDMI_H_BLANK_1);
+ DUMPREG(HDMI_V13_V_BLANK_0);
+ DUMPREG(HDMI_V13_V_BLANK_1);
+ DUMPREG(HDMI_V13_V_BLANK_2);
+ DUMPREG(HDMI_V13_H_V_LINE_0);
+ DUMPREG(HDMI_V13_H_V_LINE_1);
+ DUMPREG(HDMI_V13_H_V_LINE_2);
+ DUMPREG(HDMI_VSYNC_POL);
+ DUMPREG(HDMI_INT_PRO_MODE);
+ DUMPREG(HDMI_V13_V_BLANK_F_0);
+ DUMPREG(HDMI_V13_V_BLANK_F_1);
+ DUMPREG(HDMI_V13_V_BLANK_F_2);
+ DUMPREG(HDMI_V13_H_SYNC_GEN_0);
+ DUMPREG(HDMI_V13_H_SYNC_GEN_1);
+ DUMPREG(HDMI_V13_H_SYNC_GEN_2);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_1_0);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_1_1);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_1_2);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_2_0);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_2_1);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_2_2);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_3_0);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_3_1);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_3_2);
+
+ DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_TG_CMD);
+ DUMPREG(HDMI_TG_H_FSZ_L);
+ DUMPREG(HDMI_TG_H_FSZ_H);
+ DUMPREG(HDMI_TG_HACT_ST_L);
+ DUMPREG(HDMI_TG_HACT_ST_H);
+ DUMPREG(HDMI_TG_HACT_SZ_L);
+ DUMPREG(HDMI_TG_HACT_SZ_H);
+ DUMPREG(HDMI_TG_V_FSZ_L);
+ DUMPREG(HDMI_TG_V_FSZ_H);
+ DUMPREG(HDMI_TG_VSYNC_L);
+ DUMPREG(HDMI_TG_VSYNC_H);
+ DUMPREG(HDMI_TG_VSYNC2_L);
+ DUMPREG(HDMI_TG_VSYNC2_H);
+ DUMPREG(HDMI_TG_VACT_ST_L);
+ DUMPREG(HDMI_TG_VACT_ST_H);
+ DUMPREG(HDMI_TG_VACT_SZ_L);
+ DUMPREG(HDMI_TG_VACT_SZ_H);
+ DUMPREG(HDMI_TG_FIELD_CHG_L);
+ DUMPREG(HDMI_TG_FIELD_CHG_H);
+ DUMPREG(HDMI_TG_VACT_ST2_L);
+ DUMPREG(HDMI_TG_VACT_ST2_H);
+ DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
+ DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
+ DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
+ DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
+ DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
+ DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
+ DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
+ DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
+#undef DUMPREG
+}
+
+static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix)
+{
+ int i;
+
+#define DUMPREG(reg_id) \
+ DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
+ readl(hdata->regs + reg_id))
+
+ DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_INTC_CON);
+ DUMPREG(HDMI_INTC_FLAG);
+ DUMPREG(HDMI_HPD_STATUS);
+ DUMPREG(HDMI_INTC_CON_1);
+ DUMPREG(HDMI_INTC_FLAG_1);
+ DUMPREG(HDMI_PHY_STATUS_0);
+ DUMPREG(HDMI_PHY_STATUS_PLL);
+ DUMPREG(HDMI_PHY_CON_0);
+ DUMPREG(HDMI_PHY_RSTOUT);
+ DUMPREG(HDMI_PHY_VPLL);
+ DUMPREG(HDMI_PHY_CMU);
+ DUMPREG(HDMI_CORE_RSTOUT);
+
+ DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_CON_0);
+ DUMPREG(HDMI_CON_1);
+ DUMPREG(HDMI_CON_2);
+ DUMPREG(HDMI_SYS_STATUS);
+ DUMPREG(HDMI_PHY_STATUS_0);
+ DUMPREG(HDMI_STATUS_EN);
+ DUMPREG(HDMI_HPD);
+ DUMPREG(HDMI_MODE_SEL);
+ DUMPREG(HDMI_ENC_EN);
+ DUMPREG(HDMI_DC_CONTROL);
+ DUMPREG(HDMI_VIDEO_PATTERN_GEN);
+
+ DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_H_BLANK_0);
+ DUMPREG(HDMI_H_BLANK_1);
+ DUMPREG(HDMI_V2_BLANK_0);
+ DUMPREG(HDMI_V2_BLANK_1);
+ DUMPREG(HDMI_V1_BLANK_0);
+ DUMPREG(HDMI_V1_BLANK_1);
+ DUMPREG(HDMI_V_LINE_0);
+ DUMPREG(HDMI_V_LINE_1);
+ DUMPREG(HDMI_H_LINE_0);
+ DUMPREG(HDMI_H_LINE_1);
+ DUMPREG(HDMI_HSYNC_POL);
+
+ DUMPREG(HDMI_VSYNC_POL);
+ DUMPREG(HDMI_INT_PRO_MODE);
+ DUMPREG(HDMI_V_BLANK_F0_0);
+ DUMPREG(HDMI_V_BLANK_F0_1);
+ DUMPREG(HDMI_V_BLANK_F1_0);
+ DUMPREG(HDMI_V_BLANK_F1_1);
+
+ DUMPREG(HDMI_H_SYNC_START_0);
+ DUMPREG(HDMI_H_SYNC_START_1);
+ DUMPREG(HDMI_H_SYNC_END_0);
+ DUMPREG(HDMI_H_SYNC_END_1);
+
+ DUMPREG(HDMI_V_SYNC_LINE_BEF_2_0);
+ DUMPREG(HDMI_V_SYNC_LINE_BEF_2_1);
+ DUMPREG(HDMI_V_SYNC_LINE_BEF_1_0);
+ DUMPREG(HDMI_V_SYNC_LINE_BEF_1_1);
+
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_2_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_2_1);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_1_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_1_1);
+
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_1);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_1);
+
+ DUMPREG(HDMI_V_BLANK_F2_0);
+ DUMPREG(HDMI_V_BLANK_F2_1);
+ DUMPREG(HDMI_V_BLANK_F3_0);
+ DUMPREG(HDMI_V_BLANK_F3_1);
+ DUMPREG(HDMI_V_BLANK_F4_0);
+ DUMPREG(HDMI_V_BLANK_F4_1);
+ DUMPREG(HDMI_V_BLANK_F5_0);
+ DUMPREG(HDMI_V_BLANK_F5_1);
+
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_3_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_3_1);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_4_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_4_1);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_5_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_5_1);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_6_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_6_1);
+
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_1);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_1);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_1);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_1);
+
+ DUMPREG(HDMI_VACT_SPACE_1_0);
+ DUMPREG(HDMI_VACT_SPACE_1_1);
+ DUMPREG(HDMI_VACT_SPACE_2_0);
+ DUMPREG(HDMI_VACT_SPACE_2_1);
+ DUMPREG(HDMI_VACT_SPACE_3_0);
+ DUMPREG(HDMI_VACT_SPACE_3_1);
+ DUMPREG(HDMI_VACT_SPACE_4_0);
+ DUMPREG(HDMI_VACT_SPACE_4_1);
+ DUMPREG(HDMI_VACT_SPACE_5_0);
+ DUMPREG(HDMI_VACT_SPACE_5_1);
+ DUMPREG(HDMI_VACT_SPACE_6_0);
+ DUMPREG(HDMI_VACT_SPACE_6_1);
+
+ DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_TG_CMD);
+ DUMPREG(HDMI_TG_H_FSZ_L);
+ DUMPREG(HDMI_TG_H_FSZ_H);
+ DUMPREG(HDMI_TG_HACT_ST_L);
+ DUMPREG(HDMI_TG_HACT_ST_H);
+ DUMPREG(HDMI_TG_HACT_SZ_L);
+ DUMPREG(HDMI_TG_HACT_SZ_H);
+ DUMPREG(HDMI_TG_V_FSZ_L);
+ DUMPREG(HDMI_TG_V_FSZ_H);
+ DUMPREG(HDMI_TG_VSYNC_L);
+ DUMPREG(HDMI_TG_VSYNC_H);
+ DUMPREG(HDMI_TG_VSYNC2_L);
+ DUMPREG(HDMI_TG_VSYNC2_H);
+ DUMPREG(HDMI_TG_VACT_ST_L);
+ DUMPREG(HDMI_TG_VACT_ST_H);
+ DUMPREG(HDMI_TG_VACT_SZ_L);
+ DUMPREG(HDMI_TG_VACT_SZ_H);
+ DUMPREG(HDMI_TG_FIELD_CHG_L);
+ DUMPREG(HDMI_TG_FIELD_CHG_H);
+ DUMPREG(HDMI_TG_VACT_ST2_L);
+ DUMPREG(HDMI_TG_VACT_ST2_H);
+ DUMPREG(HDMI_TG_VACT_ST3_L);
+ DUMPREG(HDMI_TG_VACT_ST3_H);
+ DUMPREG(HDMI_TG_VACT_ST4_L);
+ DUMPREG(HDMI_TG_VACT_ST4_H);
+ DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
+ DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
+ DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
+ DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
+ DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
+ DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
+ DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
+ DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
+ DUMPREG(HDMI_TG_3D);
+
+ DRM_DEBUG_KMS("%s: ---- PACKET REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_AVI_CON);
+ DUMPREG(HDMI_AVI_HEADER0);
+ DUMPREG(HDMI_AVI_HEADER1);
+ DUMPREG(HDMI_AVI_HEADER2);
+ DUMPREG(HDMI_AVI_CHECK_SUM);
+ DUMPREG(HDMI_VSI_CON);
+ DUMPREG(HDMI_VSI_HEADER0);
+ DUMPREG(HDMI_VSI_HEADER1);
+ DUMPREG(HDMI_VSI_HEADER2);
+ for (i = 0; i < 7; ++i)
+ DUMPREG(HDMI_VSI_DATA(i));
+
+#undef DUMPREG
+}
+
+static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
+{
+ if (hdata->type == HDMI_TYPE13)
+ hdmi_v13_regs_dump(hdata, prefix);
+ else
+ hdmi_v14_regs_dump(hdata, prefix);
+}
+
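+/*
+ * Compute the InfoFrame checksum: the sum of all header and payload
+ * bytes plus the checksum itself must be zero modulo 256, so the
+ * checksum is the two's complement of the accumulated byte sum.
+ */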
+static u8 hdmi_chksum(struct hdmi_context *hdata,
+ u32 start, u8 len, u32 hdr_sum)
+{
+ int i;
+
+	/*
+	 * hdr_sum: header0 + header1 + header2
+	 * start: start address of packet byte 1
+	 * len: packet bytes - 1
+	 */
+ for (i = 0; i < len; ++i)
+ hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
+
+ /* return 2's complement of 8 bit hdr_sum */
+ return (u8)(~(hdr_sum & 0xff) + 1);
+}
+
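+/*
+ * Program and enable an AVI or audio InfoFrame. In DVI mode no
+ * InfoFrames may be transmitted, so packet transmission is disabled
+ * altogether instead.
+ */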
+static void hdmi_reg_infoframe(struct hdmi_context *hdata,
+ union hdmi_infoframe *infoframe)
+{
+ u32 hdr_sum;
+ u8 chksum;
+ u32 mod;
+ u32 vic;
+
+ mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
+ if (hdata->dvi_mode) {
+ hdmi_reg_writeb(hdata, HDMI_VSI_CON,
+ HDMI_VSI_CON_DO_NOT_TRANSMIT);
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON,
+ HDMI_AVI_CON_DO_NOT_TRANSMIT);
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
+ return;
+ }
+
+ switch (infoframe->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1,
+ infoframe->any.version);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length);
+ hdr_sum = infoframe->any.type + infoframe->any.version +
+ infoframe->any.length;
+
+		/* Output format hardcoded to zero: RGB/YCbCr selection */
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
+ AVI_ACTIVE_FORMAT_VALID |
+ AVI_UNDERSCANNED_DISPLAY_VALID);
+
+ /*
+ * Set the aspect ratio as per the mode, mentioned in
+ * Table 9 AVI InfoFrame Data Byte 2 of CEA-861-D Standard
+ */
+ switch (hdata->mode_conf.aspect_ratio) {
+ case HDMI_PICTURE_ASPECT_4_3:
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
+ hdata->mode_conf.aspect_ratio |
+ AVI_4_3_CENTER_RATIO);
+ break;
+ case HDMI_PICTURE_ASPECT_16_9:
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
+ hdata->mode_conf.aspect_ratio |
+ AVI_16_9_CENTER_RATIO);
+ break;
+ case HDMI_PICTURE_ASPECT_NONE:
+ default:
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
+ hdata->mode_conf.aspect_ratio |
+ AVI_SAME_AS_PIC_ASPECT_RATIO);
+ break;
+ }
+
+ vic = hdata->mode_conf.cea_video_id;
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
+
+ chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
+ infoframe->any.length, hdr_sum);
+ DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
+ hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
+ break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1,
+ infoframe->any.version);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length);
+ hdr_sum = infoframe->any.type + infoframe->any.version +
+ infoframe->any.length;
+ chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
+ infoframe->any.length, hdr_sum);
+ DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
+ hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
+ break;
+ default:
+ break;
+ }
+}
+
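+/*
+ * Hot-plug detection reads the HPD GPIO directly; the IP-internal HPD
+ * interrupts are disabled in hdmi_conf_init() in favour of the GPIO.
+ */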
+static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
+ bool force)
+{
+ struct hdmi_context *hdata = ctx_from_connector(connector);
+
+ hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+
+ return hdata->hpd ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static void hdmi_connector_destroy(struct drm_connector *connector)
+{
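+	/* the connector itself is cleaned up in hdmi_unbind() */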
+}
+
+static struct drm_connector_funcs hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = hdmi_detect,
+ .destroy = hdmi_connector_destroy,
+};
+
+static int hdmi_get_modes(struct drm_connector *connector)
+{
+ struct hdmi_context *hdata = ctx_from_connector(connector);
+ struct edid *edid;
+
+ if (!hdata->ddc_adpt)
+ return -ENODEV;
+
+ edid = drm_get_edid(connector, hdata->ddc_adpt);
+ if (!edid)
+ return -ENODEV;
+
+ hdata->dvi_mode = !drm_detect_hdmi_monitor(edid);
+ DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
+ (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
+ edid->width_cm, edid->height_cm);
+
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ return drm_add_edid_modes(connector, edid);
+}
+
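+/* Return the index of the PHY configuration matching the pixel clock. */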
+static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
+{
+ int i;
+
+ for (i = 0; i < hdata->phy_conf_count; i++)
+ if (hdata->phy_confs[i].pixel_clock == pixel_clock)
+ return i;
+
+ DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
+ return -EINVAL;
+}
+
+static int hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct hdmi_context *hdata = ctx_from_connector(connector);
+ int ret;
+
+ DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
+ mode->hdisplay, mode->vdisplay, mode->vrefresh,
+ (mode->flags & DRM_MODE_FLAG_INTERLACE) ? true :
+ false, mode->clock * 1000);
+
+ ret = mixer_check_mode(mode);
+ if (ret)
+ return MODE_BAD;
+
+ ret = hdmi_find_phy_conf(hdata, mode->clock * 1000);
+ if (ret < 0)
+ return MODE_BAD;
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector)
+{
+ struct hdmi_context *hdata = ctx_from_connector(connector);
+
+ return hdata->encoder;
+}
+
+static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
+ .get_modes = hdmi_get_modes,
+ .mode_valid = hdmi_mode_valid,
+ .best_encoder = hdmi_best_encoder,
+};
+
+static int hdmi_create_connector(struct exynos_drm_display *display,
+ struct drm_encoder *encoder)
+{
+ struct hdmi_context *hdata = display->ctx;
+ struct drm_connector *connector = &hdata->connector;
+ int ret;
+
+ hdata->encoder = encoder;
+ connector->interlace_allowed = true;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(hdata->drm_dev, connector,
+ &hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
+ drm_sysfs_connector_add(connector);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+static void hdmi_mode_fixup(struct exynos_drm_display *display,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_display_mode *m;
+ int mode_ok;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ mode_ok = hdmi_mode_valid(connector, adjusted_mode);
+
+ /* just return if user desired mode exists. */
+ if (mode_ok == MODE_OK)
+ return;
+
+ /*
+ * otherwise, find the most suitable mode among modes and change it
+ * to adjusted_mode.
+ */
+ list_for_each_entry(m, &connector->modes, head) {
+ mode_ok = hdmi_mode_valid(connector, m);
+
+ if (mode_ok == MODE_OK) {
+			DRM_INFO("desired mode doesn't exist; using the most suitable mode instead\n");
+
+ DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+
+ drm_mode_copy(adjusted_mode, m);
+ break;
+ }
+ }
+}
+
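+/*
+ * Pick the N/CTS pair for Audio Clock Regeneration: the sink recovers
+ * the audio clock as 128 * fs = TMDS_clock * N / CTS. The N values are
+ * the ones recommended by the HDMI specification; the CTS values here
+ * correspond to a 27 MHz TMDS clock.
+ */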
+static void hdmi_set_acr(u32 freq, u8 *acr)
+{
+ u32 n, cts;
+
+ switch (freq) {
+ case 32000:
+ n = 4096;
+ cts = 27000;
+ break;
+ case 44100:
+ n = 6272;
+ cts = 30000;
+ break;
+ case 88200:
+ n = 12544;
+ cts = 30000;
+ break;
+ case 176400:
+ n = 25088;
+ cts = 30000;
+ break;
+ case 48000:
+ n = 6144;
+ cts = 27000;
+ break;
+ case 96000:
+ n = 12288;
+ cts = 27000;
+ break;
+ case 192000:
+ n = 24576;
+ cts = 27000;
+ break;
+ default:
+ n = 0;
+ cts = 0;
+ break;
+ }
+
+ acr[1] = cts >> 16;
+ acr[2] = cts >> 8 & 0xff;
+ acr[3] = cts & 0xff;
+
+ acr[4] = n >> 16;
+ acr[5] = n >> 8 & 0xff;
+ acr[6] = n & 0xff;
+}
+
+static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr)
+{
+ hdmi_reg_writeb(hdata, HDMI_ACR_N0, acr[6]);
+ hdmi_reg_writeb(hdata, HDMI_ACR_N1, acr[5]);
+ hdmi_reg_writeb(hdata, HDMI_ACR_N2, acr[4]);
+ hdmi_reg_writeb(hdata, HDMI_ACR_MCTS0, acr[3]);
+ hdmi_reg_writeb(hdata, HDMI_ACR_MCTS1, acr[2]);
+ hdmi_reg_writeb(hdata, HDMI_ACR_MCTS2, acr[1]);
+ hdmi_reg_writeb(hdata, HDMI_ACR_CTS0, acr[3]);
+ hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]);
+ hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]);
+
+ if (hdata->type == HDMI_TYPE13)
+ hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4);
+ else
+ hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
+}
+
+static void hdmi_audio_init(struct hdmi_context *hdata)
+{
+ u32 sample_rate, bits_per_sample, frame_size_code;
+ u32 data_num, bit_ch, sample_frq;
+ u32 val;
+ u8 acr[7];
+
+ sample_rate = 44100;
+ bits_per_sample = 16;
+ frame_size_code = 0;
+
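+	/*
+	 * Map the sample width to the I2S data-bit and bit-channel fields;
+	 * the 16-bit default uses the smallest setting.
+	 */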
+ switch (bits_per_sample) {
+ case 20:
+ data_num = 2;
+ bit_ch = 1;
+ break;
+ case 24:
+ data_num = 3;
+ bit_ch = 1;
+ break;
+ default:
+ data_num = 1;
+ bit_ch = 0;
+ break;
+ }
+
+ hdmi_set_acr(sample_rate, acr);
+ hdmi_reg_acr(hdata, acr);
+
+ hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CON, HDMI_I2S_IN_DISABLE
+ | HDMI_I2S_AUD_I2S | HDMI_I2S_CUV_I2S_ENABLE
+ | HDMI_I2S_MUX_ENABLE);
+
+ hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CH, HDMI_I2S_CH0_EN
+ | HDMI_I2S_CH1_EN | HDMI_I2S_CH2_EN);
+
+ hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CUV, HDMI_I2S_CUV_RL_EN);
+
+ sample_frq = (sample_rate == 44100) ? 0 :
+ (sample_rate == 48000) ? 2 :
+ (sample_rate == 32000) ? 3 :
+ (sample_rate == 96000) ? 0xa : 0x0;
+
+ hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_DIS);
+ hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_EN);
+
+ val = hdmi_reg_read(hdata, HDMI_I2S_DSD_CON) | 0x01;
+ hdmi_reg_writeb(hdata, HDMI_I2S_DSD_CON, val);
+
+	/* Configure the I2S input ports (I2S_PIN_SEL_0 through I2S_PIN_SEL_3) */
+ hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5)
+ | HDMI_I2S_SEL_LRCK(6));
+ hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1)
+ | HDMI_I2S_SEL_SDATA2(4));
+ hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1)
+ | HDMI_I2S_SEL_SDATA2(2));
+ hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0));
+
+ /* I2S_CON_1 & 2 */
+ hdmi_reg_writeb(hdata, HDMI_I2S_CON_1, HDMI_I2S_SCLK_FALLING_EDGE
+ | HDMI_I2S_L_CH_LOW_POL);
+ hdmi_reg_writeb(hdata, HDMI_I2S_CON_2, HDMI_I2S_MSB_FIRST_MODE
+ | HDMI_I2S_SET_BIT_CH(bit_ch)
+ | HDMI_I2S_SET_SDATA_BIT(data_num)
+ | HDMI_I2S_BASIC_FORMAT);
+
+	/* Configure the channel status (CUV) registers */
+ hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_0, HDMI_I2S_CH_STATUS_MODE_0
+ | HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH
+ | HDMI_I2S_COPYRIGHT
+ | HDMI_I2S_LINEAR_PCM
+ | HDMI_I2S_CONSUMER_FORMAT);
+ hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_1, HDMI_I2S_CD_PLAYER);
+ hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_2, HDMI_I2S_SET_SOURCE_NUM(0));
+ hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_3, HDMI_I2S_CLK_ACCUR_LEVEL_2
+ | HDMI_I2S_SET_SMP_FREQ(sample_frq));
+ hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_4,
+ HDMI_I2S_ORG_SMP_FREQ_44_1
+ | HDMI_I2S_WORD_LEN_MAX24_24BITS
+ | HDMI_I2S_WORD_LEN_MAX_24BITS);
+
+ hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_CON, HDMI_I2S_CH_STATUS_RELOAD);
+}
+
+static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff)
+{
+ if (hdata->dvi_mode)
+ return;
+
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0);
+ hdmi_reg_writemask(hdata, HDMI_CON_0, onoff ?
+ HDMI_ASP_EN : HDMI_ASP_DIS, HDMI_ASP_MASK);
+}
+
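+/*
+ * Start or stop the HDMI output and its timing generator together;
+ * interlaced modes additionally need the field signal enabled.
+ */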
+static void hdmi_start(struct hdmi_context *hdata, bool start)
+{
+ u32 val = start ? HDMI_TG_EN : 0;
+
+ if (hdata->current_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ val |= HDMI_FIELD_EN;
+
+ hdmi_reg_writemask(hdata, HDMI_CON_0, val, HDMI_EN);
+ hdmi_reg_writemask(hdata, HDMI_TG_CMD, val, HDMI_TG_EN | HDMI_FIELD_EN);
+}
+
+static void hdmi_conf_init(struct hdmi_context *hdata)
+{
+ union hdmi_infoframe infoframe;
+
+ /* disable HPD interrupts from HDMI IP block, use GPIO instead */
+ hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
+ HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
+
+ /* choose HDMI mode */
+ hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
+ HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
+	/* apply video preamble and guard band in HDMI mode only */
+ hdmi_reg_writeb(hdata, HDMI_CON_2, 0);
+ /* disable bluescreen */
+ hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
+
+ if (hdata->dvi_mode) {
+ /* choose DVI mode */
+ hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
+ HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
+ hdmi_reg_writeb(hdata, HDMI_CON_2,
+ HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS);
+ }
+
+ if (hdata->type == HDMI_TYPE13) {
+		/* choose the bluescreen color */
+ hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
+ hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34);
+ hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_2, 0x56);
+
+ /* enable AVI packet every vsync, fixes purple line problem */
+ hdmi_reg_writeb(hdata, HDMI_V13_AVI_CON, 0x02);
+		/* force RGB; see CEA-861-D Table 7 for more detail */
+ hdmi_reg_writeb(hdata, HDMI_V13_AVI_BYTE(0), 0 << 5);
+ hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5);
+
+ hdmi_reg_writeb(hdata, HDMI_V13_SPD_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
+ } else {
+ infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI;
+ infoframe.any.version = HDMI_AVI_VERSION;
+ infoframe.any.length = HDMI_AVI_LENGTH;
+ hdmi_reg_infoframe(hdata, &infoframe);
+
+ infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO;
+ infoframe.any.version = HDMI_AUI_VERSION;
+ infoframe.any.length = HDMI_AUI_LENGTH;
+ hdmi_reg_infoframe(hdata, &infoframe);
+
+ /* enable AVI packet every vsync, fixes purple line problem */
+ hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
+ }
+}
+
+static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
+{
+ const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
+ const struct hdmi_v13_core_regs *core =
+ &hdata->mode_conf.conf.v13_conf.core;
+ int tries;
+
+ /* setting core registers */
+ hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_0, core->v_blank[0]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_1, core->v_blank[1]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_2, core->v_blank[2]);
+ hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, core->h_v_line[0]);
+ hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, core->h_v_line[1]);
+ hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, core->h_v_line[2]);
+ hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
+ hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_0, core->v_blank_f[0]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_1, core->v_blank_f[1]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_2, core->v_blank_f[2]);
+ hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_0, core->h_sync_gen[0]);
+ hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_1, core->h_sync_gen[1]);
+ hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_2, core->h_sync_gen[2]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+ /* Timing generator registers */
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
+
+ /* waiting for HDMIPHY's PLL to get to steady state */
+ for (tries = 100; tries; --tries) {
+		u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS);
+
+		if (val & HDMI_PHY_STATUS_READY)
+ break;
+ usleep_range(1000, 2000);
+ }
+ /* steady state not achieved */
+ if (tries == 0) {
+ DRM_ERROR("hdmiphy's pll could not reach steady state.\n");
+ hdmi_regs_dump(hdata, "timing apply");
+ }
+
+ clk_disable_unprepare(hdata->res.sclk_hdmi);
+ clk_set_parent(hdata->res.mout_hdmi, hdata->res.sclk_hdmiphy);
+ clk_prepare_enable(hdata->res.sclk_hdmi);
+
+ /* enable HDMI and timing generator */
+ hdmi_start(hdata, true);
+}
+
+static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
+{
+ const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
+ const struct hdmi_v14_core_regs *core =
+ &hdata->mode_conf.conf.v14_conf.core;
+ int tries;
+
+ /* setting core registers */
+ hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
+ hdmi_reg_writeb(hdata, HDMI_V2_BLANK_0, core->v2_blank[0]);
+ hdmi_reg_writeb(hdata, HDMI_V2_BLANK_1, core->v2_blank[1]);
+ hdmi_reg_writeb(hdata, HDMI_V1_BLANK_0, core->v1_blank[0]);
+ hdmi_reg_writeb(hdata, HDMI_V1_BLANK_1, core->v1_blank[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_LINE_0, core->v_line[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_LINE_1, core->v_line[1]);
+ hdmi_reg_writeb(hdata, HDMI_H_LINE_0, core->h_line[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_LINE_1, core->h_line[1]);
+ hdmi_reg_writeb(hdata, HDMI_HSYNC_POL, core->hsync_pol[0]);
+ hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
+ hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_0, core->v_blank_f0[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_1, core->v_blank_f0[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_0, core->v_blank_f1[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_1, core->v_blank_f1[1]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_0, core->h_sync_start[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_1, core->h_sync_start[1]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_0, core->h_sync_end[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_1, core->h_sync_end[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_0,
+ core->v_sync_line_bef_2[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_1,
+ core->v_sync_line_bef_2[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_0,
+ core->v_sync_line_bef_1[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_1,
+ core->v_sync_line_bef_1[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_0,
+ core->v_sync_line_aft_2[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_1,
+ core->v_sync_line_aft_2[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_0,
+ core->v_sync_line_aft_1[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_1,
+ core->v_sync_line_aft_1[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0,
+ core->v_sync_line_aft_pxl_2[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_1,
+ core->v_sync_line_aft_pxl_2[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0,
+ core->v_sync_line_aft_pxl_1[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_1,
+ core->v_sync_line_aft_pxl_1[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_0, core->v_blank_f2[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_1, core->v_blank_f2[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_0, core->v_blank_f3[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_1, core->v_blank_f3[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_0, core->v_blank_f4[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_1, core->v_blank_f4[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_0, core->v_blank_f5[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_1, core->v_blank_f5[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_0,
+ core->v_sync_line_aft_3[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_1,
+ core->v_sync_line_aft_3[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_0,
+ core->v_sync_line_aft_4[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_1,
+ core->v_sync_line_aft_4[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_0,
+ core->v_sync_line_aft_5[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_1,
+ core->v_sync_line_aft_5[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_0,
+ core->v_sync_line_aft_6[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_1,
+ core->v_sync_line_aft_6[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0,
+ core->v_sync_line_aft_pxl_3[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_1,
+ core->v_sync_line_aft_pxl_3[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0,
+ core->v_sync_line_aft_pxl_4[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_1,
+ core->v_sync_line_aft_pxl_4[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0,
+ core->v_sync_line_aft_pxl_5[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_1,
+ core->v_sync_line_aft_pxl_5[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0,
+ core->v_sync_line_aft_pxl_6[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_1,
+ core->v_sync_line_aft_pxl_6[1]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_0, core->vact_space_1[0]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_1, core->vact_space_1[1]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_0, core->vact_space_2[0]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_1, core->vact_space_2[1]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_0, core->vact_space_3[0]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_1, core->vact_space_3[1]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_0, core->vact_space_4[0]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_1, core->vact_space_4[1]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_0, core->vact_space_5[0]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_1, core->vact_space_5[1]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_0, core->vact_space_6[0]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]);
+
+ /* Timing generator registers */
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d[0]);
+
+ /* waiting for HDMIPHY's PLL to get to steady state */
+ for (tries = 100; tries; --tries) {
+		u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0);
+
+		if (val & HDMI_PHY_STATUS_READY)
+ break;
+ usleep_range(1000, 2000);
+ }
+ /* steady state not achieved */
+ if (tries == 0) {
+ DRM_ERROR("hdmiphy's pll could not reach steady state.\n");
+ hdmi_regs_dump(hdata, "timing apply");
+ }
+
+ clk_disable_unprepare(hdata->res.sclk_hdmi);
+ clk_set_parent(hdata->res.mout_hdmi, hdata->res.sclk_hdmiphy);
+ clk_prepare_enable(hdata->res.sclk_hdmi);
+
+ /* enable HDMI and timing generator */
+ hdmi_start(hdata, true);
+}
+
+static void hdmi_mode_apply(struct hdmi_context *hdata)
+{
+ if (hdata->type == HDMI_TYPE13)
+ hdmi_v13_mode_apply(hdata);
+ else
+ hdmi_v14_mode_apply(hdata);
+}
+
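+/*
+ * Reset the HDMIPHY: reparent sclk_hdmi to the stable pixel clock while
+ * the PHY PLL is down, then pulse the PHY software reset bit.
+ */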
+static void hdmiphy_conf_reset(struct hdmi_context *hdata)
+{
+ u8 buffer[2];
+ u32 reg;
+
+ clk_disable_unprepare(hdata->res.sclk_hdmi);
+ clk_set_parent(hdata->res.mout_hdmi, hdata->res.sclk_pixel);
+ clk_prepare_enable(hdata->res.sclk_hdmi);
+
+ /* operation mode */
+ buffer[0] = 0x1f;
+ buffer[1] = 0x00;
+
+ if (hdata->hdmiphy_port)
+ i2c_master_send(hdata->hdmiphy_port, buffer, 2);
+
+ if (hdata->type == HDMI_TYPE13)
+ reg = HDMI_V13_PHY_RSTOUT;
+ else
+ reg = HDMI_PHY_RSTOUT;
+
+ /* reset hdmiphy */
+ hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT);
+ usleep_range(10000, 12000);
+ hdmi_reg_writemask(hdata, reg, 0, HDMI_PHY_SW_RSTOUT);
+ usleep_range(10000, 12000);
+}
+
+static void hdmiphy_poweron(struct hdmi_context *hdata)
+{
+ if (hdata->type != HDMI_TYPE14)
+ return;
+
+ DRM_DEBUG_KMS("\n");
+
+ /* For PHY Mode Setting */
+ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
+ HDMI_PHY_ENABLE_MODE_SET);
+ /* Phy Power On */
+ hdmiphy_reg_writeb(hdata, HDMIPHY_POWER,
+ HDMI_PHY_POWER_ON);
+ /* For PHY Mode Setting */
+ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
+ HDMI_PHY_DISABLE_MODE_SET);
+ /* PHY SW Reset */
+ hdmiphy_conf_reset(hdata);
+}
+
+static void hdmiphy_poweroff(struct hdmi_context *hdata)
+{
+ if (hdata->type != HDMI_TYPE14)
+ return;
+
+ DRM_DEBUG_KMS("\n");
+
+ /* PHY SW Reset */
+ hdmiphy_conf_reset(hdata);
+ /* For PHY Mode Setting */
+ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
+ HDMI_PHY_ENABLE_MODE_SET);
+
+ /* PHY Power Off */
+ hdmiphy_reg_writeb(hdata, HDMIPHY_POWER,
+ HDMI_PHY_POWER_OFF);
+
+ /* For PHY Mode Setting */
+ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
+ HDMI_PHY_DISABLE_MODE_SET);
+}
+
+static void hdmiphy_conf_apply(struct hdmi_context *hdata)
+{
+ int ret;
+ int i;
+
+ /* pixel clock */
+ i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock);
+ if (i < 0) {
+ DRM_ERROR("failed to find hdmiphy conf\n");
+ return;
+ }
+
+ ret = hdmiphy_reg_write_buf(hdata, 0, hdata->phy_confs[i].conf, 32);
+ if (ret) {
+ DRM_ERROR("failed to configure hdmiphy\n");
+ return;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
+ HDMI_PHY_DISABLE_MODE_SET);
+ if (ret) {
+ DRM_ERROR("failed to enable hdmiphy\n");
+ return;
+	}
+}
+
+static void hdmi_conf_apply(struct hdmi_context *hdata)
+{
+ hdmiphy_conf_reset(hdata);
+ hdmiphy_conf_apply(hdata);
+
+ mutex_lock(&hdata->hdmi_mutex);
+ hdmi_start(hdata, false);
+ hdmi_conf_init(hdata);
+ mutex_unlock(&hdata->hdmi_mutex);
+
+ hdmi_audio_init(hdata);
+
+ /* setting core registers */
+ hdmi_mode_apply(hdata);
+ hdmi_audio_control(hdata, true);
+
+ hdmi_regs_dump(hdata, "start");
+}
+
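+/*
+ * Store up to a 32-bit value into consecutive byte-wide register
+ * fields, least significant byte first.
+ */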
+static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
+{
+	int i;
+
+	BUG_ON(num_bytes > 4);
+ for (i = 0; i < num_bytes; i++)
+ reg_pair[i] = (value >> (8 * i)) & 0xff;
+}
+
+static void hdmi_v13_mode_set(struct hdmi_context *hdata,
+ struct drm_display_mode *m)
+{
+ struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core;
+ struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
+ unsigned int val;
+
+ hdata->mode_conf.cea_video_id =
+ drm_match_cea_mode((struct drm_display_mode *)m);
+ hdata->mode_conf.pixel_clock = m->clock * 1000;
+ hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
+
+ hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
+ hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal);
+
+ val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+ hdmi_set_reg(core->vsync_pol, 1, val);
+
+ val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
+ hdmi_set_reg(core->int_pro_mode, 1, val);
+
+ val = (m->hsync_start - m->hdisplay - 2);
+ val |= ((m->hsync_end - m->hdisplay - 2) << 10);
+ val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20;
+ hdmi_set_reg(core->h_sync_gen, 3, val);
+
+	/*
+	 * Quirk of the Exynos HDMI IP design: hsync_start and hsync_end
+	 * must be programmed 2 pixels less than the actual calculation.
+	 */
+
+	/* The following values and calculations differ between mode types */
+ if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* Interlaced Mode */
+ val = ((m->vsync_end - m->vdisplay) / 2);
+ val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
+ hdmi_set_reg(core->v_sync_gen1, 3, val);
+
+ val = m->vtotal / 2;
+ val |= ((m->vtotal - m->vdisplay) / 2) << 11;
+ hdmi_set_reg(core->v_blank, 3, val);
+
+ val = (m->vtotal +
+ ((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
+ val |= m->vtotal << 11;
+ hdmi_set_reg(core->v_blank_f, 3, val);
+
+ val = ((m->vtotal / 2) + 7);
+ val |= ((m->vtotal / 2) + 2) << 12;
+ hdmi_set_reg(core->v_sync_gen2, 3, val);
+
+ val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
+ val |= ((m->htotal / 2) +
+ (m->hsync_start - m->hdisplay)) << 12;
+ hdmi_set_reg(core->v_sync_gen3, 3, val);
+
+ hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
+ hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
+
+		hdmi_set_reg(tg->vact_st2, 2, 0x249); /* Reset value + 1 */
+ } else {
+ /* Progressive Mode */
+
+ val = m->vtotal;
+ val |= (m->vtotal - m->vdisplay) << 11;
+ hdmi_set_reg(core->v_blank, 3, val);
+
+ hdmi_set_reg(core->v_blank_f, 3, 0);
+
+ val = (m->vsync_end - m->vdisplay);
+ val |= ((m->vsync_start - m->vdisplay) << 12);
+ hdmi_set_reg(core->v_sync_gen1, 3, val);
+
+		hdmi_set_reg(core->v_sync_gen2, 3, 0x1001); /* Reset value */
+		hdmi_set_reg(core->v_sync_gen3, 3, 0x1001); /* Reset value */
+ hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
+ hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
+ hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
+ }
+
+ /* Timing generator registers */
+ hdmi_set_reg(tg->cmd, 1, 0x0);
+ hdmi_set_reg(tg->h_fsz, 2, m->htotal);
+ hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
+ hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
+ hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
+ hdmi_set_reg(tg->vsync, 2, 0x1);
+ hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
+ hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
+ hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->tg_3d, 1, 0x0); /* Not used */
+}
+
+static void hdmi_v14_mode_set(struct hdmi_context *hdata,
+ struct drm_display_mode *m)
+{
+ struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
+ struct hdmi_v14_core_regs *core =
+ &hdata->mode_conf.conf.v14_conf.core;
+
+ hdata->mode_conf.cea_video_id =
+ drm_match_cea_mode((struct drm_display_mode *)m);
+ hdata->mode_conf.pixel_clock = m->clock * 1000;
+ hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
+
+ hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
+ hdmi_set_reg(core->v_line, 2, m->vtotal);
+ hdmi_set_reg(core->h_line, 2, m->htotal);
+ hdmi_set_reg(core->hsync_pol, 1,
+ (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
+ hdmi_set_reg(core->vsync_pol, 1,
+ (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
+ hdmi_set_reg(core->int_pro_mode, 1,
+ (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
+
+	/*
+	 * Quirk of the Exynos 5 HDMI IP design: hsync_start and hsync_end
+	 * must be programmed 2 pixels less than the actual calculation.
+	 */
+
+	/* The following values and calculations differ between mode types */
+ if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* Interlaced Mode */
+ hdmi_set_reg(core->v_sync_line_bef_2, 2,
+ (m->vsync_end - m->vdisplay) / 2);
+ hdmi_set_reg(core->v_sync_line_bef_1, 2,
+ (m->vsync_start - m->vdisplay) / 2);
+ hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2);
+ hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2);
+ hdmi_set_reg(core->v_blank_f0, 2, m->vtotal - m->vdisplay / 2);
+ hdmi_set_reg(core->v_blank_f1, 2, m->vtotal);
+ hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7);
+ hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2,
+ (m->htotal / 2) + (m->hsync_start - m->hdisplay));
+ hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2,
+ (m->htotal / 2) + (m->hsync_start - m->hdisplay));
+ hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
+ hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
+ hdmi_set_reg(tg->vact_st2, 2, m->vtotal - m->vdisplay / 2);
+ hdmi_set_reg(tg->vsync2, 2, (m->vtotal / 2) + 1);
+ hdmi_set_reg(tg->vsync_bot_hdmi, 2, (m->vtotal / 2) + 1);
+ hdmi_set_reg(tg->field_bot_hdmi, 2, (m->vtotal / 2) + 1);
+ hdmi_set_reg(tg->vact_st3, 2, 0x0);
+ hdmi_set_reg(tg->vact_st4, 2, 0x0);
+ } else {
+ /* Progressive Mode */
+ hdmi_set_reg(core->v_sync_line_bef_2, 2,
+ m->vsync_end - m->vdisplay);
+ hdmi_set_reg(core->v_sync_line_bef_1, 2,
+ m->vsync_start - m->vdisplay);
+ hdmi_set_reg(core->v2_blank, 2, m->vtotal);
+ hdmi_set_reg(core->v1_blank, 2, m->vtotal - m->vdisplay);
+ hdmi_set_reg(core->v_blank_f0, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f1, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_2, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_1, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, 0xffff);
+ hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
+ hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
+ hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
+ hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */
+ hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */
+ hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
+ }
+
+	/* The following values and calculations are the same for both mode types */
+ hdmi_set_reg(core->h_sync_start, 2, m->hsync_start - m->hdisplay - 2);
+ hdmi_set_reg(core->h_sync_end, 2, m->hsync_end - m->hdisplay - 2);
+ hdmi_set_reg(core->vact_space_1, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_2, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_3, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_4, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_5, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_6, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f2, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f3, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f4, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f5, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_3, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_4, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_5, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_6, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_3, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_4, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_5, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_6, 2, 0xffff);
+
+ /* Timing generator registers */
+ hdmi_set_reg(tg->cmd, 1, 0x0);
+ hdmi_set_reg(tg->h_fsz, 2, m->htotal);
+ hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
+ hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
+ hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
+ hdmi_set_reg(tg->vsync, 2, 0x1);
+ hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
+ hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
+ hdmi_set_reg(tg->tg_3d, 1, 0x0);
+}
+
+static void hdmi_mode_set(struct exynos_drm_display *display,
+ struct drm_display_mode *mode)
+{
+ struct hdmi_context *hdata = display->ctx;
+ struct drm_display_mode *m = mode;
+
+ DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
+ m->hdisplay, m->vdisplay,
+ m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
+		      "INTERLACED" : "PROGRESSIVE");
+
+ /* preserve mode information for later use. */
+ drm_mode_copy(&hdata->current_mode, mode);
+
+ if (hdata->type == HDMI_TYPE13)
+ hdmi_v13_mode_set(hdata, mode);
+ else
+ hdmi_v14_mode_set(hdata, mode);
+}
+
+static void hdmi_commit(struct exynos_drm_display *display)
+{
+ struct hdmi_context *hdata = display->ctx;
+
+ mutex_lock(&hdata->hdmi_mutex);
+ if (!hdata->powered) {
+ mutex_unlock(&hdata->hdmi_mutex);
+ return;
+ }
+ mutex_unlock(&hdata->hdmi_mutex);
+
+ hdmi_conf_apply(hdata);
+}
+
+static void hdmi_poweron(struct exynos_drm_display *display)
+{
+ struct hdmi_context *hdata = display->ctx;
+ struct hdmi_resources *res = &hdata->res;
+
+ mutex_lock(&hdata->hdmi_mutex);
+ if (hdata->powered) {
+ mutex_unlock(&hdata->hdmi_mutex);
+ return;
+ }
+
+ hdata->powered = true;
+
+ mutex_unlock(&hdata->hdmi_mutex);
+
+ pm_runtime_get_sync(hdata->dev);
+
+ if (regulator_bulk_enable(res->regul_count, res->regul_bulk))
+ DRM_DEBUG_KMS("failed to enable regulator bulk\n");
+
+ /* set pmu hdmiphy control bit to enable hdmiphy */
+ regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
+ PMU_HDMI_PHY_ENABLE_BIT, 1);
+
+ clk_prepare_enable(res->hdmi);
+ clk_prepare_enable(res->sclk_hdmi);
+
+ hdmiphy_poweron(hdata);
+ hdmi_commit(display);
+}
+
+static void hdmi_poweroff(struct exynos_drm_display *display)
+{
+ struct hdmi_context *hdata = display->ctx;
+ struct hdmi_resources *res = &hdata->res;
+
+ mutex_lock(&hdata->hdmi_mutex);
+ if (!hdata->powered)
+ goto out;
+ mutex_unlock(&hdata->hdmi_mutex);
+
+ /* HDMI System Disable */
+ hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);
+
+ hdmiphy_poweroff(hdata);
+
+ cancel_delayed_work(&hdata->hotplug_work);
+
+ clk_disable_unprepare(res->sclk_hdmi);
+ clk_disable_unprepare(res->hdmi);
+
+ /* reset pmu hdmiphy control bit to disable hdmiphy */
+ regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
+ PMU_HDMI_PHY_ENABLE_BIT, 0);
+
+ regulator_bulk_disable(res->regul_count, res->regul_bulk);
+
+ pm_runtime_put_sync(hdata->dev);
+
+ mutex_lock(&hdata->hdmi_mutex);
+ hdata->powered = false;
+
+out:
+ mutex_unlock(&hdata->hdmi_mutex);
+}
+
+static void hdmi_dpms(struct exynos_drm_display *display, int mode)
+{
+ struct hdmi_context *hdata = display->ctx;
+ struct drm_encoder *encoder = hdata->encoder;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_crtc_helper_funcs *funcs = NULL;
+
+ DRM_DEBUG_KMS("mode %d\n", mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ hdmi_poweron(display);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+		/*
+		 * The SFRs of VP and Mixer are updated by the vertical sync
+		 * of the timing generator, which is part of HDMI, so the
+		 * sequence to disable the TV subsystem must be:
+		 * VP -> Mixer -> HDMI
+		 *
+		 * The code below disables the Mixer and VP (if used) prior
+		 * to disabling HDMI.
+		 */
+ if (crtc)
+ funcs = crtc->helper_private;
+ if (funcs && funcs->dpms)
+ (*funcs->dpms)(crtc, mode);
+
+ hdmi_poweroff(display);
+ break;
+ default:
+ DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+ break;
+ }
+}
+
+static struct exynos_drm_display_ops hdmi_display_ops = {
+ .create_connector = hdmi_create_connector,
+ .mode_fixup = hdmi_mode_fixup,
+ .mode_set = hdmi_mode_set,
+ .dpms = hdmi_dpms,
+ .commit = hdmi_commit,
+};
+
+static struct exynos_drm_display hdmi_display = {
+ .type = EXYNOS_DISPLAY_TYPE_HDMI,
+ .ops = &hdmi_display_ops,
+};
+
+static void hdmi_hotplug_work_func(struct work_struct *work)
+{
+ struct hdmi_context *hdata;
+
+ hdata = container_of(work, struct hdmi_context, hotplug_work.work);
+
+ mutex_lock(&hdata->hdmi_mutex);
+ hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+ mutex_unlock(&hdata->hdmi_mutex);
+
+ if (hdata->drm_dev)
+ drm_helper_hpd_irq_event(hdata->drm_dev);
+}
+
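+/*
+ * The threaded HPD interrupt handler only (re)arms the delayed hotplug
+ * work, debouncing connect/disconnect glitches by HOTPLUG_DEBOUNCE_MS.
+ */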
+static irqreturn_t hdmi_irq_thread(int irq, void *arg)
+{
+ struct hdmi_context *hdata = arg;
+
+ mod_delayed_work(system_wq, &hdata->hotplug_work,
+ msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
+
+ return IRQ_HANDLED;
+}
+
+static int hdmi_resources_init(struct hdmi_context *hdata)
+{
+ struct device *dev = hdata->dev;
+ struct hdmi_resources *res = &hdata->res;
+	static const char * const supply[] = {
+ "hdmi-en",
+ "vdd",
+ "vdd_osc",
+ "vdd_pll",
+ };
+ int i, ret;
+
+ DRM_DEBUG_KMS("HDMI resource init\n");
+
+ /* get clocks, power */
+ res->hdmi = devm_clk_get(dev, "hdmi");
+ if (IS_ERR(res->hdmi)) {
+ DRM_ERROR("failed to get clock 'hdmi'\n");
+ ret = PTR_ERR(res->hdmi);
+ goto fail;
+ }
+ res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
+ if (IS_ERR(res->sclk_hdmi)) {
+ DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
+ ret = PTR_ERR(res->sclk_hdmi);
+ goto fail;
+ }
+ res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
+ if (IS_ERR(res->sclk_pixel)) {
+ DRM_ERROR("failed to get clock 'sclk_pixel'\n");
+ ret = PTR_ERR(res->sclk_pixel);
+ goto fail;
+ }
+ res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
+ if (IS_ERR(res->sclk_hdmiphy)) {
+ DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
+ ret = PTR_ERR(res->sclk_hdmiphy);
+ goto fail;
+ }
+ res->mout_hdmi = devm_clk_get(dev, "mout_hdmi");
+ if (IS_ERR(res->mout_hdmi)) {
+ DRM_ERROR("failed to get clock 'mout_hdmi'\n");
+ ret = PTR_ERR(res->mout_hdmi);
+ goto fail;
+ }
+
+ clk_set_parent(res->mout_hdmi, res->sclk_pixel);
+
+ res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
+ sizeof(res->regul_bulk[0]), GFP_KERNEL);
+ if (!res->regul_bulk) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ for (i = 0; i < ARRAY_SIZE(supply); ++i) {
+ res->regul_bulk[i].supply = supply[i];
+ res->regul_bulk[i].consumer = NULL;
+ }
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+ if (ret) {
+ DRM_ERROR("failed to get regulators\n");
+		goto fail;
+ }
+ res->regul_count = ARRAY_SIZE(supply);
+
+ return ret;
+fail:
+ DRM_ERROR("HDMI resource init - failed\n");
+ return ret;
+}
+
+static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
+ (struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct s5p_hdmi_platform_data *pd;
+	int value;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ goto err_data;
+
+ if (!of_find_property(np, "hpd-gpio", &value)) {
+ DRM_ERROR("no hpd gpio property found\n");
+ goto err_data;
+ }
+
+ pd->hpd_gpio = of_get_named_gpio(np, "hpd-gpio", 0);
+
+ return pd;
+
+err_data:
+ return NULL;
+}
+
+static struct of_device_id hdmi_match_types[] = {
+ {
+ .compatible = "samsung,exynos5-hdmi",
+ .data = &exynos5_hdmi_driver_data,
+ }, {
+ .compatible = "samsung,exynos4212-hdmi",
+ .data = &exynos4212_hdmi_driver_data,
+ }, {
+ .compatible = "samsung,exynos5420-hdmi",
+ .data = &exynos5420_hdmi_driver_data,
+ }, {
+ /* end node */
+ }
+};
+
+static int hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm_dev = data;
+ struct hdmi_context *hdata;
+
+ hdata = hdmi_display.ctx;
+ hdata->drm_dev = drm_dev;
+
+ return exynos_drm_create_enc_conn(drm_dev, &hdmi_display);
+}
+
+static void hdmi_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct exynos_drm_display *display = get_hdmi_display(dev);
+ struct drm_encoder *encoder = display->encoder;
+ struct hdmi_context *hdata = display->ctx;
+
+ encoder->funcs->destroy(encoder);
+ drm_connector_cleanup(&hdata->connector);
+}
+
+static const struct component_ops hdmi_component_ops = {
+ .bind = hdmi_bind,
+ .unbind = hdmi_unbind,
+};
+
+static struct device_node *hdmi_legacy_ddc_dt_binding(struct device *dev)
+{
+ const char *compatible_str = "samsung,exynos4210-hdmiddc";
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, compatible_str);
+ if (np)
+ return of_get_next_parent(np);
+
+ return NULL;
+}
+
+static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev)
+{
+ const char *compatible_str = "samsung,exynos4212-hdmiphy";
+
+ return of_find_compatible_node(NULL, NULL, compatible_str);
+}
+
+static int hdmi_probe(struct platform_device *pdev)
+{
+ struct device_node *ddc_node, *phy_node;
+ struct s5p_hdmi_platform_data *pdata;
+ struct hdmi_driver_data *drv_data;
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct hdmi_context *hdata;
+ struct resource *res;
+ int ret;
+
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+ hdmi_display.type);
+ if (ret)
+ return ret;
+
+ if (!dev->of_node) {
+ ret = -ENODEV;
+ goto err_del_component;
+ }
+
+ pdata = drm_hdmi_dt_parse_pdata(dev);
+ if (!pdata) {
+ ret = -EINVAL;
+ goto err_del_component;
+ }
+
+ hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
+ if (!hdata) {
+ ret = -ENOMEM;
+ goto err_del_component;
+ }
+
+ mutex_init(&hdata->hdmi_mutex);
+
+ platform_set_drvdata(pdev, &hdmi_display);
+
+ match = of_match_node(hdmi_match_types, dev->of_node);
+ if (!match) {
+ ret = -ENODEV;
+ goto err_del_component;
+ }
+
+ drv_data = (struct hdmi_driver_data *)match->data;
+ hdata->type = drv_data->type;
+ hdata->phy_confs = drv_data->phy_confs;
+ hdata->phy_conf_count = drv_data->phy_conf_count;
+
+ hdata->hpd_gpio = pdata->hpd_gpio;
+ hdata->dev = dev;
+
+ ret = hdmi_resources_init(hdata);
+	if (ret) {
+		DRM_ERROR("hdmi_resources_init failed\n");
+		goto err_del_component;
+	}
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdata->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hdata->regs)) {
+ ret = PTR_ERR(hdata->regs);
+ goto err_del_component;
+ }
+
+ ret = devm_gpio_request(dev, hdata->hpd_gpio, "HPD");
+ if (ret) {
+ DRM_ERROR("failed to request HPD gpio\n");
+ goto err_del_component;
+ }
+
+ ddc_node = hdmi_legacy_ddc_dt_binding(dev);
+ if (ddc_node)
+ goto out_get_ddc_adpt;
+
+ /* DDC i2c driver */
+ ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
+ if (!ddc_node) {
+ DRM_ERROR("Failed to find ddc node in device tree\n");
+ ret = -ENODEV;
+ goto err_del_component;
+ }
+
+out_get_ddc_adpt:
+ hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node);
+	if (!hdata->ddc_adpt) {
+		DRM_ERROR("Failed to get ddc i2c adapter by node\n");
+		ret = -EPROBE_DEFER;
+		goto err_del_component;
+	}
+
+ phy_node = hdmi_legacy_phy_dt_binding(dev);
+ if (phy_node)
+ goto out_get_phy_port;
+
+ /* hdmiphy i2c driver */
+ phy_node = of_parse_phandle(dev->of_node, "phy", 0);
+ if (!phy_node) {
+ DRM_ERROR("Failed to find hdmiphy node in device tree\n");
+ ret = -ENODEV;
+ goto err_ddc;
+ }
+
+out_get_phy_port:
+ if (drv_data->is_apb_phy) {
+ hdata->regs_hdmiphy = of_iomap(phy_node, 0);
+ if (!hdata->regs_hdmiphy) {
+ DRM_ERROR("failed to ioremap hdmi phy\n");
+ ret = -ENOMEM;
+ goto err_ddc;
+ }
+ } else {
+ hdata->hdmiphy_port = of_find_i2c_device_by_node(phy_node);
+ if (!hdata->hdmiphy_port) {
+ DRM_ERROR("Failed to get hdmi phy i2c client\n");
+ ret = -EPROBE_DEFER;
+ goto err_ddc;
+ }
+ }
+
+ hdata->irq = gpio_to_irq(hdata->hpd_gpio);
+ if (hdata->irq < 0) {
+ DRM_ERROR("failed to get GPIO irq\n");
+ ret = hdata->irq;
+ goto err_hdmiphy;
+ }
+
+ hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+
+ INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func);
+
+ ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
+ hdmi_irq_thread, IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "hdmi", hdata);
+ if (ret) {
+ DRM_ERROR("failed to register hdmi interrupt\n");
+ goto err_hdmiphy;
+ }
+
+ hdata->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,syscon-phandle");
+ if (IS_ERR(hdata->pmureg)) {
+ DRM_ERROR("syscon regmap lookup failed.\n");
+ ret = -EPROBE_DEFER;
+ goto err_hdmiphy;
+ }
+
+ pm_runtime_enable(dev);
+ hdmi_display.ctx = hdata;
+
+ ret = component_add(&pdev->dev, &hdmi_component_ops);
+ if (ret)
+ goto err_disable_pm_runtime;
+
+ return ret;
+
+err_disable_pm_runtime:
+ pm_runtime_disable(dev);
+
+err_hdmiphy:
+ if (hdata->hdmiphy_port)
+ put_device(&hdata->hdmiphy_port->dev);
+err_ddc:
+ put_device(&hdata->ddc_adpt->dev);
+
+err_del_component:
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+
+ return ret;
+}
+
+static int hdmi_remove(struct platform_device *pdev)
+{
+ struct hdmi_context *hdata = hdmi_display.ctx;
+
+ cancel_delayed_work_sync(&hdata->hotplug_work);
+
+ put_device(&hdata->hdmiphy_port->dev);
+ put_device(&hdata->ddc_adpt->dev);
+
+ pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &hdmi_component_ops);
+
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+ return 0;
+}
+
+struct platform_driver hdmi_driver = {
+ .probe = hdmi_probe,
+ .remove = hdmi_remove,
+ .driver = {
+ .name = "exynos-hdmi",
+ .owner = THIS_MODULE,
+ .of_match_table = hdmi_match_types,
+ },
+};
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
new file mode 100644
index 00000000000..7529946d0a7
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -0,0 +1,1327 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * Based on drivers/media/video/s5p-tv/mixer_reg.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <drm/drmP.h>
+
+#include "regs-mixer.h"
+#include "regs-vp.h"
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+#include <linux/component.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_mixer.h"
+
+#define get_mixer_manager(dev) platform_get_drvdata(to_platform_device(dev))
+
+#define MIXER_WIN_NR 3
+#define MIXER_DEFAULT_WIN 0
+
+struct hdmi_win_data {
+ dma_addr_t dma_addr;
+ dma_addr_t chroma_dma_addr;
+ uint32_t pixel_format;
+ unsigned int bpp;
+ unsigned int crtc_x;
+ unsigned int crtc_y;
+ unsigned int crtc_width;
+ unsigned int crtc_height;
+ unsigned int fb_x;
+ unsigned int fb_y;
+ unsigned int fb_width;
+ unsigned int fb_height;
+ unsigned int src_width;
+ unsigned int src_height;
+ unsigned int mode_width;
+ unsigned int mode_height;
+ unsigned int scan_flags;
+ bool enabled;
+ bool resume;
+};
+
+struct mixer_resources {
+ int irq;
+ void __iomem *mixer_regs;
+ void __iomem *vp_regs;
+ spinlock_t reg_slock;
+ struct clk *mixer;
+ struct clk *vp;
+ struct clk *sclk_mixer;
+ struct clk *sclk_hdmi;
+ struct clk *sclk_dac;
+};
+
+enum mixer_version_id {
+ MXR_VER_0_0_0_16,
+ MXR_VER_16_0_33_0,
+ MXR_VER_128_0_0_184,
+};
+
+struct mixer_context {
+ struct platform_device *pdev;
+ struct device *dev;
+ struct drm_device *drm_dev;
+ int pipe;
+ bool interlace;
+ bool powered;
+ bool vp_enabled;
+ u32 int_en;
+
+ struct mutex mixer_mutex;
+ struct mixer_resources mixer_res;
+ struct hdmi_win_data win_data[MIXER_WIN_NR];
+ enum mixer_version_id mxr_ver;
+ wait_queue_head_t wait_vsync_queue;
+ atomic_t wait_vsync_event;
+};
+
+struct mixer_drv_data {
+ enum mixer_version_id version;
+ bool is_vp_enabled;
+};
+
+static const u8 filter_y_horiz_tap8[] = {
+ 0, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 0, 0, 0,
+ 0, 2, 4, 5, 6, 6, 6, 6,
+ 6, 5, 5, 4, 3, 2, 1, 1,
+ 0, -6, -12, -16, -18, -20, -21, -20,
+ -20, -18, -16, -13, -10, -8, -5, -2,
+ 127, 126, 125, 121, 114, 107, 99, 89,
+ 79, 68, 57, 46, 35, 25, 16, 8,
+};
+
+static const u8 filter_y_vert_tap4[] = {
+ 0, -3, -6, -8, -8, -8, -8, -7,
+ -6, -5, -4, -3, -2, -1, -1, 0,
+ 127, 126, 124, 118, 111, 102, 92, 81,
+ 70, 59, 48, 37, 27, 19, 11, 5,
+ 0, 5, 11, 19, 27, 37, 48, 59,
+ 70, 81, 92, 102, 111, 118, 124, 126,
+ 0, 0, -1, -1, -2, -3, -4, -5,
+ -6, -7, -8, -8, -8, -8, -6, -3,
+};
+
+static const u8 filter_cr_horiz_tap4[] = {
+ 0, -3, -6, -8, -8, -8, -8, -7,
+ -6, -5, -4, -3, -2, -1, -1, 0,
+ 127, 126, 124, 118, 111, 102, 92, 81,
+ 70, 59, 48, 37, 27, 19, 11, 5,
+};
+
+static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id)
+{
+ return readl(res->vp_regs + reg_id);
+}
+
+static inline void vp_reg_write(struct mixer_resources *res, u32 reg_id,
+ u32 val)
+{
+ writel(val, res->vp_regs + reg_id);
+}
+
+static inline void vp_reg_writemask(struct mixer_resources *res, u32 reg_id,
+ u32 val, u32 mask)
+{
+ u32 old = vp_reg_read(res, reg_id);
+
+ val = (val & mask) | (old & ~mask);
+ writel(val, res->vp_regs + reg_id);
+}
+
+static inline u32 mixer_reg_read(struct mixer_resources *res, u32 reg_id)
+{
+ return readl(res->mixer_regs + reg_id);
+}
+
+static inline void mixer_reg_write(struct mixer_resources *res, u32 reg_id,
+ u32 val)
+{
+ writel(val, res->mixer_regs + reg_id);
+}
+
+static inline void mixer_reg_writemask(struct mixer_resources *res,
+ u32 reg_id, u32 val, u32 mask)
+{
+ u32 old = mixer_reg_read(res, reg_id);
+
+ val = (val & mask) | (old & ~mask);
+ writel(val, res->mixer_regs + reg_id);
+}
+
+static void mixer_regs_dump(struct mixer_context *ctx)
+{
+#define DUMPREG(reg_id) \
+do { \
+ DRM_DEBUG_KMS(#reg_id " = %08x\n", \
+ (u32)readl(ctx->mixer_res.mixer_regs + reg_id)); \
+} while (0)
+
+ DUMPREG(MXR_STATUS);
+ DUMPREG(MXR_CFG);
+ DUMPREG(MXR_INT_EN);
+ DUMPREG(MXR_INT_STATUS);
+
+ DUMPREG(MXR_LAYER_CFG);
+ DUMPREG(MXR_VIDEO_CFG);
+
+ DUMPREG(MXR_GRAPHIC0_CFG);
+ DUMPREG(MXR_GRAPHIC0_BASE);
+ DUMPREG(MXR_GRAPHIC0_SPAN);
+ DUMPREG(MXR_GRAPHIC0_WH);
+ DUMPREG(MXR_GRAPHIC0_SXY);
+ DUMPREG(MXR_GRAPHIC0_DXY);
+
+ DUMPREG(MXR_GRAPHIC1_CFG);
+ DUMPREG(MXR_GRAPHIC1_BASE);
+ DUMPREG(MXR_GRAPHIC1_SPAN);
+ DUMPREG(MXR_GRAPHIC1_WH);
+ DUMPREG(MXR_GRAPHIC1_SXY);
+ DUMPREG(MXR_GRAPHIC1_DXY);
+#undef DUMPREG
+}
+
+static void vp_regs_dump(struct mixer_context *ctx)
+{
+#define DUMPREG(reg_id) \
+do { \
+ DRM_DEBUG_KMS(#reg_id " = %08x\n", \
+ (u32) readl(ctx->mixer_res.vp_regs + reg_id)); \
+} while (0)
+
+ DUMPREG(VP_ENABLE);
+ DUMPREG(VP_SRESET);
+ DUMPREG(VP_SHADOW_UPDATE);
+ DUMPREG(VP_FIELD_ID);
+ DUMPREG(VP_MODE);
+ DUMPREG(VP_IMG_SIZE_Y);
+ DUMPREG(VP_IMG_SIZE_C);
+ DUMPREG(VP_PER_RATE_CTRL);
+ DUMPREG(VP_TOP_Y_PTR);
+ DUMPREG(VP_BOT_Y_PTR);
+ DUMPREG(VP_TOP_C_PTR);
+ DUMPREG(VP_BOT_C_PTR);
+ DUMPREG(VP_ENDIAN_MODE);
+ DUMPREG(VP_SRC_H_POSITION);
+ DUMPREG(VP_SRC_V_POSITION);
+ DUMPREG(VP_SRC_WIDTH);
+ DUMPREG(VP_SRC_HEIGHT);
+ DUMPREG(VP_DST_H_POSITION);
+ DUMPREG(VP_DST_V_POSITION);
+ DUMPREG(VP_DST_WIDTH);
+ DUMPREG(VP_DST_HEIGHT);
+ DUMPREG(VP_H_RATIO);
+ DUMPREG(VP_V_RATIO);
+
+#undef DUMPREG
+}
+
+static inline void vp_filter_set(struct mixer_resources *res,
+ int reg_id, const u8 *data, unsigned int size)
+{
+	/* the table size must be a multiple of 4 bytes */
+ BUG_ON(size & 3);
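+	/* each 32-bit register packs four consecutive coefficients, MSB first */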
+ for (; size; size -= 4, reg_id += 4, data += 4) {
+ u32 val = (data[0] << 24) | (data[1] << 16) |
+ (data[2] << 8) | data[3];
+ vp_reg_write(res, reg_id, val);
+ }
+}
+
+static void vp_default_filter(struct mixer_resources *res)
+{
+ vp_filter_set(res, VP_POLY8_Y0_LL,
+ filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
+ vp_filter_set(res, VP_POLY4_Y0_LL,
+ filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
+ vp_filter_set(res, VP_POLY4_C0_LL,
+ filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
+}
+
+static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+	/*
+	 * When enabled, shadow register updates are synchronized to vsync;
+	 * callers disable this around multi-register updates so that the
+	 * new configuration is applied atomically.
+	 */
+ mixer_reg_writemask(res, MXR_STATUS, enable ?
+ MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
+
+ if (ctx->vp_enabled)
+ vp_reg_write(res, VP_SHADOW_UPDATE, enable ?
+ VP_SHADOW_UPDATE_ENABLE : 0);
+}
+
+static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val;
+
+ /* choosing between interlace and progressive mode */
+ val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
+ MXR_CFG_SCAN_PROGRASSIVE);
+
+ if (ctx->mxr_ver != MXR_VER_128_0_0_184) {
+ /* choosing between proper HD and SD mode */
+ if (height <= 480)
+ val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
+ else if (height <= 576)
+ val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
+ else if (height <= 720)
+ val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+ else if (height <= 1080)
+ val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
+ else
+ val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+ }
+
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK);
+}
+
+static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val;
+
+	if (height == 480 || height == 576) {
+		/* SD modes use the full-range Rec.601 colorspace */
+		val = MXR_CFG_RGB601_0_255;
+	} else {
+		/* 720p, 1080p and any other mode share the limited-range
+		 * Rec.709 setup */
+		val = MXR_CFG_RGB709_16_235;
+		mixer_reg_write(res, MXR_CM_COEFF_Y,
+				(1 << 30) | (94 << 20) | (314 << 10) |
+				(32 << 0));
+		mixer_reg_write(res, MXR_CM_COEFF_CB,
+				(972 << 20) | (851 << 10) | (225 << 0));
+		mixer_reg_write(res, MXR_CM_COEFF_CR,
+				(225 << 20) | (820 << 10) | (1004 << 0));
+	}
+
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
+}
+
+static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val = enable ? ~0 : 0;
+
+ switch (win) {
+ case 0:
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
+ break;
+ case 1:
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
+ break;
+ case 2:
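+		/* window 2 is the video processor overlay, when present */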
+ if (ctx->vp_enabled) {
+ vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
+ mixer_reg_writemask(res, MXR_CFG, val,
+ MXR_CFG_VP_ENABLE);
+ }
+ break;
+ }
+}
+
+static void mixer_run(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
+
+ mixer_regs_dump(ctx);
+}
+
+static void mixer_stop(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ int timeout = 20;
+
+ mixer_reg_writemask(res, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
+
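+	/* wait up to ~200ms (20 * 10ms) for the mixer to report idle */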
+ while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
+ --timeout)
+ usleep_range(10000, 12000);
+
+ mixer_regs_dump(ctx);
+}
+
+static void vp_video_buffer(struct mixer_context *ctx, int win)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ unsigned long flags;
+ struct hdmi_win_data *win_data;
+ unsigned int x_ratio, y_ratio;
+ unsigned int buf_num = 1;
+ dma_addr_t luma_addr[2], chroma_addr[2];
+ bool tiled_mode = false;
+ bool crcb_mode = false;
+ u32 val;
+
+ win_data = &ctx->win_data[win];
+
+ switch (win_data->pixel_format) {
+ case DRM_FORMAT_NV12MT:
+ tiled_mode = true;
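+		/* fall through */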
+ case DRM_FORMAT_NV12:
+ crcb_mode = false;
+ buf_num = 2;
+ break;
+ /* TODO: single buffer format NV12, NV21 */
+ default:
+ /* ignore pixel format at disable time */
+ if (!win_data->dma_addr)
+ break;
+
+ DRM_ERROR("pixel format for vp is wrong [%d].\n",
+ win_data->pixel_format);
+ return;
+ }
+
+ /* scaling feature: (src << 16) / dst */
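+	/*
+	 * The result is 16.16 fixed point: e.g. a 1920-pixel-wide source
+	 * scanned out into a 960-pixel-wide window gives a ratio of
+	 * 2 << 16, i.e. a downscale by two.
+	 */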
+ x_ratio = (win_data->src_width << 16) / win_data->crtc_width;
+ y_ratio = (win_data->src_height << 16) / win_data->crtc_height;
+
+ if (buf_num == 2) {
+ luma_addr[0] = win_data->dma_addr;
+ chroma_addr[0] = win_data->chroma_dma_addr;
+ } else {
+ luma_addr[0] = win_data->dma_addr;
+ chroma_addr[0] = win_data->dma_addr
+ + (win_data->fb_width * win_data->fb_height);
+ }
+
+ if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
+ ctx->interlace = true;
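+		/*
+		 * The bottom field starts one source line below the top
+		 * field: 0x40 bytes (one line within the 64-byte-wide tile)
+		 * for NV12MT, or one fb_width stride for linear layouts.
+		 */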
+ if (tiled_mode) {
+ luma_addr[1] = luma_addr[0] + 0x40;
+ chroma_addr[1] = chroma_addr[0] + 0x40;
+ } else {
+ luma_addr[1] = luma_addr[0] + win_data->fb_width;
+ chroma_addr[1] = chroma_addr[0] + win_data->fb_width;
+ }
+ } else {
+ ctx->interlace = false;
+ luma_addr[1] = 0;
+ chroma_addr[1] = 0;
+ }
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(ctx, false);
+
+ /* interlace or progressive scan mode */
+ val = (ctx->interlace ? ~0 : 0);
+ vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
+
+ /* setup format */
+ val = (crcb_mode ? VP_MODE_NV21 : VP_MODE_NV12);
+ val |= (tiled_mode ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
+ vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
+
+ /* setting size of input image */
+ vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) |
+ VP_IMG_VSIZE(win_data->fb_height));
+	/* chroma height has to be halved to avoid chroma distortions */
+ vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) |
+ VP_IMG_VSIZE(win_data->fb_height / 2));
+
+ vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
+ vp_reg_write(res, VP_SRC_HEIGHT, win_data->src_height);
+ vp_reg_write(res, VP_SRC_H_POSITION,
+ VP_SRC_H_POSITION_VAL(win_data->fb_x));
+ vp_reg_write(res, VP_SRC_V_POSITION, win_data->fb_y);
+
+ vp_reg_write(res, VP_DST_WIDTH, win_data->crtc_width);
+ vp_reg_write(res, VP_DST_H_POSITION, win_data->crtc_x);
+ if (ctx->interlace) {
+ vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height / 2);
+ vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y / 2);
+ } else {
+ vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height);
+ vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y);
+ }
+
+ vp_reg_write(res, VP_H_RATIO, x_ratio);
+ vp_reg_write(res, VP_V_RATIO, y_ratio);
+
+ vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
+
+ /* set buffer address to vp */
+ vp_reg_write(res, VP_TOP_Y_PTR, luma_addr[0]);
+ vp_reg_write(res, VP_BOT_Y_PTR, luma_addr[1]);
+ vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
+ vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
+
+ mixer_cfg_scan(ctx, win_data->mode_height);
+ mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
+ mixer_cfg_layer(ctx, win, true);
+ mixer_run(ctx);
+
+ mixer_vsync_set_update(ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+
+ vp_regs_dump(ctx);
+}
+
+static void mixer_layer_update(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+}
+
+static void mixer_graph_buffer(struct mixer_context *ctx, int win)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ unsigned long flags;
+ struct hdmi_win_data *win_data;
+ unsigned int x_ratio, y_ratio;
+ unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
+ dma_addr_t dma_addr;
+ unsigned int fmt;
+ u32 val;
+
+ win_data = &ctx->win_data[win];
+
+ #define RGB565 4
+ #define ARGB1555 5
+ #define ARGB4444 6
+ #define ARGB8888 7
+
+ switch (win_data->bpp) {
+ case 16:
+ fmt = ARGB4444;
+ break;
+ case 32:
+ fmt = ARGB8888;
+ break;
+	default:
+		fmt = ARGB8888;
+		break;
+	}
+
+	/* the hardware's 2x scaling feature is left unused; ratio 0 is 1:1 */
+ x_ratio = 0;
+ y_ratio = 0;
+
+ dst_x_offset = win_data->crtc_x;
+ dst_y_offset = win_data->crtc_y;
+
+	/*
+	 * Offset the dma base address to pixel (fb_x, fb_y) of the
+	 * framebuffer; "* bpp >> 3" converts a pixel count to bytes.
+	 */
+ dma_addr = win_data->dma_addr
+ + (win_data->fb_x * win_data->bpp >> 3)
+ + (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3);
+ src_x_offset = 0;
+ src_y_offset = 0;
+
+ if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE)
+ ctx->interlace = true;
+ else
+ ctx->interlace = false;
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(ctx, false);
+
+ /* setup format */
+ mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
+ MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
+
+ /* setup geometry */
+ mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);
+
+ /* setup display size */
+ if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
+ win == MIXER_DEFAULT_WIN) {
+ val = MXR_MXR_RES_HEIGHT(win_data->fb_height);
+ val |= MXR_MXR_RES_WIDTH(win_data->fb_width);
+ mixer_reg_write(res, MXR_RESOLUTION, val);
+ }
+
+ val = MXR_GRP_WH_WIDTH(win_data->crtc_width);
+ val |= MXR_GRP_WH_HEIGHT(win_data->crtc_height);
+ val |= MXR_GRP_WH_H_SCALE(x_ratio);
+ val |= MXR_GRP_WH_V_SCALE(y_ratio);
+ mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
+
+ /* setup offsets in source image */
+ val = MXR_GRP_SXY_SX(src_x_offset);
+ val |= MXR_GRP_SXY_SY(src_y_offset);
+ mixer_reg_write(res, MXR_GRAPHIC_SXY(win), val);
+
+ /* setup offsets in display image */
+ val = MXR_GRP_DXY_DX(dst_x_offset);
+ val |= MXR_GRP_DXY_DY(dst_y_offset);
+ mixer_reg_write(res, MXR_GRAPHIC_DXY(win), val);
+
+ /* set buffer address to mixer */
+ mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
+
+ mixer_cfg_scan(ctx, win_data->mode_height);
+ mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
+ mixer_cfg_layer(ctx, win, true);
+
+	/* an explicit layer update is mandatory on mixer 16.0.33.0
+	 * and 128.0.0.184 */
+ if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
+ ctx->mxr_ver == MXR_VER_128_0_0_184)
+ mixer_layer_update(ctx);
+
+ mixer_run(ctx);
+
+ mixer_vsync_set_update(ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static void vp_win_reset(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ int tries = 100;
+
+ vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING);
+ for (tries = 100; tries; --tries) {
+ /* waiting until VP_SRESET_PROCESSING is 0 */
+ if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
+ break;
+ usleep_range(10000, 12000);
+ }
+ WARN(tries == 0, "failed to reset Video Processor\n");
+}
+
+static void mixer_win_reset(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ unsigned long flags;
+ u32 val; /* value stored to register */
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(ctx, false);
+
+ mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
+
+ /* set output in RGB888 mode */
+ mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
+
+ /* 16 beat burst in DMA */
+ mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
+ MXR_STATUS_BURST_MASK);
+
+ /* setting default layer priority: layer1 > layer0 > video
+ * because typical usage scenario would be
+ * layer1 - OSD
+ * layer0 - framebuffer
+ * video - video overlay
+ */
+ val = MXR_LAYER_CFG_GRP1_VAL(3);
+ val |= MXR_LAYER_CFG_GRP0_VAL(2);
+ if (ctx->vp_enabled)
+ val |= MXR_LAYER_CFG_VP_VAL(1);
+ mixer_reg_write(res, MXR_LAYER_CFG, val);
+
+ /* setting background color */
+ mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
+ mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
+ mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
+
+ /* setting graphical layers */
+ val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
+ val |= MXR_GRP_CFG_WIN_BLEND_EN;
+ val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
+
+ /* Don't blend layer 0 onto the mixer background */
+ mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
+
+ /* Blend layer 1 into layer 0 */
+ val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+ val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
+ mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
+
+ /* setting video layers */
+ val = MXR_GRP_CFG_ALPHA_VAL(0);
+ mixer_reg_write(res, MXR_VIDEO_CFG, val);
+
+ if (ctx->vp_enabled) {
+ /* configuration of Video Processor Registers */
+ vp_win_reset(ctx);
+ vp_default_filter(res);
+ }
+
+ /* disable all layers */
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
+ if (ctx->vp_enabled)
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
+
+ mixer_vsync_set_update(ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static irqreturn_t mixer_irq_handler(int irq, void *arg)
+{
+ struct mixer_context *ctx = arg;
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val, base, shadow;
+
+ spin_lock(&res->reg_slock);
+
+	/* read the interrupt status; the VSYNC flag is handled and cleared below */
+ val = mixer_reg_read(res, MXR_INT_STATUS);
+
+ /* handling VSYNC */
+ if (val & MXR_INT_STATUS_VSYNC) {
+		/*
+		 * In interlaced mode, report the flip only after the
+		 * shadow registers have latched the new base address.
+		 */
+ if (ctx->interlace) {
+ base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
+ shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
+ if (base != shadow)
+ goto out;
+
+ base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1));
+ shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
+ if (base != shadow)
+ goto out;
+ }
+
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ wake_up(&ctx->wait_vsync_queue);
+ }
+ }
+
+out:
+ /* clear interrupts */
+ if (~val & MXR_INT_EN_VSYNC) {
+		/* the vsync interrupt uses different bits for status and clear */
+ val &= ~MXR_INT_EN_VSYNC;
+ val |= MXR_INT_CLEAR_VSYNC;
+ }
+ mixer_reg_write(res, MXR_INT_STATUS, val);
+
+ spin_unlock(&res->reg_slock);
+
+ return IRQ_HANDLED;
+}
+
+static int mixer_resources_init(struct mixer_context *mixer_ctx)
+{
+ struct device *dev = &mixer_ctx->pdev->dev;
+ struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
+ struct resource *res;
+ int ret;
+
+ spin_lock_init(&mixer_res->reg_slock);
+
+ mixer_res->mixer = devm_clk_get(dev, "mixer");
+ if (IS_ERR(mixer_res->mixer)) {
+ dev_err(dev, "failed to get clock 'mixer'\n");
+ return -ENODEV;
+ }
+
+ mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
+ if (IS_ERR(mixer_res->sclk_hdmi)) {
+ dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
+ return -ENODEV;
+ }
+ res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "get memory resource failed.\n");
+ return -ENXIO;
+ }
+
+ mixer_res->mixer_regs = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (mixer_res->mixer_regs == NULL) {
+ dev_err(dev, "register mapping failed.\n");
+ return -ENXIO;
+ }
+
+ res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(dev, "get interrupt resource failed.\n");
+ return -ENXIO;
+ }
+
+ ret = devm_request_irq(dev, res->start, mixer_irq_handler,
+ 0, "drm_mixer", mixer_ctx);
+ if (ret) {
+ dev_err(dev, "request interrupt failed.\n");
+ return ret;
+ }
+ mixer_res->irq = res->start;
+
+ return 0;
+}
+
+static int vp_resources_init(struct mixer_context *mixer_ctx)
+{
+ struct device *dev = &mixer_ctx->pdev->dev;
+ struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
+ struct resource *res;
+
+ mixer_res->vp = devm_clk_get(dev, "vp");
+ if (IS_ERR(mixer_res->vp)) {
+ dev_err(dev, "failed to get clock 'vp'\n");
+ return -ENODEV;
+ }
+ mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
+ if (IS_ERR(mixer_res->sclk_mixer)) {
+ dev_err(dev, "failed to get clock 'sclk_mixer'\n");
+ return -ENODEV;
+ }
+ mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
+ if (IS_ERR(mixer_res->sclk_dac)) {
+ dev_err(dev, "failed to get clock 'sclk_dac'\n");
+ return -ENODEV;
+ }
+
+ if (mixer_res->sclk_hdmi)
+ clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
+
+ res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 1);
+ if (res == NULL) {
+ dev_err(dev, "get memory resource failed.\n");
+ return -ENXIO;
+ }
+
+ mixer_res->vp_regs = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (mixer_res->vp_regs == NULL) {
+ dev_err(dev, "register mapping failed.\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int mixer_initialize(struct exynos_drm_manager *mgr,
+ struct drm_device *drm_dev)
+{
+ int ret;
+ struct mixer_context *mixer_ctx = mgr->ctx;
+	struct exynos_drm_private *priv = drm_dev->dev_private;
+
+ mgr->drm_dev = mixer_ctx->drm_dev = drm_dev;
+ mgr->pipe = mixer_ctx->pipe = priv->pipe++;
+
+ /* acquire resources: regs, irqs, clocks */
+ ret = mixer_resources_init(mixer_ctx);
+ if (ret) {
+ DRM_ERROR("mixer_resources_init failed ret=%d\n", ret);
+ return ret;
+ }
+
+ if (mixer_ctx->vp_enabled) {
+ /* acquire vp resources: regs, irqs, clocks */
+ ret = vp_resources_init(mixer_ctx);
+ if (ret) {
+ DRM_ERROR("vp_resources_init failed ret=%d\n", ret);
+ return ret;
+ }
+ }
+
+ if (!is_drm_iommu_supported(mixer_ctx->drm_dev))
+ return 0;
+
+ return drm_iommu_attach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+}
+
+static void mixer_mgr_remove(struct exynos_drm_manager *mgr)
+{
+ struct mixer_context *mixer_ctx = mgr->ctx;
+
+ if (is_drm_iommu_supported(mixer_ctx->drm_dev))
+ drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+}
+
+static int mixer_enable_vblank(struct exynos_drm_manager *mgr)
+{
+ struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_resources *res = &mixer_ctx->mixer_res;
+
+ if (!mixer_ctx->powered) {
+ mixer_ctx->int_en |= MXR_INT_EN_VSYNC;
+ return 0;
+ }
+
+ /* enable vsync interrupt */
+ mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
+ MXR_INT_EN_VSYNC);
+
+ return 0;
+}
+
+static void mixer_disable_vblank(struct exynos_drm_manager *mgr)
+{
+ struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_resources *res = &mixer_ctx->mixer_res;
+
+ /* disable vsync interrupt */
+ mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
+}
+
+static void mixer_win_mode_set(struct exynos_drm_manager *mgr,
+ struct exynos_drm_overlay *overlay)
+{
+ struct mixer_context *mixer_ctx = mgr->ctx;
+ struct hdmi_win_data *win_data;
+ int win;
+
+ if (!overlay) {
+ DRM_ERROR("overlay is NULL\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("set [%d]x[%d] at (%d,%d) to [%d]x[%d] at (%d,%d)\n",
+ overlay->fb_width, overlay->fb_height,
+ overlay->fb_x, overlay->fb_y,
+ overlay->crtc_width, overlay->crtc_height,
+ overlay->crtc_x, overlay->crtc_y);
+
+ win = overlay->zpos;
+ if (win == DEFAULT_ZPOS)
+ win = MIXER_DEFAULT_WIN;
+
+ if (win < 0 || win >= MIXER_WIN_NR) {
+ DRM_ERROR("mixer window[%d] is wrong\n", win);
+ return;
+ }
+
+ win_data = &mixer_ctx->win_data[win];
+
+ win_data->dma_addr = overlay->dma_addr[0];
+ win_data->chroma_dma_addr = overlay->dma_addr[1];
+ win_data->pixel_format = overlay->pixel_format;
+ win_data->bpp = overlay->bpp;
+
+ win_data->crtc_x = overlay->crtc_x;
+ win_data->crtc_y = overlay->crtc_y;
+ win_data->crtc_width = overlay->crtc_width;
+ win_data->crtc_height = overlay->crtc_height;
+
+ win_data->fb_x = overlay->fb_x;
+ win_data->fb_y = overlay->fb_y;
+ win_data->fb_width = overlay->fb_width;
+ win_data->fb_height = overlay->fb_height;
+ win_data->src_width = overlay->src_width;
+ win_data->src_height = overlay->src_height;
+
+ win_data->mode_width = overlay->mode_width;
+ win_data->mode_height = overlay->mode_height;
+
+ win_data->scan_flags = overlay->scan_flag;
+}
+
+static void mixer_win_commit(struct exynos_drm_manager *mgr, int zpos)
+{
+ struct mixer_context *mixer_ctx = mgr->ctx;
+ int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
+
+ DRM_DEBUG_KMS("win: %d\n", win);
+
+ mutex_lock(&mixer_ctx->mixer_mutex);
+ if (!mixer_ctx->powered) {
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+ return;
+ }
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+
+ if (win > 1 && mixer_ctx->vp_enabled)
+ vp_video_buffer(mixer_ctx, win);
+ else
+ mixer_graph_buffer(mixer_ctx, win);
+
+ mixer_ctx->win_data[win].enabled = true;
+}
+
+static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
+{
+ struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_resources *res = &mixer_ctx->mixer_res;
+ int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("win: %d\n", win);
+
+ mutex_lock(&mixer_ctx->mixer_mutex);
+ if (!mixer_ctx->powered) {
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+ mixer_ctx->win_data[win].resume = false;
+ return;
+ }
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(mixer_ctx, false);
+
+ mixer_cfg_layer(mixer_ctx, win, false);
+
+ mixer_vsync_set_update(mixer_ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+
+ mixer_ctx->win_data[win].enabled = false;
+}
+
+static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
+{
+ struct mixer_context *mixer_ctx = mgr->ctx;
+
+ mutex_lock(&mixer_ctx->mixer_mutex);
+ if (!mixer_ctx->powered) {
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+ return;
+ }
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+
+ drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+
+ atomic_set(&mixer_ctx->wait_vsync_event, 1);
+
+	/*
+	 * wait for the MIXER to signal a VSYNC interrupt, or give up after
+	 * a 50ms timeout (i.e. a worst-case refresh rate of 20Hz).
+	 */
+ if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
+ !atomic_read(&mixer_ctx->wait_vsync_event),
+ HZ/20))
+ DRM_DEBUG_KMS("vblank wait timed out.\n");
+
+ drm_vblank_put(mgr->crtc->dev, mixer_ctx->pipe);
+}
+
+static void mixer_window_suspend(struct exynos_drm_manager *mgr)
+{
+ struct mixer_context *ctx = mgr->ctx;
+ struct hdmi_win_data *win_data;
+ int i;
+
+ for (i = 0; i < MIXER_WIN_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->resume = win_data->enabled;
+ mixer_win_disable(mgr, i);
+ }
+ mixer_wait_for_vblank(mgr);
+}
+
+static void mixer_window_resume(struct exynos_drm_manager *mgr)
+{
+ struct mixer_context *ctx = mgr->ctx;
+ struct hdmi_win_data *win_data;
+ int i;
+
+ for (i = 0; i < MIXER_WIN_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->enabled = win_data->resume;
+ win_data->resume = false;
+ if (win_data->enabled)
+ mixer_win_commit(mgr, i);
+ }
+}
+
+static void mixer_poweron(struct exynos_drm_manager *mgr)
+{
+ struct mixer_context *ctx = mgr->ctx;
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ mutex_lock(&ctx->mixer_mutex);
+ if (ctx->powered) {
+ mutex_unlock(&ctx->mixer_mutex);
+ return;
+ }
+
+ mutex_unlock(&ctx->mixer_mutex);
+
+ pm_runtime_get_sync(ctx->dev);
+
+ clk_prepare_enable(res->mixer);
+ if (ctx->vp_enabled) {
+ clk_prepare_enable(res->vp);
+ clk_prepare_enable(res->sclk_mixer);
+ }
+
+ mutex_lock(&ctx->mixer_mutex);
+ ctx->powered = true;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
+
+ mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+ mixer_win_reset(ctx);
+
+ mixer_window_resume(mgr);
+}
+
+static void mixer_poweroff(struct exynos_drm_manager *mgr)
+{
+ struct mixer_context *ctx = mgr->ctx;
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ mutex_lock(&ctx->mixer_mutex);
+ if (!ctx->powered) {
+ mutex_unlock(&ctx->mixer_mutex);
+ return;
+ }
+ mutex_unlock(&ctx->mixer_mutex);
+
+ mixer_stop(ctx);
+ mixer_window_suspend(mgr);
+
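+	/* save the interrupt mask so mixer_poweron() can restore it */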
+ ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+
+ mutex_lock(&ctx->mixer_mutex);
+ ctx->powered = false;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ clk_disable_unprepare(res->mixer);
+ if (ctx->vp_enabled) {
+ clk_disable_unprepare(res->vp);
+ clk_disable_unprepare(res->sclk_mixer);
+ }
+
+ pm_runtime_put_sync(ctx->dev);
+}
+
+static void mixer_dpms(struct exynos_drm_manager *mgr, int mode)
+{
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ mixer_poweron(mgr);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ mixer_poweroff(mgr);
+ break;
+ default:
+ DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+ break;
+ }
+}
+
+/* Only valid for Mixer version 16.0.33.0 */
+int mixer_check_mode(struct drm_display_mode *mode)
+{
+ u32 w, h;
+
+ w = mode->hdisplay;
+ h = mode->vdisplay;
+
+ DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n",
+ mode->hdisplay, mode->vdisplay, mode->vrefresh,
+ (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
+
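+	/* accept only timings inside the SD, 720p and 1080p ranges below */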
+ if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
+ (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
+ (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
+ return 0;
+
+ return -EINVAL;
+}
+
+static struct exynos_drm_manager_ops mixer_manager_ops = {
+ .dpms = mixer_dpms,
+ .enable_vblank = mixer_enable_vblank,
+ .disable_vblank = mixer_disable_vblank,
+ .wait_for_vblank = mixer_wait_for_vblank,
+ .win_mode_set = mixer_win_mode_set,
+ .win_commit = mixer_win_commit,
+ .win_disable = mixer_win_disable,
+};
+
+static struct exynos_drm_manager mixer_manager = {
+ .type = EXYNOS_DISPLAY_TYPE_HDMI,
+ .ops = &mixer_manager_ops,
+};
+
+static struct mixer_drv_data exynos5420_mxr_drv_data = {
+ .version = MXR_VER_128_0_0_184,
+ .is_vp_enabled = 0,
+};
+
+static struct mixer_drv_data exynos5250_mxr_drv_data = {
+ .version = MXR_VER_16_0_33_0,
+ .is_vp_enabled = 0,
+};
+
+static struct mixer_drv_data exynos4210_mxr_drv_data = {
+ .version = MXR_VER_0_0_0_16,
+ .is_vp_enabled = 1,
+};
+
+static struct platform_device_id mixer_driver_types[] = {
+ {
+ .name = "s5p-mixer",
+ .driver_data = (unsigned long)&exynos4210_mxr_drv_data,
+ }, {
+ .name = "exynos5-mixer",
+ .driver_data = (unsigned long)&exynos5250_mxr_drv_data,
+ }, {
+ /* end node */
+ }
+};
+
+static struct of_device_id mixer_match_types[] = {
+ {
+ .compatible = "samsung,exynos5-mixer",
+ .data = &exynos5250_mxr_drv_data,
+ }, {
+ .compatible = "samsung,exynos5250-mixer",
+ .data = &exynos5250_mxr_drv_data,
+ }, {
+ .compatible = "samsung,exynos5420-mixer",
+ .data = &exynos5420_mxr_drv_data,
+ }, {
+ /* end node */
+ }
+};
+
+static int mixer_bind(struct device *dev, struct device *manager, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm_dev = data;
+ struct mixer_context *ctx;
+ struct mixer_drv_data *drv;
+ int ret;
+
+	dev_info(dev, "bind start\n");
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ DRM_ERROR("failed to alloc mixer context.\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&ctx->mixer_mutex);
+
+ if (dev->of_node) {
+		const struct of_device_id *match;
+
+		match = of_match_node(mixer_match_types, dev->of_node);
+ drv = (struct mixer_drv_data *)match->data;
+ } else {
+ drv = (struct mixer_drv_data *)
+ platform_get_device_id(pdev)->driver_data;
+ }
+
+ ctx->pdev = pdev;
+ ctx->dev = dev;
+ ctx->vp_enabled = drv->is_vp_enabled;
+ ctx->mxr_ver = drv->version;
+ init_waitqueue_head(&ctx->wait_vsync_queue);
+ atomic_set(&ctx->wait_vsync_event, 0);
+
+ mixer_manager.ctx = ctx;
+ ret = mixer_initialize(&mixer_manager, drm_dev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, &mixer_manager);
+ ret = exynos_drm_crtc_create(&mixer_manager);
+ if (ret) {
+ mixer_mgr_remove(&mixer_manager);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static void mixer_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
+ struct drm_crtc *crtc = mgr->crtc;
+
+	mixer_mgr_remove(mgr);
+
+	pm_runtime_disable(dev);
+
+	crtc->funcs->destroy(crtc);
+
+	dev_info(dev, "unbind successful\n");
+}
+
+static const struct component_ops mixer_component_ops = {
+ .bind = mixer_bind,
+ .unbind = mixer_unbind,
+};
+
+static int mixer_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
+ mixer_manager.type);
+ if (ret)
+ return ret;
+
+ ret = component_add(&pdev->dev, &mixer_component_ops);
+ if (ret)
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+
+ return ret;
+}
+
+static int mixer_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &mixer_component_ops);
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+
+ return 0;
+}
+
+struct platform_driver mixer_driver = {
+ .driver = {
+ .name = "exynos-mixer",
+ .owner = THIS_MODULE,
+ .of_match_table = mixer_match_types,
+ },
+ .probe = mixer_probe,
+ .remove = mixer_remove,
+ .id_table = mixer_driver_types,
+};
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.h b/drivers/gpu/drm/exynos/exynos_mixer.h
new file mode 100644
index 00000000000..3811e417f0e
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_mixer.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _EXYNOS_MIXER_H_
+#define _EXYNOS_MIXER_H_
+
+/* This function returns 0 if the given timing is valid for the mixer */
+int mixer_check_mode(struct drm_display_mode *mode);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
new file mode 100644
index 00000000000..30496134a3d
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -0,0 +1,668 @@
+/* drivers/gpu/drm/exynos/regs-fimc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register definition file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef EXYNOS_REGS_FIMC_H
+#define EXYNOS_REGS_FIMC_H
+
+/*
+ * Register part
+*/
+/* Input source format */
+#define EXYNOS_CISRCFMT (0x00)
+/* Window offset */
+#define EXYNOS_CIWDOFST (0x04)
+/* Global control */
+#define EXYNOS_CIGCTRL (0x08)
+/* Window offset 2 */
+#define EXYNOS_CIWDOFST2 (0x14)
+/* Y 1st frame start address for output DMA */
+#define EXYNOS_CIOYSA1 (0x18)
+/* Y 2nd frame start address for output DMA */
+#define EXYNOS_CIOYSA2 (0x1c)
+/* Y 3rd frame start address for output DMA */
+#define EXYNOS_CIOYSA3 (0x20)
+/* Y 4th frame start address for output DMA */
+#define EXYNOS_CIOYSA4 (0x24)
+/* Cb 1st frame start address for output DMA */
+#define EXYNOS_CIOCBSA1 (0x28)
+/* Cb 2nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA2 (0x2c)
+/* Cb 3rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA3 (0x30)
+/* Cb 4th frame start address for output DMA */
+#define EXYNOS_CIOCBSA4 (0x34)
+/* Cr 1st frame start address for output DMA */
+#define EXYNOS_CIOCRSA1 (0x38)
+/* Cr 2nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA2 (0x3c)
+/* Cr 3rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA3 (0x40)
+/* Cr 4th frame start address for output DMA */
+#define EXYNOS_CIOCRSA4 (0x44)
+/* Target image format */
+#define EXYNOS_CITRGFMT (0x48)
+/* Output DMA control */
+#define EXYNOS_CIOCTRL (0x4c)
+/* Pre-scaler control 1 */
+#define EXYNOS_CISCPRERATIO (0x50)
+/* Pre-scaler control 2 */
+#define EXYNOS_CISCPREDST (0x54)
+/* Main scaler control */
+#define EXYNOS_CISCCTRL (0x58)
+/* Target area */
+#define EXYNOS_CITAREA (0x5c)
+/* Status */
+#define EXYNOS_CISTATUS (0x64)
+/* Status2 */
+#define EXYNOS_CISTATUS2 (0x68)
+/* Image capture enable command */
+#define EXYNOS_CIIMGCPT (0xc0)
+/* Capture sequence */
+#define EXYNOS_CICPTSEQ (0xc4)
+/* Image effects */
+#define EXYNOS_CIIMGEFF (0xd0)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA0 (0xd4)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA0 (0xd8)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA0 (0xdc)
+/* Input DMA Y Line Skip */
+#define EXYNOS_CIILINESKIP_Y (0xec)
+/* Input DMA Cb Line Skip */
+#define EXYNOS_CIILINESKIP_CB (0xf0)
+/* Input DMA Cr Line Skip */
+#define EXYNOS_CIILINESKIP_CR (0xf4)
+/* Real input DMA image size */
+#define EXYNOS_CIREAL_ISIZE (0xf8)
+/* Input DMA control */
+#define EXYNOS_MSCTRL (0xfc)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA1 (0x144)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA1 (0x148)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA1 (0x14c)
+/* Output DMA Y offset */
+#define EXYNOS_CIOYOFF (0x168)
+/* Output DMA CB offset */
+#define EXYNOS_CIOCBOFF (0x16c)
+/* Output DMA CR offset */
+#define EXYNOS_CIOCROFF (0x170)
+/* Input DMA Y offset */
+#define EXYNOS_CIIYOFF (0x174)
+/* Input DMA CB offset */
+#define EXYNOS_CIICBOFF (0x178)
+/* Input DMA CR offset */
+#define EXYNOS_CIICROFF (0x17c)
+/* Input DMA original image size */
+#define EXYNOS_ORGISIZE (0x180)
+/* Output DMA original image size */
+#define EXYNOS_ORGOSIZE (0x184)
+/* Real output DMA image size */
+#define EXYNOS_CIEXTEN (0x188)
+/* DMA parameter */
+#define EXYNOS_CIDMAPARAM (0x18c)
+/* MIPI CSI image format */
+#define EXYNOS_CSIIMGFMT (0x194)
+/* FIMC Clock Source Select */
+#define EXYNOS_MISC_FIMC (0x198)
+
+/* Add for FIMC v5.1 */
+/* Output Frame Buffer Sequence */
+#define EXYNOS_CIFCNTSEQ (0x1fc)
+/* Y 5th frame start address for output DMA */
+#define EXYNOS_CIOYSA5 (0x200)
+/* Y 6th frame start address for output DMA */
+#define EXYNOS_CIOYSA6 (0x204)
+/* Y 7th frame start address for output DMA */
+#define EXYNOS_CIOYSA7 (0x208)
+/* Y 8th frame start address for output DMA */
+#define EXYNOS_CIOYSA8 (0x20c)
+/* Y 9th frame start address for output DMA */
+#define EXYNOS_CIOYSA9 (0x210)
+/* Y 10th frame start address for output DMA */
+#define EXYNOS_CIOYSA10 (0x214)
+/* Y 11th frame start address for output DMA */
+#define EXYNOS_CIOYSA11 (0x218)
+/* Y 12th frame start address for output DMA */
+#define EXYNOS_CIOYSA12 (0x21c)
+/* Y 13th frame start address for output DMA */
+#define EXYNOS_CIOYSA13 (0x220)
+/* Y 14th frame start address for output DMA */
+#define EXYNOS_CIOYSA14 (0x224)
+/* Y 15th frame start address for output DMA */
+#define EXYNOS_CIOYSA15 (0x228)
+/* Y 16th frame start address for output DMA */
+#define EXYNOS_CIOYSA16 (0x22c)
+/* Y 17th frame start address for output DMA */
+#define EXYNOS_CIOYSA17 (0x230)
+/* Y 18th frame start address for output DMA */
+#define EXYNOS_CIOYSA18 (0x234)
+/* Y 19th frame start address for output DMA */
+#define EXYNOS_CIOYSA19 (0x238)
+/* Y 20th frame start address for output DMA */
+#define EXYNOS_CIOYSA20 (0x23c)
+/* Y 21st frame start address for output DMA */
+#define EXYNOS_CIOYSA21			(0x240)
+/* Y 22nd frame start address for output DMA */
+#define EXYNOS_CIOYSA22			(0x244)
+/* Y 23rd frame start address for output DMA */
+#define EXYNOS_CIOYSA23			(0x248)
+/* Y 24th frame start address for output DMA */
+#define EXYNOS_CIOYSA24 (0x24c)
+/* Y 25th frame start address for output DMA */
+#define EXYNOS_CIOYSA25 (0x250)
+/* Y 26th frame start address for output DMA */
+#define EXYNOS_CIOYSA26 (0x254)
+/* Y 27th frame start address for output DMA */
+#define EXYNOS_CIOYSA27 (0x258)
+/* Y 28th frame start address for output DMA */
+#define EXYNOS_CIOYSA28 (0x25c)
+/* Y 29th frame start address for output DMA */
+#define EXYNOS_CIOYSA29 (0x260)
+/* Y 30th frame start address for output DMA */
+#define EXYNOS_CIOYSA30 (0x264)
+/* Y 31st frame start address for output DMA */
+#define EXYNOS_CIOYSA31			(0x268)
+/* Y 32nd frame start address for output DMA */
+#define EXYNOS_CIOYSA32			(0x26c)
+
+/* CB 5th frame start address for output DMA */
+#define EXYNOS_CIOCBSA5 (0x270)
+/* CB 6th frame start address for output DMA */
+#define EXYNOS_CIOCBSA6 (0x274)
+/* CB 7th frame start address for output DMA */
+#define EXYNOS_CIOCBSA7 (0x278)
+/* CB 8th frame start address for output DMA */
+#define EXYNOS_CIOCBSA8 (0x27c)
+/* CB 9th frame start address for output DMA */
+#define EXYNOS_CIOCBSA9 (0x280)
+/* CB 10th frame start address for output DMA */
+#define EXYNOS_CIOCBSA10 (0x284)
+/* CB 11th frame start address for output DMA */
+#define EXYNOS_CIOCBSA11 (0x288)
+/* CB 12th frame start address for output DMA */
+#define EXYNOS_CIOCBSA12 (0x28c)
+/* CB 13th frame start address for output DMA */
+#define EXYNOS_CIOCBSA13 (0x290)
+/* CB 14th frame start address for output DMA */
+#define EXYNOS_CIOCBSA14 (0x294)
+/* CB 15th frame start address for output DMA */
+#define EXYNOS_CIOCBSA15 (0x298)
+/* CB 16th frame start address for output DMA */
+#define EXYNOS_CIOCBSA16 (0x29c)
+/* CB 17th frame start address for output DMA */
+#define EXYNOS_CIOCBSA17 (0x2a0)
+/* CB 18th frame start address for output DMA */
+#define EXYNOS_CIOCBSA18 (0x2a4)
+/* CB 19th frame start address for output DMA */
+#define EXYNOS_CIOCBSA19 (0x2a8)
+/* CB 20th frame start address for output DMA */
+#define EXYNOS_CIOCBSA20 (0x2ac)
+/* CB 21st frame start address for output DMA */
+#define EXYNOS_CIOCBSA21		(0x2b0)
+/* CB 22nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA22		(0x2b4)
+/* CB 23rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA23		(0x2b8)
+/* CB 24th frame start address for output DMA */
+#define EXYNOS_CIOCBSA24 (0x2bc)
+/* CB 25th frame start address for output DMA */
+#define EXYNOS_CIOCBSA25 (0x2c0)
+/* CB 26th frame start address for output DMA */
+#define EXYNOS_CIOCBSA26 (0x2c4)
+/* CB 27th frame start address for output DMA */
+#define EXYNOS_CIOCBSA27 (0x2c8)
+/* CB 28th frame start address for output DMA */
+#define EXYNOS_CIOCBSA28 (0x2cc)
+/* CB 29th frame start address for output DMA */
+#define EXYNOS_CIOCBSA29 (0x2d0)
+/* CB 30th frame start address for output DMA */
+#define EXYNOS_CIOCBSA30 (0x2d4)
+/* CB 31st frame start address for output DMA */
+#define EXYNOS_CIOCBSA31		(0x2d8)
+/* CB 32nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA32		(0x2dc)
+
+/* CR 5th frame start address for output DMA */
+#define EXYNOS_CIOCRSA5 (0x2e0)
+/* CR 6th frame start address for output DMA */
+#define EXYNOS_CIOCRSA6 (0x2e4)
+/* CR 7th frame start address for output DMA */
+#define EXYNOS_CIOCRSA7 (0x2e8)
+/* CR 8th frame start address for output DMA */
+#define EXYNOS_CIOCRSA8 (0x2ec)
+/* CR 9th frame start address for output DMA */
+#define EXYNOS_CIOCRSA9 (0x2f0)
+/* CR 10th frame start address for output DMA */
+#define EXYNOS_CIOCRSA10 (0x2f4)
+/* CR 11th frame start address for output DMA */
+#define EXYNOS_CIOCRSA11 (0x2f8)
+/* CR 12th frame start address for output DMA */
+#define EXYNOS_CIOCRSA12 (0x2fc)
+/* CR 13th frame start address for output DMA */
+#define EXYNOS_CIOCRSA13 (0x300)
+/* CR 14th frame start address for output DMA */
+#define EXYNOS_CIOCRSA14 (0x304)
+/* CR 15th frame start address for output DMA */
+#define EXYNOS_CIOCRSA15 (0x308)
+/* CR 16th frame start address for output DMA */
+#define EXYNOS_CIOCRSA16 (0x30c)
+/* CR 17th frame start address for output DMA */
+#define EXYNOS_CIOCRSA17 (0x310)
+/* CR 18th frame start address for output DMA */
+#define EXYNOS_CIOCRSA18 (0x314)
+/* CR 19th frame start address for output DMA */
+#define EXYNOS_CIOCRSA19 (0x318)
+/* CR 20th frame start address for output DMA */
+#define EXYNOS_CIOCRSA20 (0x31c)
+/* CR 21st frame start address for output DMA */
+#define EXYNOS_CIOCRSA21		(0x320)
+/* CR 22nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA22		(0x324)
+/* CR 23rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA23		(0x328)
+/* CR 24th frame start address for output DMA */
+#define EXYNOS_CIOCRSA24 (0x32c)
+/* CR 25th frame start address for output DMA */
+#define EXYNOS_CIOCRSA25 (0x330)
+/* CR 26th frame start address for output DMA */
+#define EXYNOS_CIOCRSA26 (0x334)
+/* CR 27th frame start address for output DMA */
+#define EXYNOS_CIOCRSA27 (0x338)
+/* CR 28th frame start address for output DMA */
+#define EXYNOS_CIOCRSA28 (0x33c)
+/* CR 29th frame start address for output DMA */
+#define EXYNOS_CIOCRSA29 (0x340)
+/* CR 30th frame start address for output DMA */
+#define EXYNOS_CIOCRSA30 (0x344)
+/* CR 31st frame start address for output DMA */
+#define EXYNOS_CIOCRSA31		(0x348)
+/* CR 32nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA32		(0x34c)
+
+/*
+ * Macro part
+*/
+/* frame start address 1 ~ 4, 5 ~ 32 */
+/* Number of Default PingPong Memory */
+#define DEF_PP 4
+#define EXYNOS_CIOYSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOYSA1 + (__x) * 4) : \
+ (EXYNOS_CIOYSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCBSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOCBSA1 + (__x) * 4) : \
+ (EXYNOS_CIOCBSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCRSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOCRSA1 + (__x) * 4) : \
+ (EXYNOS_CIOCRSA5 + ((__x) - DEF_PP) * 4))
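+/*
+ * e.g. EXYNOS_CIOYSA(0) == EXYNOS_CIOYSA1 and EXYNOS_CIOYSA(4) ==
+ * EXYNOS_CIOYSA5, skipping the register gap between the 4th and 5th banks.
+ */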
+/* Number of Default Input PingPong Memory */
+#define DEF_IPP 1
+#define EXYNOS_CIIYSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIIYSA0) : (EXYNOS_CIIYSA1))
+#define EXYNOS_CIICBSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIICBSA0) : (EXYNOS_CIICBSA1))
+#define EXYNOS_CIICRSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIICRSA0) : (EXYNOS_CIICRSA1))
+
+#define EXYNOS_CISRCFMT_SOURCEHSIZE(x) ((x) << 16)
+#define EXYNOS_CISRCFMT_SOURCEVSIZE(x) ((x) << 0)
+
+#define EXYNOS_CIWDOFST_WINHOROFST(x) ((x) << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST(x) ((x) << 0)
+
+#define EXYNOS_CIWDOFST2_WINHOROFST2(x) ((x) << 16)
+#define EXYNOS_CIWDOFST2_WINVEROFST2(x) ((x) << 0)
+
+#define EXYNOS_CITRGFMT_TARGETHSIZE(x) (((x) & 0x1fff) << 16)
+#define EXYNOS_CITRGFMT_TARGETVSIZE(x) (((x) & 0x1fff) << 0)
+
+#define EXYNOS_CISCPRERATIO_SHFACTOR(x) ((x) << 28)
+#define EXYNOS_CISCPRERATIO_PREHORRATIO(x) ((x) << 16)
+#define EXYNOS_CISCPRERATIO_PREVERRATIO(x) ((x) << 0)
+
+#define EXYNOS_CISCPREDST_PREDSTWIDTH(x) ((x) << 16)
+#define EXYNOS_CISCPREDST_PREDSTHEIGHT(x) ((x) << 0)
+
+#define EXYNOS_CISCCTRL_MAINHORRATIO(x) ((x) << 16)
+#define EXYNOS_CISCCTRL_MAINVERRATIO(x) ((x) << 0)
+
+#define EXYNOS_CITAREA_TARGET_AREA(x) ((x) << 0)
+
+#define EXYNOS_CISTATUS_GET_FRAME_COUNT(x) (((x) >> 26) & 0x3)
+#define EXYNOS_CISTATUS_GET_FRAME_END(x) (((x) >> 17) & 0x1)
+#define EXYNOS_CISTATUS_GET_LAST_CAPTURE_END(x) (((x) >> 16) & 0x1)
+#define EXYNOS_CISTATUS_GET_LCD_STATUS(x) (((x) >> 9) & 0x1)
+#define EXYNOS_CISTATUS_GET_ENVID_STATUS(x) (((x) >> 8) & 0x1)
+
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(x) (((x) >> 7) & 0x3f)
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(x) ((x) & 0x3f)
+
+#define EXYNOS_CIIMGEFF_FIN(x)			(((x) & 0x7) << 26)
+#define EXYNOS_CIIMGEFF_PAT_CB(x) ((x) << 13)
+#define EXYNOS_CIIMGEFF_PAT_CR(x) ((x) << 0)
+
+#define EXYNOS_CIILINESKIP(x) (((x) & 0xf) << 24)
+
+#define EXYNOS_CIREAL_ISIZE_HEIGHT(x) ((x) << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH(x) ((x) << 0)
+
+#define EXYNOS_MSCTRL_SUCCESSIVE_COUNT(x) ((x) << 24)
+#define EXYNOS_MSCTRL_GET_INDMA_STATUS(x) ((x) & 0x1)
+
+#define EXYNOS_CIOYOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOYOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIOCBOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOCBOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIOCROFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOCROFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIIYOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIIYOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIICBOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIICBOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIICROFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIICROFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_ORGISIZE_VERTICAL(x) ((x) << 16)
+#define EXYNOS_ORGISIZE_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_ORGOSIZE_VERTICAL(x) ((x) << 16)
+#define EXYNOS_ORGOSIZE_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIEXTEN_TARGETH_EXT(x) ((((x) & 0x2000) >> 13) << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT(x) ((((x) & 0x2000) >> 13) << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT(x) (((x) & 0x3F) << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT(x) ((x) & 0x3F)
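+
+/*
+ * The TARGETH/TARGETV extension macros above take bit 13 of a target
+ * size and move it into bits 26/24 of CIEXTEN; together with the
+ * 13-bit TARGETHSIZE/TARGETVSIZE fields of CITRGFMT this lets 14-bit
+ * target dimensions be programmed.
+ */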
+
+/*
+ * Bit definition part
+ */
+/* Source format register */
+#define EXYNOS_CISRCFMT_ITU601_8BIT (1 << 31)
+#define EXYNOS_CISRCFMT_ITU656_8BIT (0 << 31)
+#define EXYNOS_CISRCFMT_ITU601_16BIT (1 << 29)
+#define EXYNOS_CISRCFMT_ORDER422_YCBYCR (0 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_YCRYCB (1 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CBYCRY (2 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CRYCBY (3 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CBCRCBCR (0 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CRCBCRCB (1 << 14)
+
+/* Window offset register */
+#define EXYNOS_CIWDOFST_WINOFSEN (1 << 31)
+#define EXYNOS_CIWDOFST_CLROVFIY (1 << 30)
+#define EXYNOS_CIWDOFST_CLROVRLB (1 << 29)
+#define EXYNOS_CIWDOFST_WINHOROFST_MASK (0x7ff << 16)
+#define EXYNOS_CIWDOFST_CLROVFICB (1 << 15)
+#define EXYNOS_CIWDOFST_CLROVFICR (1 << 14)
+#define EXYNOS_CIWDOFST_WINVEROFST_MASK (0xfff << 0)
+
+/* Global control register */
+#define EXYNOS_CIGCTRL_SWRST (1 << 31)
+#define EXYNOS_CIGCTRL_CAMRST_A (1 << 30)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_B (0 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_A (1 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_MASK (1 << 29)
+#define EXYNOS_CIGCTRL_TESTPATTERN_NORMAL (0 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR (1 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC (2 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_VER_INC (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_MASK (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_SHIFT (27)
+#define EXYNOS_CIGCTRL_INVPOLPCLK (1 << 26)
+#define EXYNOS_CIGCTRL_INVPOLVSYNC (1 << 25)
+#define EXYNOS_CIGCTRL_INVPOLHREF (1 << 24)
+#define EXYNOS_CIGCTRL_IRQ_OVFEN (1 << 22)
+#define EXYNOS_CIGCTRL_HREF_MASK (1 << 21)
+#define EXYNOS_CIGCTRL_IRQ_EDGE (0 << 20)
+#define EXYNOS_CIGCTRL_IRQ_LEVEL (1 << 20)
+#define EXYNOS_CIGCTRL_IRQ_CLR (1 << 19)
+#define EXYNOS_CIGCTRL_IRQ_END_DISABLE (1 << 18)
+#define EXYNOS_CIGCTRL_IRQ_DISABLE (0 << 16)
+#define EXYNOS_CIGCTRL_IRQ_ENABLE (1 << 16)
+#define EXYNOS_CIGCTRL_SHADOW_DISABLE (1 << 12)
+#define EXYNOS_CIGCTRL_CAM_JPEG (1 << 8)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_B (0 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_A (1 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_MASK (1 << 7)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_CAMERA (0 << 6)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK (1 << 6)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_MASK (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_A (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_B (0 << 10)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_MASK (1 << 6)
+#define EXYNOS_CIGCTRL_CSC_ITU601 (0 << 5)
+#define EXYNOS_CIGCTRL_CSC_ITU709 (1 << 5)
+#define EXYNOS_CIGCTRL_CSC_MASK (1 << 5)
+#define EXYNOS_CIGCTRL_INVPOLHSYNC (1 << 4)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_ITU (0 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MIPI (1 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MASK (1 << 3)
+#define EXYNOS_CIGCTRL_PROGRESSIVE (0 << 0)
+#define EXYNOS_CIGCTRL_INTERLACE (1 << 0)
+
+/* Window offset2 register */
+#define EXYNOS_CIWDOFST_WINHOROFST2_MASK (0xfff << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST2_MASK	(0xfff << 0)
+
+/* Target format register */
+#define EXYNOS_CITRGFMT_INROT90_CLOCKWISE (1 << 31)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420 (0 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422 (1 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE (2 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_RGB (3 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_MASK (3 << 29)
+#define EXYNOS_CITRGFMT_FLIP_SHIFT (14)
+#define EXYNOS_CITRGFMT_FLIP_NORMAL (0 << 14)
+#define EXYNOS_CITRGFMT_FLIP_X_MIRROR (1 << 14)
+#define EXYNOS_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
+#define EXYNOS_CITRGFMT_FLIP_180 (3 << 14)
+#define EXYNOS_CITRGFMT_FLIP_MASK (3 << 14)
+#define EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE (1 << 13)
+#define EXYNOS_CITRGFMT_TARGETV_MASK (0x1fff << 0)
+#define EXYNOS_CITRGFMT_TARGETH_MASK (0x1fff << 16)
+
+/* Output DMA control register */
+#define EXYNOS_CIOCTRL_WEAVE_OUT (1 << 31)
+#define EXYNOS_CIOCTRL_WEAVE_MASK (1 << 31)
+#define EXYNOS_CIOCTRL_LASTENDEN (1 << 30)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR (0 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB (1 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CRCB (2 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CBCR (3 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_SHIFT (24)
+#define EXYNOS_CIOCTRL_ORDER2P_MASK (3 << 24)
+#define EXYNOS_CIOCTRL_YCBCR_3PLANE (0 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_2PLANE (1 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
+#define EXYNOS_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
+#define EXYNOS_CIOCTRL_ALPHA_OUT (0xff << 4)
+#define EXYNOS_CIOCTRL_ORDER422_YCBYCR (0 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_YCRYCB (1 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CBYCRY (2 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CRYCBY (3 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_MASK (3 << 0)
+
+/* Main scaler control register */
+#define EXYNOS_CISCCTRL_SCALERBYPASS (1 << 31)
+#define EXYNOS_CISCCTRL_SCALEUP_H (1 << 30)
+#define EXYNOS_CISCCTRL_SCALEUP_V (1 << 29)
+#define EXYNOS_CISCCTRL_CSCR2Y_NARROW (0 << 28)
+#define EXYNOS_CISCCTRL_CSCR2Y_WIDE (1 << 28)
+#define EXYNOS_CISCCTRL_CSCY2R_NARROW (0 << 27)
+#define EXYNOS_CISCCTRL_CSCY2R_WIDE (1 << 27)
+#define EXYNOS_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
+#define EXYNOS_CISCCTRL_PROGRESSIVE (0 << 25)
+#define EXYNOS_CISCCTRL_INTERLACE (1 << 25)
+#define EXYNOS_CISCCTRL_SCAN_MASK (1 << 25)
+#define EXYNOS_CISCCTRL_SCALERSTART (1 << 15)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK (3 << 13)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK (3 << 11)
+#define EXYNOS_CISCCTRL_EXTRGB_NORMAL (0 << 10)
+#define EXYNOS_CISCCTRL_EXTRGB_EXTENSION (1 << 10)
+#define EXYNOS_CISCCTRL_ONE2ONE (1 << 9)
+#define EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK (0x1ff << 0)
+#define EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK (0x1ff << 16)
+
+/* Status register */
+#define EXYNOS_CISTATUS_OVFIY (1 << 31)
+#define EXYNOS_CISTATUS_OVFICB (1 << 30)
+#define EXYNOS_CISTATUS_OVFICR (1 << 29)
+#define EXYNOS_CISTATUS_VSYNC (1 << 28)
+#define EXYNOS_CISTATUS_SCALERSTART (1 << 26)
+#define EXYNOS_CISTATUS_WINOFSTEN (1 << 25)
+#define EXYNOS_CISTATUS_IMGCPTEN (1 << 22)
+#define EXYNOS_CISTATUS_IMGCPTENSC (1 << 21)
+#define EXYNOS_CISTATUS_VSYNC_A (1 << 20)
+#define EXYNOS_CISTATUS_VSYNC_B (1 << 19)
+#define EXYNOS_CISTATUS_OVRLB (1 << 18)
+#define EXYNOS_CISTATUS_FRAMEEND (1 << 17)
+#define EXYNOS_CISTATUS_LASTCAPTUREEND (1 << 16)
+#define EXYNOS_CISTATUS_VVALID_A (1 << 15)
+#define EXYNOS_CISTATUS_VVALID_B (1 << 14)
+
+/* Image capture enable register */
+#define EXYNOS_CIIMGCPT_IMGCPTEN (1 << 31)
+#define EXYNOS_CIIMGCPT_IMGCPTEN_SC (1 << 30)
+#define EXYNOS_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_EN (0 << 18)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+
+/* Image effects register */
+#define EXYNOS_CIIMGEFF_IE_DISABLE (0 << 30)
+#define EXYNOS_CIIMGEFF_IE_ENABLE (1 << 30)
+#define EXYNOS_CIIMGEFF_IE_SC_BEFORE (0 << 29)
+#define EXYNOS_CIIMGEFF_IE_SC_AFTER (1 << 29)
+#define EXYNOS_CIIMGEFF_FIN_BYPASS (0 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARBITRARY (1 << 26)
+#define EXYNOS_CIIMGEFF_FIN_NEGATIVE (2 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
+#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
+#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
+#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK	((0xff << 13) | (0xff << 0))
+
+/* Real input DMA size register */
+#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
+#define EXYNOS_CIREAL_ISIZE_ADDR_CH_DISABLE (1 << 30)
+#define EXYNOS_CIREAL_ISIZE_HEIGHT_MASK (0x3FFF << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH_MASK (0x3FFF << 0)
+
+/* Input DMA control register */
+#define EXYNOS_MSCTRL_FIELD_MASK (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_WEAVE (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_NORMAL (0 << 31)
+#define EXYNOS_MSCTRL_BURST_CNT (24)
+#define EXYNOS_MSCTRL_BURST_CNT_MASK (0xf << 24)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CBCR (0 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CRCB (1 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CRCB (2 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CBCR (3 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT (16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK (0x3 << 16)
+#define EXYNOS_MSCTRL_C_INT_IN_3PLANE (0 << 15)
+#define EXYNOS_MSCTRL_C_INT_IN_2PLANE (1 << 15)
+#define EXYNOS_MSCTRL_FLIP_SHIFT (13)
+#define EXYNOS_MSCTRL_FLIP_NORMAL (0 << 13)
+#define EXYNOS_MSCTRL_FLIP_X_MIRROR (1 << 13)
+#define EXYNOS_MSCTRL_FLIP_Y_MIRROR (2 << 13)
+#define EXYNOS_MSCTRL_FLIP_180 (3 << 13)
+#define EXYNOS_MSCTRL_FLIP_MASK (3 << 13)
+#define EXYNOS_MSCTRL_ORDER422_CRYCBY (0 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCRYCB (1 << 4)
+#define EXYNOS_MSCTRL_ORDER422_CBYCRY (2 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCBYCR (3 << 4)
+#define EXYNOS_MSCTRL_INPUT_EXTCAM (0 << 3)
+#define EXYNOS_MSCTRL_INPUT_MEMORY (1 << 3)
+#define EXYNOS_MSCTRL_INPUT_MASK (1 << 3)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE (2 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_RGB (3 << 1)
+#define EXYNOS_MSCTRL_ENVID (1 << 0)
+
+/* DMA parameter register */
+#define EXYNOS_CIDMAPARAM_R_MODE_LINEAR (0 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_CONFTILE (1 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_16X16 (2 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_64X32 (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_MASK (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_64 (0 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_128 (1 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_256 (2 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_512 (3 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_1024 (4 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_2048 (5 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_4096 (6 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_1 (0 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_2 (1 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_4 (2 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_8 (3 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_16 (4 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_32 (5 << 20)
+#define EXYNOS_CIDMAPARAM_W_MODE_LINEAR (0 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_CONFTILE (1 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_16X16 (2 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_64X32 (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_MASK (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_64 (0 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_128 (1 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_256 (2 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_512 (3 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_1024 (4 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_2048 (5 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_4096 (6 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_1 (0 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_2 (1 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_4 (2 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_8 (3 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_16 (4 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_32 (5 << 4)
+
+/* Gathering Extension register */
+#define EXYNOS_CIEXTEN_TARGETH_EXT_MASK (1 << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT_MASK (1 << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK (0x3F << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK (0x3F)
+#define EXYNOS_CIEXTEN_YUV444_OUT (1 << 22)
+
+/* FIMC Clock Source Select register */
+#define EXYNOS_CLKSRC_HCLK (0 << 1)
+#define EXYNOS_CLKSRC_HCLK_MASK (1 << 1)
+#define EXYNOS_CLKSRC_SCLK (1 << 1)
+
+/* SYSREG for FIMC writeback */
+#define SYSREG_CAMERA_BLK (0x0218)
+#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23)
+#define SYSREG_FIMD0WB_DEST_SHIFT 23
+
+#endif /* EXYNOS_REGS_FIMC_H */
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
new file mode 100644
index 00000000000..9ad592707aa
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -0,0 +1,284 @@
+/* linux/drivers/gpu/drm/exynos/regs-gsc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Register definition file for Samsung G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_GSC_H_
+#define EXYNOS_REGS_GSC_H_
+
+/* G-Scaler enable */
+#define GSC_ENABLE 0x00
+#define GSC_ENABLE_PP_UPDATE_TIME_MASK (1 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_CURR (0 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_EOPAS (1 << 9)
+#define GSC_ENABLE_CLK_GATE_MODE_MASK (1 << 8)
+#define GSC_ENABLE_CLK_GATE_MODE_FREE (1 << 8)
+#define GSC_ENABLE_IPC_MODE_MASK (1 << 7)
+#define GSC_ENABLE_NORM_MODE (0 << 7)
+#define GSC_ENABLE_IPC_MODE (1 << 7)
+#define GSC_ENABLE_PP_UPDATE_MODE_MASK (1 << 6)
+#define GSC_ENABLE_PP_UPDATE_FIRE_MODE (1 << 6)
+#define GSC_ENABLE_IN_PP_UPDATE (1 << 5)
+#define GSC_ENABLE_ON_CLEAR_MASK (1 << 4)
+#define GSC_ENABLE_ON_CLEAR_ONESHOT (1 << 4)
+#define GSC_ENABLE_QOS_ENABLE (1 << 3)
+#define GSC_ENABLE_OP_STATUS (1 << 2)
+#define GSC_ENABLE_SFR_UPDATE (1 << 1)
+#define GSC_ENABLE_ON (1 << 0)
+
+/* G-Scaler S/W reset */
+#define GSC_SW_RESET 0x04
+#define GSC_SW_RESET_SRESET (1 << 0)
+
+/* G-Scaler IRQ */
+#define GSC_IRQ 0x08
+#define GSC_IRQ_STATUS_OR_IRQ (1 << 17)
+#define GSC_IRQ_STATUS_OR_FRM_DONE (1 << 16)
+#define GSC_IRQ_OR_MASK (1 << 2)
+#define GSC_IRQ_FRMDONE_MASK (1 << 1)
+#define GSC_IRQ_ENABLE (1 << 0)
+
+/* G-Scaler input control */
+#define GSC_IN_CON 0x10
+#define GSC_IN_CHROM_STRIDE_SEL_MASK (1 << 20)
+#define GSC_IN_CHROM_STRIDE_SEPAR (1 << 20)
+#define GSC_IN_RB_SWAP_MASK (1 << 19)
+#define GSC_IN_RB_SWAP (1 << 19)
+#define GSC_IN_ROT_MASK (7 << 16)
+#define GSC_IN_ROT_270 (7 << 16)
+#define GSC_IN_ROT_90_YFLIP (6 << 16)
+#define GSC_IN_ROT_90_XFLIP (5 << 16)
+#define GSC_IN_ROT_90 (4 << 16)
+#define GSC_IN_ROT_180 (3 << 16)
+#define GSC_IN_ROT_YFLIP (2 << 16)
+#define GSC_IN_ROT_XFLIP (1 << 16)
+#define GSC_IN_RGB_TYPE_MASK (3 << 14)
+#define GSC_IN_RGB_HD_WIDE (3 << 14)
+#define GSC_IN_RGB_HD_NARROW (2 << 14)
+#define GSC_IN_RGB_SD_WIDE (1 << 14)
+#define GSC_IN_RGB_SD_NARROW (0 << 14)
+#define GSC_IN_YUV422_1P_ORDER_MASK (1 << 13)
+#define GSC_IN_YUV422_1P_ORDER_LSB_Y (0 << 13)
+#define GSC_IN_YUV422_1P_OEDER_LSB_C (1 << 13)
+#define GSC_IN_CHROMA_ORDER_MASK (1 << 12)
+#define GSC_IN_CHROMA_ORDER_CBCR (0 << 12)
+#define GSC_IN_CHROMA_ORDER_CRCB (1 << 12)
+#define GSC_IN_FORMAT_MASK (7 << 8)
+#define GSC_IN_XRGB8888 (0 << 8)
+#define GSC_IN_RGB565 (1 << 8)
+#define GSC_IN_YUV420_2P (2 << 8)
+#define GSC_IN_YUV420_3P (3 << 8)
+#define GSC_IN_YUV422_1P (4 << 8)
+#define GSC_IN_YUV422_2P (5 << 8)
+#define GSC_IN_YUV422_3P (6 << 8)
+#define GSC_IN_TILE_TYPE_MASK (1 << 4)
+#define GSC_IN_TILE_C_16x8 (0 << 4)
+#define GSC_IN_TILE_C_16x16 (1 << 4)
+#define GSC_IN_TILE_MODE (1 << 3)
+#define GSC_IN_LOCAL_SEL_MASK (3 << 1)
+#define GSC_IN_LOCAL_CAM3 (3 << 1)
+#define GSC_IN_LOCAL_FIMD_WB (2 << 1)
+#define GSC_IN_LOCAL_CAM1 (1 << 1)
+#define GSC_IN_LOCAL_CAM0 (0 << 1)
+#define GSC_IN_PATH_MASK (1 << 0)
+#define GSC_IN_PATH_LOCAL (1 << 0)
+#define GSC_IN_PATH_MEMORY (0 << 0)
+
+/* G-Scaler source image size */
+#define GSC_SRCIMG_SIZE 0x14
+#define GSC_SRCIMG_HEIGHT_MASK (0x1fff << 16)
+#define GSC_SRCIMG_HEIGHT(x) ((x) << 16)
+#define GSC_SRCIMG_WIDTH_MASK (0x3fff << 0)
+#define GSC_SRCIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler source image offset */
+#define GSC_SRCIMG_OFFSET 0x18
+#define GSC_SRCIMG_OFFSET_Y_MASK (0x1fff << 16)
+#define GSC_SRCIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_SRCIMG_OFFSET_X_MASK (0x1fff << 0)
+#define GSC_SRCIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler cropped source image size */
+#define GSC_CROPPED_SIZE 0x1C
+#define GSC_CROPPED_HEIGHT_MASK (0x1fff << 16)
+#define GSC_CROPPED_HEIGHT(x) ((x) << 16)
+#define GSC_CROPPED_WIDTH_MASK (0x1fff << 0)
+#define GSC_CROPPED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler output control */
+#define GSC_OUT_CON 0x20
+#define GSC_OUT_GLOBAL_ALPHA_MASK (0xff << 24)
+#define GSC_OUT_GLOBAL_ALPHA(x) ((x) << 24)
+#define GSC_OUT_CHROM_STRIDE_SEL_MASK (1 << 13)
+#define GSC_OUT_CHROM_STRIDE_SEPAR (1 << 13)
+#define GSC_OUT_RB_SWAP_MASK (1 << 12)
+#define GSC_OUT_RB_SWAP (1 << 12)
+#define GSC_OUT_RGB_TYPE_MASK (3 << 10)
+#define GSC_OUT_RGB_HD_NARROW (3 << 10)
+#define GSC_OUT_RGB_HD_WIDE (2 << 10)
+#define GSC_OUT_RGB_SD_NARROW (1 << 10)
+#define GSC_OUT_RGB_SD_WIDE (0 << 10)
+#define GSC_OUT_YUV422_1P_ORDER_MASK (1 << 9)
+#define GSC_OUT_YUV422_1P_ORDER_LSB_Y (0 << 9)
+#define GSC_OUT_YUV422_1P_OEDER_LSB_C (1 << 9)
+#define GSC_OUT_CHROMA_ORDER_MASK (1 << 8)
+#define GSC_OUT_CHROMA_ORDER_CBCR (0 << 8)
+#define GSC_OUT_CHROMA_ORDER_CRCB (1 << 8)
+#define GSC_OUT_FORMAT_MASK (7 << 4)
+#define GSC_OUT_XRGB8888 (0 << 4)
+#define GSC_OUT_RGB565 (1 << 4)
+#define GSC_OUT_YUV420_2P (2 << 4)
+#define GSC_OUT_YUV420_3P (3 << 4)
+#define GSC_OUT_YUV422_1P (4 << 4)
+#define GSC_OUT_YUV422_2P (5 << 4)
+#define GSC_OUT_YUV444 (7 << 4)
+#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
+#define GSC_OUT_TILE_C_16x8 (0 << 2)
+#define GSC_OUT_TILE_C_16x16 (1 << 2)
+#define GSC_OUT_TILE_MODE (1 << 1)
+#define GSC_OUT_PATH_MASK (1 << 0)
+#define GSC_OUT_PATH_LOCAL (1 << 0)
+#define GSC_OUT_PATH_MEMORY (0 << 0)
+
+/* G-Scaler scaled destination image size */
+#define GSC_SCALED_SIZE 0x24
+#define GSC_SCALED_HEIGHT_MASK (0x1fff << 16)
+#define GSC_SCALED_HEIGHT(x) ((x) << 16)
+#define GSC_SCALED_WIDTH_MASK (0x1fff << 0)
+#define GSC_SCALED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler pre scale ratio */
+#define GSC_PRE_SCALE_RATIO 0x28
+#define GSC_PRESC_SHFACTOR_MASK (7 << 28)
+#define GSC_PRESC_SHFACTOR(x) ((x) << 28)
+#define GSC_PRESC_V_RATIO_MASK (7 << 16)
+#define GSC_PRESC_V_RATIO(x) ((x) << 16)
+#define GSC_PRESC_H_RATIO_MASK (7 << 0)
+#define GSC_PRESC_H_RATIO(x) ((x) << 0)
+
+/* G-Scaler main scale horizontal ratio */
+#define GSC_MAIN_H_RATIO 0x2C
+#define GSC_MAIN_H_RATIO_MASK (0xfffff << 0)
+#define GSC_MAIN_H_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler main scale vertical ratio */
+#define GSC_MAIN_V_RATIO 0x30
+#define GSC_MAIN_V_RATIO_MASK (0xfffff << 0)
+#define GSC_MAIN_V_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler input chrominance stride */
+#define GSC_IN_CHROM_STRIDE 0x3C
+#define GSC_IN_CHROM_STRIDE_MASK (0x3fff << 0)
+#define GSC_IN_CHROM_STRIDE_VALUE(x) ((x) << 0)
+
+/* G-Scaler destination image size */
+#define GSC_DSTIMG_SIZE 0x40
+#define GSC_DSTIMG_HEIGHT_MASK (0x1fff << 16)
+#define GSC_DSTIMG_HEIGHT(x) ((x) << 16)
+#define GSC_DSTIMG_WIDTH_MASK (0x1fff << 0)
+#define GSC_DSTIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler destination image offset */
+#define GSC_DSTIMG_OFFSET 0x44
+#define GSC_DSTIMG_OFFSET_Y_MASK (0x1fff << 16)
+#define GSC_DSTIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_DSTIMG_OFFSET_X_MASK (0x1fff << 0)
+#define GSC_DSTIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler output chrominance stride */
+#define GSC_OUT_CHROM_STRIDE 0x48
+#define GSC_OUT_CHROM_STRIDE_MASK (0x3fff << 0)
+#define GSC_OUT_CHROM_STRIDE_VALUE(x) ((x) << 0)
+
+/* G-Scaler input y address mask */
+#define GSC_IN_BASE_ADDR_Y_MASK 0x4C
+/* G-Scaler input y base address */
+#define GSC_IN_BASE_ADDR_Y(n) (0x50 + (n) * 0x4)
+/* G-Scaler input y base current address */
+#define GSC_IN_BASE_ADDR_Y_CUR(n) (0x60 + (n) * 0x4)
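+
+/*
+ * The (n) argument selects a ping-pong buffer slot with a 4-byte
+ * register stride, e.g. GSC_IN_BASE_ADDR_Y(2) resolves to offset
+ * 0x58; the CB and CR variants below follow the same pattern.
+ */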
+
+/* G-Scaler input cb address mask */
+#define GSC_IN_BASE_ADDR_CB_MASK 0x7C
+/* G-Scaler input cb base address */
+#define GSC_IN_BASE_ADDR_CB(n) (0x80 + (n) * 0x4)
+/* G-Scaler input cb base current address */
+#define GSC_IN_BASE_ADDR_CB_CUR(n) (0x90 + (n) * 0x4)
+
+/* G-Scaler input cr address mask */
+#define GSC_IN_BASE_ADDR_CR_MASK 0xAC
+/* G-Scaler input cr base address */
+#define GSC_IN_BASE_ADDR_CR(n) (0xB0 + (n) * 0x4)
+/* G-Scaler input cr base current address */
+#define GSC_IN_BASE_ADDR_CR_CUR(n) (0xC0 + (n) * 0x4)
+
+/* G-Scaler input address mask */
+#define GSC_IN_CURR_ADDR_INDEX (0xf << 24)
+#define GSC_IN_CURR_GET_INDEX(x) ((x) >> 24)
+#define GSC_IN_BASE_ADDR_PINGPONG(x) ((x) << 16)
+#define GSC_IN_BASE_ADDR_MASK (0xff << 0)
+
+/* G-Scaler output y address mask */
+#define GSC_OUT_BASE_ADDR_Y_MASK 0x10C
+/* G-Scaler output y base address */
+#define GSC_OUT_BASE_ADDR_Y(n) (0x110 + (n) * 0x4)
+
+/* G-Scaler output cb address mask */
+#define GSC_OUT_BASE_ADDR_CB_MASK 0x15C
+/* G-Scaler output cb base address */
+#define GSC_OUT_BASE_ADDR_CB(n) (0x160 + (n) * 0x4)
+
+/* G-Scaler output cr address mask */
+#define GSC_OUT_BASE_ADDR_CR_MASK 0x1AC
+/* G-Scaler output cr base address */
+#define GSC_OUT_BASE_ADDR_CR(n) (0x1B0 + (n) * 0x4)
+
+/* G-Scaler output address mask */
+#define GSC_OUT_CURR_ADDR_INDEX (0xf << 24)
+#define GSC_OUT_CURR_GET_INDEX(x) ((x) >> 24)
+#define GSC_OUT_BASE_ADDR_PINGPONG(x) ((x) << 16)
+#define GSC_OUT_BASE_ADDR_MASK (0xffff << 0)
+
+/* G-Scaler horizontal scaling filter */
+#define GSC_HCOEF(n, s, x) (0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler vertical scaling filter */
+#define GSC_VCOEF(n, s, x) (0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
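+
+/*
+ * Coefficient addressing example: GSC_HCOEF(1, 2, 0) resolves to
+ * 0x300 + 1 * 0x4 + 2 * 0x30 = 0x364.  The hardware meaning of the
+ * (n, s, x) indices is not spelled out in this header.
+ */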
+
+/* G-Scaler BUS control */
+#define GSC_BUSCON 0xA78
+#define GSC_BUSCON_INT_TIME_MASK (1 << 8)
+#define GSC_BUSCON_INT_DATA_TRANS (0 << 8)
+#define GSC_BUSCON_INT_AXI_RESPONSE (1 << 8)
+#define GSC_BUSCON_AWCACHE(x) ((x) << 4)
+#define GSC_BUSCON_ARCACHE(x) ((x) << 0)
+
+/* G-Scaler V position */
+#define GSC_VPOSITION 0xA7C
+#define GSC_VPOS_F(x) ((x) << 0)
+
+/* G-Scaler clock initial count */
+#define GSC_CLK_INIT_COUNT 0xC00
+#define GSC_CLK_GATE_MODE_INIT_CNT(x) ((x) << 0)
+
+/* G-Scaler clock snoop count */
+#define GSC_CLK_SNOOP_COUNT 0xC04
+#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0)
+
+/* SYSCON. GSCBLK_CFG */
+#define SYSREG_GSCBLK_CFG1 (S3C_VA_SYS + 0x0224)
+#define GSC_BLK_DISP1WB_DEST(x)		((x) << 10)
+#define GSC_BLK_SW_RESET_WB_DEST(x)	(1 << (18 + (x)))
+#define GSC_BLK_PXLASYNC_LO_MASK_WB(x)	(0 << (14 + (x)))
+#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x)	(1 << (2 * (x)))
+#define SYSREG_GSCBLK_CFG2 (S3C_VA_SYS + 0x2000)
+#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x))
+
+#endif /* EXYNOS_REGS_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
new file mode 100644
index 00000000000..3f35ac6d8a4
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -0,0 +1,597 @@
+/*
+ *
+ * Cloned from drivers/media/video/s5p-tv/regs-hdmi.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * HDMI register header file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SAMSUNG_REGS_HDMI_H
+#define SAMSUNG_REGS_HDMI_H
+
+/*
+ * Register part
+ */
+
+/* HDMI Version 1.3 & Common */
+#define HDMI_CTRL_BASE(x) ((x) + 0x00000000)
+#define HDMI_CORE_BASE(x) ((x) + 0x00010000)
+#define HDMI_I2S_BASE(x) ((x) + 0x00040000)
+#define HDMI_TG_BASE(x) ((x) + 0x00050000)
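+
+/*
+ * Each *_BASE(x) macro only adds a fixed block offset to the mapped
+ * register base, so e.g. HDMI_CON_0 below resolves to offset 0x10000
+ * and HDMI_TG_CMD to offset 0x50000.
+ */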
+
+/* Control registers */
+#define HDMI_INTC_CON HDMI_CTRL_BASE(0x0000)
+#define HDMI_INTC_FLAG HDMI_CTRL_BASE(0x0004)
+#define HDMI_HPD_STATUS HDMI_CTRL_BASE(0x000C)
+#define HDMI_V13_PHY_RSTOUT HDMI_CTRL_BASE(0x0014)
+#define HDMI_V13_PHY_VPLL HDMI_CTRL_BASE(0x0018)
+#define HDMI_V13_PHY_CMU HDMI_CTRL_BASE(0x001C)
+#define HDMI_V13_CORE_RSTOUT HDMI_CTRL_BASE(0x0020)
+
+/* Core registers */
+#define HDMI_CON_0 HDMI_CORE_BASE(0x0000)
+#define HDMI_CON_1 HDMI_CORE_BASE(0x0004)
+#define HDMI_CON_2 HDMI_CORE_BASE(0x0008)
+#define HDMI_SYS_STATUS HDMI_CORE_BASE(0x0010)
+#define HDMI_V13_PHY_STATUS HDMI_CORE_BASE(0x0014)
+#define HDMI_STATUS_EN HDMI_CORE_BASE(0x0020)
+#define HDMI_HPD HDMI_CORE_BASE(0x0030)
+#define HDMI_MODE_SEL HDMI_CORE_BASE(0x0040)
+#define HDMI_ENC_EN HDMI_CORE_BASE(0x0044)
+#define HDMI_V13_BLUE_SCREEN_0 HDMI_CORE_BASE(0x0050)
+#define HDMI_V13_BLUE_SCREEN_1 HDMI_CORE_BASE(0x0054)
+#define HDMI_V13_BLUE_SCREEN_2 HDMI_CORE_BASE(0x0058)
+#define HDMI_H_BLANK_0 HDMI_CORE_BASE(0x00A0)
+#define HDMI_H_BLANK_1 HDMI_CORE_BASE(0x00A4)
+#define HDMI_V13_V_BLANK_0 HDMI_CORE_BASE(0x00B0)
+#define HDMI_V13_V_BLANK_1 HDMI_CORE_BASE(0x00B4)
+#define HDMI_V13_V_BLANK_2 HDMI_CORE_BASE(0x00B8)
+#define HDMI_V13_H_V_LINE_0 HDMI_CORE_BASE(0x00C0)
+#define HDMI_V13_H_V_LINE_1 HDMI_CORE_BASE(0x00C4)
+#define HDMI_V13_H_V_LINE_2 HDMI_CORE_BASE(0x00C8)
+#define HDMI_VSYNC_POL HDMI_CORE_BASE(0x00E4)
+#define HDMI_INT_PRO_MODE HDMI_CORE_BASE(0x00E8)
+#define HDMI_V13_V_BLANK_F_0 HDMI_CORE_BASE(0x0110)
+#define HDMI_V13_V_BLANK_F_1 HDMI_CORE_BASE(0x0114)
+#define HDMI_V13_V_BLANK_F_2 HDMI_CORE_BASE(0x0118)
+#define HDMI_V13_H_SYNC_GEN_0 HDMI_CORE_BASE(0x0120)
+#define HDMI_V13_H_SYNC_GEN_1 HDMI_CORE_BASE(0x0124)
+#define HDMI_V13_H_SYNC_GEN_2 HDMI_CORE_BASE(0x0128)
+#define HDMI_V13_V_SYNC_GEN_1_0 HDMI_CORE_BASE(0x0130)
+#define HDMI_V13_V_SYNC_GEN_1_1 HDMI_CORE_BASE(0x0134)
+#define HDMI_V13_V_SYNC_GEN_1_2 HDMI_CORE_BASE(0x0138)
+#define HDMI_V13_V_SYNC_GEN_2_0 HDMI_CORE_BASE(0x0140)
+#define HDMI_V13_V_SYNC_GEN_2_1 HDMI_CORE_BASE(0x0144)
+#define HDMI_V13_V_SYNC_GEN_2_2 HDMI_CORE_BASE(0x0148)
+#define HDMI_V13_V_SYNC_GEN_3_0 HDMI_CORE_BASE(0x0150)
+#define HDMI_V13_V_SYNC_GEN_3_1 HDMI_CORE_BASE(0x0154)
+#define HDMI_V13_V_SYNC_GEN_3_2 HDMI_CORE_BASE(0x0158)
+#define HDMI_V13_ACR_CON HDMI_CORE_BASE(0x0180)
+#define HDMI_V13_AVI_CON HDMI_CORE_BASE(0x0300)
+#define HDMI_V13_AVI_BYTE(n) HDMI_CORE_BASE(0x0320 + 4 * (n))
+#define HDMI_V13_DC_CONTROL HDMI_CORE_BASE(0x05C0)
+#define HDMI_V13_VIDEO_PATTERN_GEN HDMI_CORE_BASE(0x05C4)
+#define HDMI_V13_HPD_GEN HDMI_CORE_BASE(0x05C8)
+#define HDMI_V13_AUI_CON HDMI_CORE_BASE(0x0360)
+#define HDMI_V13_SPD_CON HDMI_CORE_BASE(0x0400)
+
+/* Timing generator registers */
+#define HDMI_TG_CMD HDMI_TG_BASE(0x0000)
+#define HDMI_TG_H_FSZ_L HDMI_TG_BASE(0x0018)
+#define HDMI_TG_H_FSZ_H HDMI_TG_BASE(0x001C)
+#define HDMI_TG_HACT_ST_L HDMI_TG_BASE(0x0020)
+#define HDMI_TG_HACT_ST_H HDMI_TG_BASE(0x0024)
+#define HDMI_TG_HACT_SZ_L HDMI_TG_BASE(0x0028)
+#define HDMI_TG_HACT_SZ_H HDMI_TG_BASE(0x002C)
+#define HDMI_TG_V_FSZ_L HDMI_TG_BASE(0x0030)
+#define HDMI_TG_V_FSZ_H HDMI_TG_BASE(0x0034)
+#define HDMI_TG_VSYNC_L HDMI_TG_BASE(0x0038)
+#define HDMI_TG_VSYNC_H HDMI_TG_BASE(0x003C)
+#define HDMI_TG_VSYNC2_L HDMI_TG_BASE(0x0040)
+#define HDMI_TG_VSYNC2_H HDMI_TG_BASE(0x0044)
+#define HDMI_TG_VACT_ST_L HDMI_TG_BASE(0x0048)
+#define HDMI_TG_VACT_ST_H HDMI_TG_BASE(0x004C)
+#define HDMI_TG_VACT_SZ_L HDMI_TG_BASE(0x0050)
+#define HDMI_TG_VACT_SZ_H HDMI_TG_BASE(0x0054)
+#define HDMI_TG_FIELD_CHG_L HDMI_TG_BASE(0x0058)
+#define HDMI_TG_FIELD_CHG_H HDMI_TG_BASE(0x005C)
+#define HDMI_TG_VACT_ST2_L HDMI_TG_BASE(0x0060)
+#define HDMI_TG_VACT_ST2_H HDMI_TG_BASE(0x0064)
+#define HDMI_TG_VSYNC_TOP_HDMI_L HDMI_TG_BASE(0x0078)
+#define HDMI_TG_VSYNC_TOP_HDMI_H HDMI_TG_BASE(0x007C)
+#define HDMI_TG_VSYNC_BOT_HDMI_L HDMI_TG_BASE(0x0080)
+#define HDMI_TG_VSYNC_BOT_HDMI_H HDMI_TG_BASE(0x0084)
+#define HDMI_TG_FIELD_TOP_HDMI_L HDMI_TG_BASE(0x0088)
+#define HDMI_TG_FIELD_TOP_HDMI_H HDMI_TG_BASE(0x008C)
+#define HDMI_TG_FIELD_BOT_HDMI_L HDMI_TG_BASE(0x0090)
+#define HDMI_TG_FIELD_BOT_HDMI_H HDMI_TG_BASE(0x0094)
+
+/*
+ * Bit definition part
+ */
+
+/* HDMI_INTC_CON */
+#define HDMI_INTC_EN_GLOBAL (1 << 6)
+#define HDMI_INTC_EN_HPD_PLUG (1 << 3)
+#define HDMI_INTC_EN_HPD_UNPLUG (1 << 2)
+
+/* HDMI_INTC_FLAG */
+#define HDMI_INTC_FLAG_HPD_PLUG (1 << 3)
+#define HDMI_INTC_FLAG_HPD_UNPLUG (1 << 2)
+
+/* HDMI_PHY_RSTOUT */
+#define HDMI_PHY_SW_RSTOUT (1 << 0)
+
+/* HDMI_CORE_RSTOUT */
+#define HDMI_CORE_SW_RSTOUT (1 << 0)
+
+/* HDMI_CON_0 */
+#define HDMI_BLUE_SCR_EN (1 << 5)
+#define HDMI_ASP_EN (1 << 2)
+#define HDMI_ASP_DIS (0 << 2)
+#define HDMI_ASP_MASK (1 << 2)
+#define HDMI_EN (1 << 0)
+
+/* HDMI_CON_2 */
+#define HDMI_VID_PREAMBLE_DIS (1 << 5)
+#define HDMI_GUARD_BAND_DIS (1 << 1)
+
+/* HDMI_PHY_STATUS */
+#define HDMI_PHY_STATUS_READY (1 << 0)
+
+/* HDMI_MODE_SEL */
+#define HDMI_MODE_HDMI_EN (1 << 1)
+#define HDMI_MODE_DVI_EN (1 << 0)
+#define HDMI_MODE_MASK (3 << 0)
+
+/* HDMI_TG_CMD */
+#define HDMI_TG_EN (1 << 0)
+#define HDMI_FIELD_EN (1 << 1)
+
+/* HDMI Version 1.4 */
+/* Control registers */
+/* #define HDMI_INTC_CON HDMI_CTRL_BASE(0x0000) */
+/* #define HDMI_INTC_FLAG HDMI_CTRL_BASE(0x0004) */
+#define HDMI_HDCP_KEY_LOAD HDMI_CTRL_BASE(0x0008)
+/* #define HDMI_HPD_STATUS HDMI_CTRL_BASE(0x000C) */
+#define HDMI_INTC_CON_1 HDMI_CTRL_BASE(0x0010)
+#define HDMI_INTC_FLAG_1 HDMI_CTRL_BASE(0x0014)
+#define HDMI_PHY_STATUS_0 HDMI_CTRL_BASE(0x0020)
+#define HDMI_PHY_STATUS_CMU HDMI_CTRL_BASE(0x0024)
+#define HDMI_PHY_STATUS_PLL HDMI_CTRL_BASE(0x0028)
+#define HDMI_PHY_CON_0 HDMI_CTRL_BASE(0x0030)
+#define HDMI_HPD_CTRL HDMI_CTRL_BASE(0x0040)
+#define HDMI_HPD_ST HDMI_CTRL_BASE(0x0044)
+#define HDMI_HPD_TH_X HDMI_CTRL_BASE(0x0050)
+#define HDMI_AUDIO_CLKSEL HDMI_CTRL_BASE(0x0070)
+#define HDMI_PHY_RSTOUT HDMI_CTRL_BASE(0x0074)
+#define HDMI_PHY_VPLL HDMI_CTRL_BASE(0x0078)
+#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x007C)
+#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0080)
+
+/* PHY Control bit definition */
+
+/* HDMI_PHY_CON_0 */
+#define HDMI_PHY_POWER_OFF_EN (1 << 0)
+
+/* Video related registers */
+#define HDMI_YMAX HDMI_CORE_BASE(0x0060)
+#define HDMI_YMIN HDMI_CORE_BASE(0x0064)
+#define HDMI_CMAX HDMI_CORE_BASE(0x0068)
+#define HDMI_CMIN HDMI_CORE_BASE(0x006C)
+
+#define HDMI_V2_BLANK_0 HDMI_CORE_BASE(0x00B0)
+#define HDMI_V2_BLANK_1 HDMI_CORE_BASE(0x00B4)
+#define HDMI_V1_BLANK_0 HDMI_CORE_BASE(0x00B8)
+#define HDMI_V1_BLANK_1 HDMI_CORE_BASE(0x00BC)
+
+#define HDMI_V_LINE_0 HDMI_CORE_BASE(0x00C0)
+#define HDMI_V_LINE_1 HDMI_CORE_BASE(0x00C4)
+#define HDMI_H_LINE_0 HDMI_CORE_BASE(0x00C8)
+#define HDMI_H_LINE_1 HDMI_CORE_BASE(0x00CC)
+
+#define HDMI_HSYNC_POL HDMI_CORE_BASE(0x00E0)
+
+#define HDMI_V_BLANK_F0_0 HDMI_CORE_BASE(0x0110)
+#define HDMI_V_BLANK_F0_1 HDMI_CORE_BASE(0x0114)
+#define HDMI_V_BLANK_F1_0 HDMI_CORE_BASE(0x0118)
+#define HDMI_V_BLANK_F1_1 HDMI_CORE_BASE(0x011C)
+
+#define HDMI_H_SYNC_START_0 HDMI_CORE_BASE(0x0120)
+#define HDMI_H_SYNC_START_1 HDMI_CORE_BASE(0x0124)
+#define HDMI_H_SYNC_END_0 HDMI_CORE_BASE(0x0128)
+#define HDMI_H_SYNC_END_1 HDMI_CORE_BASE(0x012C)
+
+#define HDMI_V_SYNC_LINE_BEF_2_0 HDMI_CORE_BASE(0x0130)
+#define HDMI_V_SYNC_LINE_BEF_2_1 HDMI_CORE_BASE(0x0134)
+#define HDMI_V_SYNC_LINE_BEF_1_0 HDMI_CORE_BASE(0x0138)
+#define HDMI_V_SYNC_LINE_BEF_1_1 HDMI_CORE_BASE(0x013C)
+
+#define HDMI_V_SYNC_LINE_AFT_2_0 HDMI_CORE_BASE(0x0140)
+#define HDMI_V_SYNC_LINE_AFT_2_1 HDMI_CORE_BASE(0x0144)
+#define HDMI_V_SYNC_LINE_AFT_1_0 HDMI_CORE_BASE(0x0148)
+#define HDMI_V_SYNC_LINE_AFT_1_1 HDMI_CORE_BASE(0x014C)
+
+#define HDMI_V_SYNC_LINE_AFT_PXL_2_0 HDMI_CORE_BASE(0x0150)
+#define HDMI_V_SYNC_LINE_AFT_PXL_2_1 HDMI_CORE_BASE(0x0154)
+#define HDMI_V_SYNC_LINE_AFT_PXL_1_0 HDMI_CORE_BASE(0x0158)
+#define HDMI_V_SYNC_LINE_AFT_PXL_1_1 HDMI_CORE_BASE(0x015C)
+
+#define HDMI_V_BLANK_F2_0 HDMI_CORE_BASE(0x0160)
+#define HDMI_V_BLANK_F2_1 HDMI_CORE_BASE(0x0164)
+#define HDMI_V_BLANK_F3_0 HDMI_CORE_BASE(0x0168)
+#define HDMI_V_BLANK_F3_1 HDMI_CORE_BASE(0x016C)
+#define HDMI_V_BLANK_F4_0 HDMI_CORE_BASE(0x0170)
+#define HDMI_V_BLANK_F4_1 HDMI_CORE_BASE(0x0174)
+#define HDMI_V_BLANK_F5_0 HDMI_CORE_BASE(0x0178)
+#define HDMI_V_BLANK_F5_1 HDMI_CORE_BASE(0x017C)
+
+#define HDMI_V_SYNC_LINE_AFT_3_0 HDMI_CORE_BASE(0x0180)
+#define HDMI_V_SYNC_LINE_AFT_3_1 HDMI_CORE_BASE(0x0184)
+#define HDMI_V_SYNC_LINE_AFT_4_0 HDMI_CORE_BASE(0x0188)
+#define HDMI_V_SYNC_LINE_AFT_4_1 HDMI_CORE_BASE(0x018C)
+#define HDMI_V_SYNC_LINE_AFT_5_0 HDMI_CORE_BASE(0x0190)
+#define HDMI_V_SYNC_LINE_AFT_5_1 HDMI_CORE_BASE(0x0194)
+#define HDMI_V_SYNC_LINE_AFT_6_0 HDMI_CORE_BASE(0x0198)
+#define HDMI_V_SYNC_LINE_AFT_6_1 HDMI_CORE_BASE(0x019C)
+
+#define HDMI_V_SYNC_LINE_AFT_PXL_3_0 HDMI_CORE_BASE(0x01A0)
+#define HDMI_V_SYNC_LINE_AFT_PXL_3_1 HDMI_CORE_BASE(0x01A4)
+#define HDMI_V_SYNC_LINE_AFT_PXL_4_0 HDMI_CORE_BASE(0x01A8)
+#define HDMI_V_SYNC_LINE_AFT_PXL_4_1 HDMI_CORE_BASE(0x01AC)
+#define HDMI_V_SYNC_LINE_AFT_PXL_5_0 HDMI_CORE_BASE(0x01B0)
+#define HDMI_V_SYNC_LINE_AFT_PXL_5_1 HDMI_CORE_BASE(0x01B4)
+#define HDMI_V_SYNC_LINE_AFT_PXL_6_0 HDMI_CORE_BASE(0x01B8)
+#define HDMI_V_SYNC_LINE_AFT_PXL_6_1 HDMI_CORE_BASE(0x01BC)
+
+#define HDMI_VACT_SPACE_1_0 HDMI_CORE_BASE(0x01C0)
+#define HDMI_VACT_SPACE_1_1 HDMI_CORE_BASE(0x01C4)
+#define HDMI_VACT_SPACE_2_0 HDMI_CORE_BASE(0x01C8)
+#define HDMI_VACT_SPACE_2_1 HDMI_CORE_BASE(0x01CC)
+#define HDMI_VACT_SPACE_3_0 HDMI_CORE_BASE(0x01D0)
+#define HDMI_VACT_SPACE_3_1 HDMI_CORE_BASE(0x01D4)
+#define HDMI_VACT_SPACE_4_0 HDMI_CORE_BASE(0x01D8)
+#define HDMI_VACT_SPACE_4_1 HDMI_CORE_BASE(0x01DC)
+#define HDMI_VACT_SPACE_5_0 HDMI_CORE_BASE(0x01E0)
+#define HDMI_VACT_SPACE_5_1 HDMI_CORE_BASE(0x01E4)
+#define HDMI_VACT_SPACE_6_0 HDMI_CORE_BASE(0x01E8)
+#define HDMI_VACT_SPACE_6_1 HDMI_CORE_BASE(0x01EC)
+
+#define HDMI_GCP_CON HDMI_CORE_BASE(0x0200)
+#define HDMI_GCP_BYTE1 HDMI_CORE_BASE(0x0210)
+#define HDMI_GCP_BYTE2 HDMI_CORE_BASE(0x0214)
+#define HDMI_GCP_BYTE3 HDMI_CORE_BASE(0x0218)
+
+/* Audio related registers */
+#define HDMI_ASP_CON HDMI_CORE_BASE(0x0300)
+#define HDMI_ASP_SP_FLAT HDMI_CORE_BASE(0x0304)
+#define HDMI_ASP_CHCFG0 HDMI_CORE_BASE(0x0310)
+#define HDMI_ASP_CHCFG1 HDMI_CORE_BASE(0x0314)
+#define HDMI_ASP_CHCFG2 HDMI_CORE_BASE(0x0318)
+#define HDMI_ASP_CHCFG3 HDMI_CORE_BASE(0x031C)
+
+#define HDMI_ACR_CON HDMI_CORE_BASE(0x0400)
+#define HDMI_ACR_MCTS0 HDMI_CORE_BASE(0x0410)
+#define HDMI_ACR_MCTS1 HDMI_CORE_BASE(0x0414)
+#define HDMI_ACR_MCTS2 HDMI_CORE_BASE(0x0418)
+#define HDMI_ACR_CTS0 HDMI_CORE_BASE(0x0420)
+#define HDMI_ACR_CTS1 HDMI_CORE_BASE(0x0424)
+#define HDMI_ACR_CTS2 HDMI_CORE_BASE(0x0428)
+#define HDMI_ACR_N0 HDMI_CORE_BASE(0x0430)
+#define HDMI_ACR_N1 HDMI_CORE_BASE(0x0434)
+#define HDMI_ACR_N2 HDMI_CORE_BASE(0x0438)
+
+/* Packet related registers */
+#define HDMI_ACP_CON HDMI_CORE_BASE(0x0500)
+#define HDMI_ACP_TYPE HDMI_CORE_BASE(0x0514)
+#define HDMI_ACP_DATA(n) HDMI_CORE_BASE(0x0520 + 4 * (n))
+
+#define HDMI_ISRC_CON HDMI_CORE_BASE(0x0600)
+#define HDMI_ISRC1_HEADER1 HDMI_CORE_BASE(0x0614)
+#define HDMI_ISRC1_DATA(n) HDMI_CORE_BASE(0x0620 + 4 * (n))
+#define HDMI_ISRC2_DATA(n) HDMI_CORE_BASE(0x06A0 + 4 * (n))
+
+#define HDMI_AVI_CON HDMI_CORE_BASE(0x0700)
+#define HDMI_AVI_HEADER0 HDMI_CORE_BASE(0x0710)
+#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714)
+#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718)
+#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C)
+#define HDMI_AVI_BYTE(n)		HDMI_CORE_BASE(0x0720 + 4 * ((n) - 1))
+
+#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800)
+#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810)
+#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814)
+#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818)
+#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C)
+#define HDMI_AUI_BYTE(n)		HDMI_CORE_BASE(0x0820 + 4 * ((n) - 1))
+
+#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900)
+#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C)
+#define HDMI_MPG_DATA(n) HDMI_CORE_BASE(0x0920 + 4 * (n))
+
+#define HDMI_SPD_CON HDMI_CORE_BASE(0x0A00)
+#define HDMI_SPD_HEADER0 HDMI_CORE_BASE(0x0A10)
+#define HDMI_SPD_HEADER1 HDMI_CORE_BASE(0x0A14)
+#define HDMI_SPD_HEADER2 HDMI_CORE_BASE(0x0A18)
+#define HDMI_SPD_DATA(n) HDMI_CORE_BASE(0x0A20 + 4 * (n))
+
+#define HDMI_GAMUT_CON HDMI_CORE_BASE(0x0B00)
+#define HDMI_GAMUT_HEADER0 HDMI_CORE_BASE(0x0B10)
+#define HDMI_GAMUT_HEADER1 HDMI_CORE_BASE(0x0B14)
+#define HDMI_GAMUT_HEADER2 HDMI_CORE_BASE(0x0B18)
+#define HDMI_GAMUT_METADATA(n) HDMI_CORE_BASE(0x0B20 + 4 * (n))
+
+#define HDMI_VSI_CON HDMI_CORE_BASE(0x0C00)
+#define HDMI_VSI_HEADER0 HDMI_CORE_BASE(0x0C10)
+#define HDMI_VSI_HEADER1 HDMI_CORE_BASE(0x0C14)
+#define HDMI_VSI_HEADER2 HDMI_CORE_BASE(0x0C18)
+#define HDMI_VSI_DATA(n) HDMI_CORE_BASE(0x0C20 + 4 * (n))
+
+#define HDMI_DC_CONTROL HDMI_CORE_BASE(0x0D00)
+#define HDMI_VIDEO_PATTERN_GEN HDMI_CORE_BASE(0x0D04)
+
+#define HDMI_AN_SEED_SEL HDMI_CORE_BASE(0x0E48)
+#define HDMI_AN_SEED_0 HDMI_CORE_BASE(0x0E58)
+#define HDMI_AN_SEED_1 HDMI_CORE_BASE(0x0E5C)
+#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60)
+#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64)
+
+/* AVI bit definition */
+#define HDMI_AVI_CON_DO_NOT_TRANSMIT (0 << 1)
+#define HDMI_AVI_CON_EVERY_VSYNC (1 << 1)
+
+#define AVI_ACTIVE_FORMAT_VALID (1 << 4)
+#define AVI_UNDERSCANNED_DISPLAY_VALID (1 << 1)
+
+/* AUI bit definition */
+#define HDMI_AUI_CON_NO_TRAN (0 << 0)
+
+/* VSI bit definition */
+#define HDMI_VSI_CON_DO_NOT_TRANSMIT (0 << 0)
+
+/* HDCP related registers */
+#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n))
+#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n))
+
+#define HDMI_HDCP_KSV_LIST_CON HDMI_CORE_BASE(0x7064)
+#define HDMI_HDCP_SHA_RESULT HDMI_CORE_BASE(0x7070)
+#define HDMI_HDCP_CTRL1 HDMI_CORE_BASE(0x7080)
+#define HDMI_HDCP_CTRL2 HDMI_CORE_BASE(0x7084)
+#define HDMI_HDCP_CHECK_RESULT HDMI_CORE_BASE(0x7090)
+#define HDMI_HDCP_BKSV(n) HDMI_CORE_BASE(0x70A0 + 4 * (n))
+#define HDMI_HDCP_AKSV(n) HDMI_CORE_BASE(0x70C0 + 4 * (n))
+#define HDMI_HDCP_AN(n) HDMI_CORE_BASE(0x70E0 + 4 * (n))
+
+#define HDMI_HDCP_BCAPS HDMI_CORE_BASE(0x7100)
+#define HDMI_HDCP_BSTATUS_0 HDMI_CORE_BASE(0x7110)
+#define HDMI_HDCP_BSTATUS_1 HDMI_CORE_BASE(0x7114)
+#define HDMI_HDCP_RI_0 HDMI_CORE_BASE(0x7140)
+#define HDMI_HDCP_RI_1 HDMI_CORE_BASE(0x7144)
+#define HDMI_HDCP_I2C_INT HDMI_CORE_BASE(0x7180)
+#define HDMI_HDCP_AN_INT HDMI_CORE_BASE(0x7190)
+#define HDMI_HDCP_WDT_INT HDMI_CORE_BASE(0x71A0)
+#define HDMI_HDCP_RI_INT HDMI_CORE_BASE(0x71B0)
+#define HDMI_HDCP_RI_COMPARE_0 HDMI_CORE_BASE(0x71D0)
+#define HDMI_HDCP_RI_COMPARE_1 HDMI_CORE_BASE(0x71D4)
+#define HDMI_HDCP_FRAME_COUNT HDMI_CORE_BASE(0x71E0)
+
+#define HDMI_RGB_ROUND_EN HDMI_CORE_BASE(0xD500)
+#define HDMI_VACT_SPACE_R_0 HDMI_CORE_BASE(0xD504)
+#define HDMI_VACT_SPACE_R_1 HDMI_CORE_BASE(0xD508)
+#define HDMI_VACT_SPACE_G_0 HDMI_CORE_BASE(0xD50C)
+#define HDMI_VACT_SPACE_G_1 HDMI_CORE_BASE(0xD510)
+#define HDMI_VACT_SPACE_B_0 HDMI_CORE_BASE(0xD514)
+#define HDMI_VACT_SPACE_B_1 HDMI_CORE_BASE(0xD518)
+
+#define HDMI_BLUE_SCREEN_B_0 HDMI_CORE_BASE(0xD520)
+#define HDMI_BLUE_SCREEN_B_1 HDMI_CORE_BASE(0xD524)
+#define HDMI_BLUE_SCREEN_G_0 HDMI_CORE_BASE(0xD528)
+#define HDMI_BLUE_SCREEN_G_1 HDMI_CORE_BASE(0xD52C)
+#define HDMI_BLUE_SCREEN_R_0 HDMI_CORE_BASE(0xD530)
+#define HDMI_BLUE_SCREEN_R_1 HDMI_CORE_BASE(0xD534)
+
+/* HDMI I2S register */
+#define HDMI_I2S_CLK_CON HDMI_I2S_BASE(0x000)
+#define HDMI_I2S_CON_1 HDMI_I2S_BASE(0x004)
+#define HDMI_I2S_CON_2 HDMI_I2S_BASE(0x008)
+#define HDMI_I2S_PIN_SEL_0 HDMI_I2S_BASE(0x00c)
+#define HDMI_I2S_PIN_SEL_1 HDMI_I2S_BASE(0x010)
+#define HDMI_I2S_PIN_SEL_2 HDMI_I2S_BASE(0x014)
+#define HDMI_I2S_PIN_SEL_3 HDMI_I2S_BASE(0x018)
+#define HDMI_I2S_DSD_CON HDMI_I2S_BASE(0x01c)
+#define HDMI_I2S_MUX_CON HDMI_I2S_BASE(0x020)
+#define HDMI_I2S_CH_ST_CON HDMI_I2S_BASE(0x024)
+#define HDMI_I2S_CH_ST_0 HDMI_I2S_BASE(0x028)
+#define HDMI_I2S_CH_ST_1 HDMI_I2S_BASE(0x02c)
+#define HDMI_I2S_CH_ST_2 HDMI_I2S_BASE(0x030)
+#define HDMI_I2S_CH_ST_3 HDMI_I2S_BASE(0x034)
+#define HDMI_I2S_CH_ST_4 HDMI_I2S_BASE(0x038)
+#define HDMI_I2S_CH_ST_SH_0 HDMI_I2S_BASE(0x03c)
+#define HDMI_I2S_CH_ST_SH_1 HDMI_I2S_BASE(0x040)
+#define HDMI_I2S_CH_ST_SH_2 HDMI_I2S_BASE(0x044)
+#define HDMI_I2S_CH_ST_SH_3 HDMI_I2S_BASE(0x048)
+#define HDMI_I2S_CH_ST_SH_4 HDMI_I2S_BASE(0x04c)
+#define HDMI_I2S_MUX_CH HDMI_I2S_BASE(0x054)
+#define HDMI_I2S_MUX_CUV HDMI_I2S_BASE(0x058)
+
+/* I2S bit definition */
+
+/* I2S_CLK_CON */
+#define HDMI_I2S_CLK_DIS (0)
+#define HDMI_I2S_CLK_EN (1)
+
+/* I2S_CON_1 */
+#define HDMI_I2S_SCLK_FALLING_EDGE (0 << 1)
+#define HDMI_I2S_SCLK_RISING_EDGE (1 << 1)
+#define HDMI_I2S_L_CH_LOW_POL (0)
+#define HDMI_I2S_L_CH_HIGH_POL (1)
+
+/* I2S_CON_2 */
+#define HDMI_I2S_MSB_FIRST_MODE (0 << 6)
+#define HDMI_I2S_LSB_FIRST_MODE (1 << 6)
+#define HDMI_I2S_BIT_CH_32FS (0 << 4)
+#define HDMI_I2S_BIT_CH_48FS (1 << 4)
+#define HDMI_I2S_BIT_CH_RESERVED (2 << 4)
+#define HDMI_I2S_SDATA_16BIT (1 << 2)
+#define HDMI_I2S_SDATA_20BIT (2 << 2)
+#define HDMI_I2S_SDATA_24BIT (3 << 2)
+#define HDMI_I2S_BASIC_FORMAT (0)
+#define HDMI_I2S_L_JUST_FORMAT (2)
+#define HDMI_I2S_R_JUST_FORMAT (3)
+#define HDMI_I2S_CON_2_CLR (~(0xFF))
+#define HDMI_I2S_SET_BIT_CH(x) (((x) & 0x7) << 4)
+#define HDMI_I2S_SET_SDATA_BIT(x) (((x) & 0x7) << 2)
+
+/* I2S_PIN_SEL_0 */
+#define HDMI_I2S_SEL_SCLK(x) (((x) & 0x7) << 4)
+#define HDMI_I2S_SEL_LRCK(x) ((x) & 0x7)
+
+/* I2S_PIN_SEL_1 */
+#define HDMI_I2S_SEL_SDATA1(x) (((x) & 0x7) << 4)
+#define HDMI_I2S_SEL_SDATA2(x) ((x) & 0x7)
+
+/* I2S_PIN_SEL_2 */
+#define HDMI_I2S_SEL_SDATA3(x) (((x) & 0x7) << 4)
+#define HDMI_I2S_SEL_SDATA2(x) ((x) & 0x7)
+
+/* I2S_PIN_SEL_3 */
+#define HDMI_I2S_SEL_DSD(x) ((x) & 0x7)
+
+/* I2S_DSD_CON */
+#define HDMI_I2S_DSD_CLK_RI_EDGE (1 << 1)
+#define HDMI_I2S_DSD_CLK_FA_EDGE (0 << 1)
+#define HDMI_I2S_DSD_ENABLE (1)
+#define HDMI_I2S_DSD_DISABLE (0)
+
+/* I2S_MUX_CON */
+#define HDMI_I2S_NOISE_FILTER_ZERO (0 << 5)
+#define HDMI_I2S_NOISE_FILTER_2_STAGE (1 << 5)
+#define HDMI_I2S_NOISE_FILTER_3_STAGE (2 << 5)
+#define HDMI_I2S_NOISE_FILTER_4_STAGE (3 << 5)
+#define HDMI_I2S_NOISE_FILTER_5_STAGE (4 << 5)
+#define HDMI_I2S_IN_DISABLE (1 << 4)
+#define HDMI_I2S_IN_ENABLE (0 << 4)
+#define HDMI_I2S_AUD_SPDIF (0 << 2)
+#define HDMI_I2S_AUD_I2S (1 << 2)
+#define HDMI_I2S_AUD_DSD (2 << 2)
+#define HDMI_I2S_CUV_SPDIF_ENABLE (0 << 1)
+#define HDMI_I2S_CUV_I2S_ENABLE (1 << 1)
+#define HDMI_I2S_MUX_DISABLE (0)
+#define HDMI_I2S_MUX_ENABLE (1)
+#define HDMI_I2S_MUX_CON_CLR (~(0xFF))
+
+/* I2S_CH_ST_CON */
+#define HDMI_I2S_CH_STATUS_RELOAD (1)
+#define HDMI_I2S_CH_ST_CON_CLR (~(1))
+
+/* I2S_CH_ST_0 / I2S_CH_ST_SH_0 */
+#define HDMI_I2S_CH_STATUS_MODE_0 (0 << 6)
+#define HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH (0 << 3)
+#define HDMI_I2S_2AUD_CH_WITH_PREEMPH (1 << 3)
+#define HDMI_I2S_DEFAULT_EMPHASIS (0 << 3)
+#define HDMI_I2S_COPYRIGHT (0 << 2)
+#define HDMI_I2S_NO_COPYRIGHT (1 << 2)
+#define HDMI_I2S_LINEAR_PCM (0 << 1)
+#define HDMI_I2S_NO_LINEAR_PCM (1 << 1)
+#define HDMI_I2S_CONSUMER_FORMAT (0)
+#define HDMI_I2S_PROF_FORMAT (1)
+#define HDMI_I2S_CH_ST_0_CLR (~(0xFF))
+
+/* I2S_CH_ST_1 / I2S_CH_ST_SH_1 */
+#define HDMI_I2S_CD_PLAYER (0x00)
+#define HDMI_I2S_DAT_PLAYER (0x03)
+#define HDMI_I2S_DCC_PLAYER (0x43)
+#define HDMI_I2S_MINI_DISC_PLAYER (0x49)
+
+/* I2S_CH_ST_2 / I2S_CH_ST_SH_2 */
+#define HDMI_I2S_CHANNEL_NUM_MASK (0xF << 4)
+#define HDMI_I2S_SOURCE_NUM_MASK (0xF)
+#define HDMI_I2S_SET_CHANNEL_NUM(x) (((x) & (0xF)) << 4)
+#define HDMI_I2S_SET_SOURCE_NUM(x) ((x) & (0xF))
+
+/* I2S_CH_ST_3 / I2S_CH_ST_SH_3 */
+#define HDMI_I2S_CLK_ACCUR_LEVEL_1 (1 << 4)
+#define HDMI_I2S_CLK_ACCUR_LEVEL_2 (0 << 4)
+#define HDMI_I2S_CLK_ACCUR_LEVEL_3 (2 << 4)
+#define HDMI_I2S_SMP_FREQ_44_1 (0x0)
+#define HDMI_I2S_SMP_FREQ_48 (0x2)
+#define HDMI_I2S_SMP_FREQ_32 (0x3)
+#define HDMI_I2S_SMP_FREQ_96 (0xA)
+#define HDMI_I2S_SET_SMP_FREQ(x) ((x) & (0xF))
+
+/* I2S_CH_ST_4 / I2S_CH_ST_SH_4 */
+#define HDMI_I2S_ORG_SMP_FREQ_44_1 (0xF << 4)
+#define HDMI_I2S_ORG_SMP_FREQ_88_2 (0x7 << 4)
+#define HDMI_I2S_ORG_SMP_FREQ_22_05 (0xB << 4)
+#define HDMI_I2S_ORG_SMP_FREQ_176_4 (0x3 << 4)
+#define HDMI_I2S_WORD_LEN_NOT_DEFINE (0x0 << 1)
+#define HDMI_I2S_WORD_LEN_MAX24_20BITS (0x1 << 1)
+#define HDMI_I2S_WORD_LEN_MAX24_22BITS (0x2 << 1)
+#define HDMI_I2S_WORD_LEN_MAX24_23BITS (0x4 << 1)
+#define HDMI_I2S_WORD_LEN_MAX24_24BITS (0x5 << 1)
+#define HDMI_I2S_WORD_LEN_MAX24_21BITS (0x6 << 1)
+#define HDMI_I2S_WORD_LEN_MAX20_16BITS (0x1 << 1)
+#define HDMI_I2S_WORD_LEN_MAX20_18BITS (0x2 << 1)
+#define HDMI_I2S_WORD_LEN_MAX20_19BITS (0x4 << 1)
+#define HDMI_I2S_WORD_LEN_MAX20_20BITS (0x5 << 1)
+#define HDMI_I2S_WORD_LEN_MAX20_17BITS (0x6 << 1)
+#define HDMI_I2S_WORD_LEN_MAX_24BITS (1)
+#define HDMI_I2S_WORD_LEN_MAX_20BITS (0)
+
+/* I2S_MUX_CH */
+#define HDMI_I2S_CH3_R_EN (1 << 7)
+#define HDMI_I2S_CH3_L_EN (1 << 6)
+#define HDMI_I2S_CH3_EN (3 << 6)
+#define HDMI_I2S_CH2_R_EN (1 << 5)
+#define HDMI_I2S_CH2_L_EN (1 << 4)
+#define HDMI_I2S_CH2_EN (3 << 4)
+#define HDMI_I2S_CH1_R_EN (1 << 3)
+#define HDMI_I2S_CH1_L_EN (1 << 2)
+#define HDMI_I2S_CH1_EN (3 << 2)
+#define HDMI_I2S_CH0_R_EN (1 << 1)
+#define HDMI_I2S_CH0_L_EN (1)
+#define HDMI_I2S_CH0_EN (3)
+#define HDMI_I2S_CH_ALL_EN (0xFF)
+#define HDMI_I2S_MUX_CH_CLR (~HDMI_I2S_CH_ALL_EN)
+
+/* I2S_MUX_CUV */
+#define HDMI_I2S_CUV_R_EN (1 << 1)
+#define HDMI_I2S_CUV_L_EN (1)
+#define HDMI_I2S_CUV_RL_EN (0x03)
+
+/* I2S_CUV_L_R */
+#define HDMI_I2S_CUV_R_DATA_MASK (0x7 << 4)
+#define HDMI_I2S_CUV_L_DATA_MASK (0x7)
+
+/* Timing generator registers */
+/* TG configure/status registers */
+#define HDMI_TG_VACT_ST3_L HDMI_TG_BASE(0x0068)
+#define HDMI_TG_VACT_ST3_H HDMI_TG_BASE(0x006c)
+#define HDMI_TG_VACT_ST4_L HDMI_TG_BASE(0x0070)
+#define HDMI_TG_VACT_ST4_H HDMI_TG_BASE(0x0074)
+#define HDMI_TG_3D HDMI_TG_BASE(0x00F0)
+
+/* HDMI PHY Registers Offsets */
+#define HDMIPHY_POWER (0x74 >> 2)
+#define HDMIPHY_MODE_SET_DONE (0x7c >> 2)
+
+/* HDMI PHY Power Values */
+#define HDMI_PHY_POWER_ON 0x80
+#define HDMI_PHY_POWER_OFF 0xff
+
+/* HDMI PHY Mode Set Values */
+#define HDMI_PHY_DISABLE_MODE_SET 0x80
+#define HDMI_PHY_ENABLE_MODE_SET 0x00
+
+/* PMU Registers for PHY */
+#define PMU_HDMI_PHY_CONTROL 0x700
+#define PMU_HDMI_PHY_ENABLE_BIT BIT(0)
+
+#endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
new file mode 100644
index 00000000000..5f32e1a2941
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -0,0 +1,152 @@
+/*
+ *
+ * Cloned from drivers/media/video/s5p-tv/regs-mixer.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Mixer register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef SAMSUNG_REGS_MIXER_H
+#define SAMSUNG_REGS_MIXER_H
+
+/*
+ * Register part
+ */
+#define MXR_STATUS 0x0000
+#define MXR_CFG 0x0004
+#define MXR_INT_EN 0x0008
+#define MXR_INT_STATUS 0x000C
+#define MXR_LAYER_CFG 0x0010
+#define MXR_VIDEO_CFG 0x0014
+#define MXR_GRAPHIC0_CFG 0x0020
+#define MXR_GRAPHIC0_BASE 0x0024
+#define MXR_GRAPHIC0_SPAN 0x0028
+#define MXR_GRAPHIC0_SXY 0x002C
+#define MXR_GRAPHIC0_WH 0x0030
+#define MXR_GRAPHIC0_DXY 0x0034
+#define MXR_GRAPHIC0_BLANK 0x0038
+#define MXR_GRAPHIC1_CFG 0x0040
+#define MXR_GRAPHIC1_BASE 0x0044
+#define MXR_GRAPHIC1_SPAN 0x0048
+#define MXR_GRAPHIC1_SXY 0x004C
+#define MXR_GRAPHIC1_WH 0x0050
+#define MXR_GRAPHIC1_DXY 0x0054
+#define MXR_GRAPHIC1_BLANK 0x0058
+#define MXR_BG_CFG 0x0060
+#define MXR_BG_COLOR0 0x0064
+#define MXR_BG_COLOR1 0x0068
+#define MXR_BG_COLOR2 0x006C
+#define MXR_CM_COEFF_Y 0x0080
+#define MXR_CM_COEFF_CB 0x0084
+#define MXR_CM_COEFF_CR 0x0088
+#define MXR_MO 0x0304
+#define MXR_RESOLUTION 0x0310
+
+#define MXR_GRAPHIC0_BASE_S 0x2024
+#define MXR_GRAPHIC1_BASE_S 0x2044
+
+/* for parametrized access to layer registers */
+#define MXR_GRAPHIC_CFG(i) (0x0020 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE(i) (0x0024 + (i) * 0x20)
+#define MXR_GRAPHIC_SPAN(i) (0x0028 + (i) * 0x20)
+#define MXR_GRAPHIC_SXY(i) (0x002C + (i) * 0x20)
+#define MXR_GRAPHIC_WH(i) (0x0030 + (i) * 0x20)
+#define MXR_GRAPHIC_DXY(i) (0x0034 + (i) * 0x20)
+#define MXR_GRAPHIC_BLANK(i) (0x0038 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE_S(i) (0x2024 + (i) * 0x20)
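+
+/*
+ * For example, MXR_GRAPHIC_CFG(1) evaluates to 0x0040, matching the
+ * fixed MXR_GRAPHIC1_CFG definition above.
+ */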
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+#define MXR_MASK(high_bit, low_bit) \
+ (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define MXR_MASK_VAL(val, high_bit, low_bit) \
+ (((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))
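+
+/*
+ * Example: MXR_MASK(26, 16) expands to ((2 << 10) - 1) << 16, i.e.
+ * 0x07ff0000, and MXR_MASK_VAL(v, 26, 16) shifts v into that field
+ * while discarding any bits that do not fit.
+ */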
+
+/* bits for MXR_STATUS */
+#define MXR_STATUS_SOFT_RESET (1 << 8)
+#define MXR_STATUS_16_BURST (1 << 7)
+#define MXR_STATUS_BURST_MASK (1 << 7)
+#define MXR_STATUS_BIG_ENDIAN (1 << 3)
+#define MXR_STATUS_ENDIAN_MASK (1 << 3)
+#define MXR_STATUS_SYNC_ENABLE (1 << 2)
+#define MXR_STATUS_REG_IDLE (1 << 1)
+#define MXR_STATUS_REG_RUN (1 << 0)
+
+/* bits for MXR_CFG */
+#define MXR_CFG_LAYER_UPDATE (1 << 31)
+#define MXR_CFG_LAYER_UPDATE_COUNT_MASK (3 << 29)
+#define MXR_CFG_RGB601_0_255 (0 << 9)
+#define MXR_CFG_RGB601_16_235 (1 << 9)
+#define MXR_CFG_RGB709_0_255 (2 << 9)
+#define MXR_CFG_RGB709_16_235 (3 << 9)
+#define MXR_CFG_RGB_FMT_MASK 0x600
+#define MXR_CFG_OUT_YUV444 (0 << 8)
+#define MXR_CFG_OUT_RGB888 (1 << 8)
+#define MXR_CFG_OUT_MASK (1 << 8)
+#define MXR_CFG_DST_SDO (0 << 7)
+#define MXR_CFG_DST_HDMI (1 << 7)
+#define MXR_CFG_DST_MASK (1 << 7)
+#define MXR_CFG_SCAN_HD_720 (0 << 6)
+#define MXR_CFG_SCAN_HD_1080 (1 << 6)
+#define MXR_CFG_GRP1_ENABLE (1 << 5)
+#define MXR_CFG_GRP0_ENABLE (1 << 4)
+#define MXR_CFG_VP_ENABLE (1 << 3)
+#define MXR_CFG_SCAN_INTERLACE (0 << 2)
+#define MXR_CFG_SCAN_PROGRASSIVE (1 << 2)
+#define MXR_CFG_SCAN_NTSC (0 << 1)
+#define MXR_CFG_SCAN_PAL (1 << 1)
+#define MXR_CFG_SCAN_SD (0 << 0)
+#define MXR_CFG_SCAN_HD (1 << 0)
+#define MXR_CFG_SCAN_MASK 0x47
+
+/* bits for MXR_GRAPHICn_CFG */
+#define MXR_GRP_CFG_COLOR_KEY_DISABLE (1 << 21)
+#define MXR_GRP_CFG_BLEND_PRE_MUL (1 << 20)
+#define MXR_GRP_CFG_WIN_BLEND_EN (1 << 17)
+#define MXR_GRP_CFG_PIXEL_BLEND_EN (1 << 16)
+#define MXR_GRP_CFG_FORMAT_VAL(x) MXR_MASK_VAL(x, 11, 8)
+#define MXR_GRP_CFG_FORMAT_MASK MXR_GRP_CFG_FORMAT_VAL(~0)
+#define MXR_GRP_CFG_ALPHA_VAL(x) MXR_MASK_VAL(x, 7, 0)
+
+/* bits for MXR_GRAPHICn_WH */
+#define MXR_GRP_WH_H_SCALE(x) MXR_MASK_VAL(x, 28, 28)
+#define MXR_GRP_WH_V_SCALE(x) MXR_MASK_VAL(x, 12, 12)
+#define MXR_GRP_WH_WIDTH(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_WH_HEIGHT(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_RESOLUTION */
+#define MXR_MXR_RES_HEIGHT(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_MXR_RES_WIDTH(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_SXY */
+#define MXR_GRP_SXY_SX(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_SXY_SY(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_DXY */
+#define MXR_GRP_DXY_DX(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_DXY_DY(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_INT_EN */
+#define MXR_INT_EN_VSYNC (1 << 11)
+#define MXR_INT_EN_ALL (0x0f << 8)
+
+/* bit for MXR_INT_STATUS */
+#define MXR_INT_CLEAR_VSYNC (1 << 11)
+#define MXR_INT_STATUS_VSYNC (1 << 0)
+
+/* bit for MXR_LAYER_CFG */
+#define MXR_LAYER_CFG_GRP1_VAL(x) MXR_MASK_VAL(x, 11, 8)
+#define MXR_LAYER_CFG_GRP0_VAL(x) MXR_MASK_VAL(x, 7, 4)
+#define MXR_LAYER_CFG_VP_VAL(x) MXR_MASK_VAL(x, 3, 0)
+
+#endif /* SAMSUNG_REGS_MIXER_H */
diff --git a/drivers/gpu/drm/exynos/regs-rotator.h b/drivers/gpu/drm/exynos/regs-rotator.h
new file mode 100644
index 00000000000..a09ac6e180d
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-rotator.h
@@ -0,0 +1,73 @@
+/* drivers/gpu/drm/exynos/regs-rotator.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register definition file for Samsung Rotator Interface (Rotator) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_ROTATOR_H
+#define EXYNOS_REGS_ROTATOR_H
+
+/* Configuration */
+#define ROT_CONFIG 0x00
+#define ROT_CONFIG_IRQ (3 << 8)
+
+/* Image Control */
+#define ROT_CONTROL 0x10
+#define ROT_CONTROL_PATTERN_WRITE (1 << 16)
+#define ROT_CONTROL_FMT_YCBCR420_2P (1 << 8)
+#define ROT_CONTROL_FMT_RGB888 (6 << 8)
+#define ROT_CONTROL_FMT_MASK (7 << 8)
+#define ROT_CONTROL_FLIP_VERTICAL (2 << 6)
+#define ROT_CONTROL_FLIP_HORIZONTAL (3 << 6)
+#define ROT_CONTROL_FLIP_MASK (3 << 6)
+#define ROT_CONTROL_ROT_90 (1 << 4)
+#define ROT_CONTROL_ROT_180 (2 << 4)
+#define ROT_CONTROL_ROT_270 (3 << 4)
+#define ROT_CONTROL_ROT_MASK (3 << 4)
+#define ROT_CONTROL_START (1 << 0)
+
+/* Status */
+#define ROT_STATUS 0x20
+#define ROT_STATUS_IRQ_PENDING(x) (1 << (x))
+#define ROT_STATUS_IRQ(x) (((x) >> 8) & 0x3)
+#define ROT_STATUS_IRQ_VAL_COMPLETE 1
+#define ROT_STATUS_IRQ_VAL_ILLEGAL 2
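+
+/*
+ * Decode sketch (illustrative; readl() is assumed from <linux/io.h>):
+ *
+ *	u32 st = readl(regs + ROT_STATUS);
+ *	bool done = ROT_STATUS_IRQ(st) == ROT_STATUS_IRQ_VAL_COMPLETE;
+ *	bool bad = ROT_STATUS_IRQ(st) == ROT_STATUS_IRQ_VAL_ILLEGAL;
+ */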
+
+/* Buffer Address */
+#define ROT_SRC_BUF_ADDR(n) (0x30 + ((n) << 2))
+#define ROT_DST_BUF_ADDR(n) (0x50 + ((n) << 2))
+
+/* Buffer Size */
+#define ROT_SRC_BUF_SIZE 0x3c
+#define ROT_DST_BUF_SIZE 0x5c
+#define ROT_SET_BUF_SIZE_H(x) ((x) << 16)
+#define ROT_SET_BUF_SIZE_W(x) ((x) << 0)
+#define ROT_GET_BUF_SIZE_H(x) ((x) >> 16)
+#define ROT_GET_BUF_SIZE_W(x) ((x) & 0xffff)
+
+/* Crop Position */
+#define ROT_SRC_CROP_POS 0x40
+#define ROT_DST_CROP_POS 0x60
+#define ROT_CROP_POS_Y(x) ((x) << 16)
+#define ROT_CROP_POS_X(x) ((x) << 0)
+
+/* Source Crop Size */
+#define ROT_SRC_CROP_SIZE 0x44
+#define ROT_SRC_CROP_SIZE_H(x) ((x) << 16)
+#define ROT_SRC_CROP_SIZE_W(x) ((x) << 0)
+
+/* Round to nearest aligned value */
+#define ROT_ALIGN(x, align, mask) (((x) + (1 << ((align) - 1))) & (mask))
+/* Minimum limit value */
+#define ROT_MIN(min, mask) (((min) + ~(mask)) & (mask))
+/* Maximum limit value */
+#define ROT_MAX(max, mask) ((max) & (mask))
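+
+/*
+ * Example, assuming mask == ~((1 << align) - 1): ROT_ALIGN(x, 2, ~3)
+ * evaluates to (x + 2) & ~3, i.e. x rounded to the nearest multiple
+ * of 4; ROT_MIN(5, ~3) rounds the limit up to 8 and ROT_MAX(10, ~3)
+ * truncates it down to 8.
+ */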
+
+#endif /* EXYNOS_REGS_ROTATOR_H */
diff --git a/drivers/gpu/drm/exynos/regs-vp.h b/drivers/gpu/drm/exynos/regs-vp.h
new file mode 100644
index 00000000000..10b737af0a7
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-vp.h
@@ -0,0 +1,91 @@
+/*
+ *
+ * Cloned from drivers/media/video/s5p-tv/regs-vp.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Video processor register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SAMSUNG_REGS_VP_H
+#define SAMSUNG_REGS_VP_H
+
+/*
+ * Register part
+ */
+
+#define VP_ENABLE 0x0000
+#define VP_SRESET 0x0004
+#define VP_SHADOW_UPDATE 0x0008
+#define VP_FIELD_ID 0x000C
+#define VP_MODE 0x0010
+#define VP_IMG_SIZE_Y 0x0014
+#define VP_IMG_SIZE_C 0x0018
+#define VP_PER_RATE_CTRL 0x001C
+#define VP_TOP_Y_PTR 0x0028
+#define VP_BOT_Y_PTR 0x002C
+#define VP_TOP_C_PTR 0x0030
+#define VP_BOT_C_PTR 0x0034
+#define VP_ENDIAN_MODE 0x03CC
+#define VP_SRC_H_POSITION 0x0044
+#define VP_SRC_V_POSITION 0x0048
+#define VP_SRC_WIDTH 0x004C
+#define VP_SRC_HEIGHT 0x0050
+#define VP_DST_H_POSITION 0x0054
+#define VP_DST_V_POSITION 0x0058
+#define VP_DST_WIDTH 0x005C
+#define VP_DST_HEIGHT 0x0060
+#define VP_H_RATIO 0x0064
+#define VP_V_RATIO 0x0068
+#define VP_POLY8_Y0_LL 0x006C
+#define VP_POLY4_Y0_LL 0x00EC
+#define VP_POLY4_C0_LL 0x012C
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+
+#define VP_MASK(high_bit, low_bit) \
+ (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define VP_MASK_VAL(val, high_bit, low_bit) \
+ (((val) << (low_bit)) & VP_MASK(high_bit, low_bit))
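+
+/*
+ * Example: VP_MASK(29, 16) expands to ((2 << 13) - 1) << 16, i.e.
+ * 0x3fff0000, the field consumed by VP_IMG_HSIZE() below.
+ */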
+
+/* VP_ENABLE */
+#define VP_ENABLE_ON (1 << 0)
+
+/* VP_SRESET */
+#define VP_SRESET_PROCESSING (1 << 0)
+
+/* VP_SHADOW_UPDATE */
+#define VP_SHADOW_UPDATE_ENABLE (1 << 0)
+
+/* VP_MODE */
+#define VP_MODE_NV12 (0 << 6)
+#define VP_MODE_NV21 (1 << 6)
+#define VP_MODE_LINE_SKIP (1 << 5)
+#define VP_MODE_MEM_LINEAR (0 << 4)
+#define VP_MODE_MEM_TILED (1 << 4)
+#define VP_MODE_FMT_MASK (5 << 4)
+#define VP_MODE_FIELD_ID_AUTO_TOGGLING (1 << 2)
+#define VP_MODE_2D_IPC (1 << 1)
+
+/* VP_IMG_SIZE_Y */
+/* VP_IMG_SIZE_C */
+#define VP_IMG_HSIZE(x) VP_MASK_VAL(x, 29, 16)
+#define VP_IMG_VSIZE(x) VP_MASK_VAL(x, 13, 0)
+
+/* VP_SRC_H_POSITION */
+#define VP_SRC_H_POSITION_VAL(x) VP_MASK_VAL(x, 14, 4)
+
+/* VP_ENDIAN_MODE */
+#define VP_ENDIAN_MODE_LITTLE (1 << 0)
+
+#endif /* SAMSUNG_REGS_VP_H */
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
new file mode 100644
index 00000000000..17f928ec84e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -0,0 +1,38 @@
+config DRM_GMA500
+ tristate "Intel GMA5/600 KMS Framebuffer"
+ depends on DRM && PCI && X86
+ select FB_CFB_COPYAREA
+ select FB_CFB_FILLRECT
+ select FB_CFB_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_TTM
+ # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
+ select ACPI_VIDEO if ACPI
+ select BACKLIGHT_CLASS_DEVICE if ACPI
+ select INPUT if ACPI
+ help
+ Say yes for an experimental 2D KMS framebuffer driver for the
+ Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
+ devices.
+
+config DRM_GMA600
+ bool "Intel GMA600 support (Experimental)"
+ depends on DRM_GMA500
+ help
+ Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
+ platforms with LVDS ports. MIPI is not currently supported.
+
+config DRM_GMA3600
+ bool "Intel GMA3600/3650 support (Experimental)"
+ depends on DRM_GMA500
+ help
+ Say yes to include basic support for Intel GMA3600/3650 (Intel
+ Cedar Trail) platforms.
+
+config DRM_MEDFIELD
+ bool "Intel Medfield support (Experimental)"
+ depends on DRM_GMA500 && X86_INTEL_MID
+ help
+ Say yes to include support for the Intel Medfield platform.
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
new file mode 100644
index 00000000000..b1531557637
--- /dev/null
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -0,0 +1,55 @@
+#
+# KMS driver for the GMA500
+#
+ccflags-y += -I$(srctree)/include/drm
+
+gma500_gfx-y += \
+ accel_2d.o \
+ backlight.o \
+ framebuffer.o \
+ gem.o \
+ gtt.o \
+ intel_bios.o \
+ intel_i2c.o \
+ intel_gmbus.o \
+ mmu.o \
+ blitter.o \
+ power.o \
+ psb_drv.o \
+ gma_display.o \
+ gma_device.o \
+ psb_intel_display.o \
+ psb_intel_lvds.o \
+ psb_intel_modes.o \
+ psb_intel_sdvo.o \
+ psb_lid.o \
+ psb_irq.o \
+ psb_device.o \
+ mid_bios.o
+
+gma500_gfx-$(CONFIG_ACPI) += opregion.o
+
+gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
+ cdv_intel_crt.o \
+ cdv_intel_display.o \
+ cdv_intel_hdmi.o \
+ cdv_intel_lvds.o \
+ cdv_intel_dp.o
+
+gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
+ oaktrail_crtc.o \
+ oaktrail_lvds.o \
+ oaktrail_hdmi.o \
+ oaktrail_hdmi_i2c.o
+
+gma500_gfx-$(CONFIG_DRM_MEDFIELD) += mdfld_device.o \
+ mdfld_output.o \
+ mdfld_intel_display.o \
+ mdfld_dsi_output.o \
+ mdfld_dsi_dpi.o \
+ mdfld_dsi_pkg_sender.o \
+ mdfld_tpo_vid.o \
+ mdfld_tmd_vid.o \
+ tc35876x-dsi-lvds.o
+
+obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
new file mode 100644
index 00000000000..de6f62a6ceb
--- /dev/null
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -0,0 +1,364 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "framebuffer.h"
+
+/**
+ * psb_spank - reset the 2D engine
+ * @dev_priv: our PSB DRM device
+ *
+ * Soft reset the graphics engine and then reload the necessary registers.
+ * We use this at initialisation time but it will become relevant for
+ * accelerated X later
+ */
+void psb_spank(struct drm_psb_private *dev_priv)
+{
+ PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
+ _PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
+ _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
+ _PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
+ PSB_RSGX32(PSB_CR_SOFT_RESET);
+
+ msleep(1);
+
+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
+ wmb();
+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
+ PSB_CR_BIF_CTRL);
+ wmb();
+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+
+ msleep(1);
+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
+ PSB_CR_BIF_CTRL);
+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+ PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+}
+
+/**
+ * psb_2d_wait_available - wait for FIFO room
+ * @dev_priv: our DRM device
+ * @size: size (in dwords) of the command we want to issue
+ *
+ * Wait until there is room to load the FIFO with our data. If the
+ * device is not responding then reset it
+ */
+static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
+ unsigned size)
+{
+ uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
+ unsigned long t = jiffies + HZ;
+
+ while (avail < size) {
+ avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
+ if (time_after(jiffies, t)) {
+ psb_spank(dev_priv);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+/**
+ * psbfb_2d_submit - submit a 2D command
+ * @dev_priv: our DRM device
+ * @cmdbuf: command to issue
+ * @size: length (in dwords)
+ *
+ * Issue one or more 2D commands to the accelerator. This needs to be
+ * serialized later when we add the GEM interfaces for acceleration
+ */
+static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
+ unsigned size)
+{
+ int ret = 0;
+ int i;
+ unsigned submit_size;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->lock_2d, flags);
+ while (size > 0) {
+ submit_size = (size < 0x60) ? size : 0x60;
+ size -= submit_size;
+ ret = psb_2d_wait_available(dev_priv, submit_size);
+ if (ret)
+ break;
+
+ submit_size <<= 2;
+
+ for (i = 0; i < submit_size; i += 4)
+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
+
+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
+ }
+ spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
+ return ret;
+}
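+
+/*
+ * Worked example (buffer length is illustrative): a 200-dword command
+ * buffer goes out to the slave port in chunks of at most 0x60 (96)
+ * dwords, i.e. 96 + 96 + 8, waiting for FIFO room before each chunk.
+ * The "submit_size <<= 2" converts dwords to bytes purely for the
+ * register offset arithmetic in the write loop.
+ */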
+
+
+/**
+ * psb_accel_2d_copy_direction - compute blit order
+ * @xdir: X direction of move
+ * @ydir: Y direction of move
+ *
+ * Compute the correct order settings to ensure that an overlapping blit
+ * correctly copies all the pixels.
+ */
+static u32 psb_accel_2d_copy_direction(int xdir, int ydir)
+{
+ if (xdir < 0)
+ return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL :
+ PSB_2D_COPYORDER_TR2BL;
+ else
+ return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR :
+ PSB_2D_COPYORDER_TL2BR;
+}
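+
+/*
+ * Worked example: copying a region one pixel right and one pixel down
+ * over itself gives xdir = src_x - dst_x = -1 and ydir = -1, so the
+ * blit runs bottom-right to top-left (BR2TL) and every overlapping
+ * pixel is read before it is overwritten.
+ */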
+
+/**
+ * psb_accel_2d_copy - accelerated 2D copy
+ * @dev_priv: our DRM device
+ * @src_offset: source offset in bytes
+ * @src_stride: source stride in bytes
+ * @src_format: psb 2D format defines
+ * @dst_offset: destination offset in bytes
+ * @dst_stride: destination stride in bytes
+ * @dst_format: psb 2D format defines
+ * @src_x: source offset in pixels
+ * @src_y: source offset in pixels
+ * @dst_x: destination offset in pixels
+ * @dst_y: destination offset in pixels
+ * @size_x: width of the copied area
+ * @size_y: height of the copied area
+ *
+ * Format and issue a 2D accelerated copy command.
+ */
+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
+ uint32_t src_offset, uint32_t src_stride,
+ uint32_t src_format, uint32_t dst_offset,
+ uint32_t dst_stride, uint32_t dst_format,
+ uint16_t src_x, uint16_t src_y,
+ uint16_t dst_x, uint16_t dst_y,
+ uint16_t size_x, uint16_t size_y)
+{
+ uint32_t blit_cmd;
+ uint32_t buffer[10];
+ uint32_t *buf;
+ uint32_t direction;
+
+ buf = buffer;
+
+ direction =
+ psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
+
+ if (direction == PSB_2D_COPYORDER_BR2TL ||
+ direction == PSB_2D_COPYORDER_TR2BL) {
+ src_x += size_x - 1;
+ dst_x += size_x - 1;
+ }
+ if (direction == PSB_2D_COPYORDER_BR2TL ||
+ direction == PSB_2D_COPYORDER_BL2TR) {
+ src_y += size_y - 1;
+ dst_y += size_y - 1;
+ }
+
+ blit_cmd =
+ PSB_2D_BLIT_BH |
+ PSB_2D_ROT_NONE |
+ PSB_2D_DSTCK_DISABLE |
+ PSB_2D_SRCCK_DISABLE |
+ PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
+
+ *buf++ = PSB_2D_FENCE_BH;
+ *buf++ =
+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
+ PSB_2D_DST_STRIDE_SHIFT);
+ *buf++ = dst_offset;
+ *buf++ =
+ PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
+ PSB_2D_SRC_STRIDE_SHIFT);
+ *buf++ = src_offset;
+ *buf++ =
+ PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
+ (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
+ *buf++ = blit_cmd;
+ *buf++ =
+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
+ PSB_2D_DST_YSTART_SHIFT);
+ *buf++ =
+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
+ PSB_2D_DST_YSIZE_SHIFT);
+ *buf++ = PSB_2D_FLUSH_BH;
+
+ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
+}
+
+/**
+ * psbfb_copyarea_accel - copyarea acceleration for /dev/fb
+ * @info: our framebuffer
+ * @a: copyarea parameters from the framebuffer core
+ *
+ * Perform a 2D copy via the accelerator
+ */
+static void psbfb_copyarea_accel(struct fb_info *info,
+ const struct fb_copyarea *a)
+{
+ struct psb_fbdev *fbdev = info->par;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_device *dev = psbfb->base.dev;
+ struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ uint32_t offset;
+ uint32_t stride;
+ uint32_t src_format;
+ uint32_t dst_format;
+
+ if (!fb)
+ return;
+
+ offset = psbfb->gtt->offset;
+ stride = fb->pitches[0];
+
+ switch (fb->depth) {
+ case 8:
+ src_format = PSB_2D_SRC_332RGB;
+ dst_format = PSB_2D_DST_332RGB;
+ break;
+ case 15:
+ src_format = PSB_2D_SRC_555RGB;
+ dst_format = PSB_2D_DST_555RGB;
+ break;
+ case 16:
+ src_format = PSB_2D_SRC_565RGB;
+ dst_format = PSB_2D_DST_565RGB;
+ break;
+ case 24:
+ case 32:
+		/* this is wrong, but since we don't do blending it's okay */
+ src_format = PSB_2D_SRC_8888ARGB;
+ dst_format = PSB_2D_DST_8888ARGB;
+ break;
+ default:
+ /* software fallback */
+ cfb_copyarea(info, a);
+ return;
+ }
+
+ if (!gma_power_begin(dev, false)) {
+ cfb_copyarea(info, a);
+ return;
+ }
+ psb_accel_2d_copy(dev_priv,
+ offset, stride, src_format,
+ offset, stride, dst_format,
+ a->sx, a->sy, a->dx, a->dy, a->width, a->height);
+ gma_power_end(dev);
+}
+
+/**
+ * psbfb_copyarea - 2D copy interface
+ * @info: our framebuffer
+ * @region: region to copy
+ *
+ * Copy an area of the framebuffer console either by the accelerator
+ * or directly using the cfb helpers according to the request
+ */
+void psbfb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *region)
+{
+ if (unlikely(info->state != FBINFO_STATE_RUNNING))
+ return;
+
+ /* Avoid the 8 pixel erratum */
+ if (region->width == 8 || region->height == 8 ||
+ (info->flags & FBINFO_HWACCEL_DISABLED))
+ return cfb_copyarea(info, region);
+
+ psbfb_copyarea_accel(info, region);
+}
+
+/**
+ * psbfb_sync - synchronize 2D
+ * @info: our framebuffer
+ *
+ * Wait for the 2D engine to quiesce so that we can do CPU
+ * access to the framebuffer again
+ */
+int psbfb_sync(struct fb_info *info)
+{
+ struct psb_fbdev *fbdev = info->par;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_device *dev = psbfb->base.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long _end = jiffies + HZ;
+ int busy = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->lock_2d, flags);
+ /*
+ * First idle the 2D engine.
+ */
+
+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
+ goto out;
+
+ do {
+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+ cpu_relax();
+ } while (busy && !time_after_eq(jiffies, _end));
+
+ if (busy)
+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+ if (busy)
+ goto out;
+
+ do {
+ busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+ _PSB_C2B_STATUS_BUSY) != 0);
+ cpu_relax();
+ } while (busy && !time_after_eq(jiffies, _end));
+ if (busy)
+ busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+ _PSB_C2B_STATUS_BUSY) != 0);
+
+out:
+ spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
+ return (busy) ? -EBUSY : 0;
+}
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
new file mode 100644
index 00000000000..ea7dfc59d79
--- /dev/null
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -0,0 +1,94 @@
+/*
+ * GMA500 Backlight Interface
+ *
+ * Copyright (c) 2009-2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Eric Knopp
+ *
+ */
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_drv.h"
+#include "intel_bios.h"
+#include "power.h"
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+static void do_gma_backlight_set(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ backlight_update_status(dev_priv->backlight_device);
+}
+#endif
+
+void gma_backlight_enable(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ dev_priv->backlight_enabled = true;
+ if (dev_priv->backlight_device) {
+ dev_priv->backlight_device->props.brightness = dev_priv->backlight_level;
+ do_gma_backlight_set(dev);
+ }
+#endif
+}
+
+void gma_backlight_disable(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ dev_priv->backlight_enabled = false;
+ if (dev_priv->backlight_device) {
+ dev_priv->backlight_device->props.brightness = 0;
+ do_gma_backlight_set(dev);
+ }
+#endif
+}
+
+void gma_backlight_set(struct drm_device *dev, int v)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ dev_priv->backlight_level = v;
+ if (dev_priv->backlight_device && dev_priv->backlight_enabled) {
+ dev_priv->backlight_device->props.brightness = v;
+ do_gma_backlight_set(dev);
+ }
+#endif
+}
+
+int gma_backlight_init(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ dev_priv->backlight_enabled = true;
+ return dev_priv->ops->backlight_init(dev);
+#else
+ return 0;
+#endif
+}
+
+void gma_backlight_exit(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (dev_priv->backlight_device) {
+ dev_priv->backlight_device->props.brightness = 0;
+ backlight_update_status(dev_priv->backlight_device);
+ backlight_device_unregister(dev_priv->backlight_device);
+ }
+#endif
+}
diff --git a/drivers/gpu/drm/gma500/blitter.c b/drivers/gpu/drm/gma500/blitter.c
new file mode 100644
index 00000000000..9cd54a6fb89
--- /dev/null
+++ b/drivers/gpu/drm/gma500/blitter.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014, Patrik Jakobsson
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Authors: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+ */
+
+#include "psb_drv.h"
+
+#include "blitter.h"
+#include "psb_reg.h"
+
+/* Wait for the blitter to be completely idle */
+int gma_blt_wait_idle(struct drm_psb_private *dev_priv)
+{
+ unsigned long stop = jiffies + HZ;
+ int busy = 1;
+
+ /* NOP for Cedarview */
+ if (IS_CDV(dev_priv->dev))
+ return 0;
+
+ /* First do a quick check */
+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
+ return 0;
+
+ do {
+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+ } while (busy && !time_after_eq(jiffies, stop));
+
+ if (busy)
+ return -EBUSY;
+
+ do {
+ busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+ _PSB_C2B_STATUS_BUSY) != 0);
+ } while (busy && !time_after_eq(jiffies, stop));
+
+ /* If still busy, we probably have a hang */
+ return (busy) ? -EBUSY : 0;
+}
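+
+/*
+ * Note: this mirrors the polling in psbfb_sync() but without taking
+ * the 2D lock; the command FIFO (SOCIF) is drained first, then the
+ * blit status bit, each bounded by the one-second deadline in "stop".
+ */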
diff --git a/drivers/gpu/drm/gma500/blitter.h b/drivers/gpu/drm/gma500/blitter.h
new file mode 100644
index 00000000000..b83648df590
--- /dev/null
+++ b/drivers/gpu/drm/gma500/blitter.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014, Patrik Jakobsson
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Authors: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+ */
+
+#ifndef __BLITTER_H
+#define __BLITTER_H
+
+extern int gma_blt_wait_idle(struct drm_psb_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
new file mode 100644
index 00000000000..3531f90e53d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -0,0 +1,626 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/gma_drm.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+#include "cdv_device.h"
+#include "gma_device.h"
+
+#define VGA_SR_INDEX 0x3c4
+#define VGA_SR_DATA 0x3c5
+
+static void cdv_disable_vga(struct drm_device *dev)
+{
+ u8 sr1;
+ u32 vga_reg;
+
+ vga_reg = VGACNTRL;
+
+ outb(1, VGA_SR_INDEX);
+ sr1 = inb(VGA_SR_DATA);
+ outb(sr1 | 1<<5, VGA_SR_DATA);
+ udelay(300);
+
+ REG_WRITE(vga_reg, VGA_DISP_DISABLE);
+ REG_READ(vga_reg);
+}
+
+static int cdv_output_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ drm_mode_create_scaling_mode_property(dev);
+
+ cdv_disable_vga(dev);
+
+ cdv_intel_crt_init(dev, &dev_priv->mode_dev);
+ cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
+
+ /* These bits indicate HDMI not SDVO on CDV */
+ if (REG_READ(SDVOB) & SDVO_DETECTED) {
+ cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
+ if (REG_READ(DP_B) & DP_DETECTED)
+ cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_B);
+ }
+
+ if (REG_READ(SDVOC) & SDVO_DETECTED) {
+ cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
+ if (REG_READ(DP_C) & DP_DETECTED)
+ cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_C);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+/*
+ * Cedartrail Backlight Interfaces
+ */
+
+static struct backlight_device *cdv_backlight_device;
+
+static int cdv_backlight_combination_mode(struct drm_device *dev)
+{
+ return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
+}
+
+static u32 cdv_get_max_backlight(struct drm_device *dev)
+{
+ u32 max = REG_READ(BLC_PWM_CTL);
+
+ if (max == 0) {
+ DRM_DEBUG_KMS("LVDS Panel PWM value is 0!\n");
+		/* i915 does this too, which I believe means the firmware has
+		 * taken control of the PWM and we should not smash it. */
+ return 1;
+ }
+
+ max >>= 16;
+ if (cdv_backlight_combination_mode(dev))
+ max *= 0xff;
+ return max;
+}
+
+static int cdv_get_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+
+ if (cdv_backlight_combination_mode(dev)) {
+ u8 lbpc;
+
+ val &= ~1;
+ pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
+ val *= lbpc;
+ }
+	return (val * 100) / cdv_get_max_backlight(dev);
+}
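+
+/*
+ * Worked example (register values are hypothetical): in combination
+ * mode with a BLC_PWM_CTL max field of 0x1000, the effective maximum
+ * is 0x1000 * 0xff = 1044480; a duty cycle of 2048 with lbpc = 128
+ * reads back as 2048 * 128 * 100 / 1044480, roughly 25%.
+ */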
+
+static int cdv_set_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ int level = bd->props.brightness;
+ u32 blc_pwm_ctl;
+
+	/* Brightness percentage: 1-100% is the valid range */
+ if (level < 1)
+ level = 1;
+
+ level *= cdv_get_max_backlight(dev);
+ level /= 100;
+
+ if (cdv_backlight_combination_mode(dev)) {
+ u32 max = cdv_get_max_backlight(dev);
+ u8 lbpc;
+
+ lbpc = level * 0xfe / max + 1;
+ level /= lbpc;
+
+ pci_write_config_byte(dev->pdev, 0xF4, lbpc);
+ }
+
+ blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+ return 0;
+}
+
+static const struct backlight_ops cdv_ops = {
+ .get_brightness = cdv_get_brightness,
+ .update_status = cdv_set_brightness,
+};
+
+static int cdv_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 100;
+ props.type = BACKLIGHT_PLATFORM;
+
+ cdv_backlight_device = backlight_device_register("psb-bl",
+ NULL, (void *)dev, &cdv_ops, &props);
+ if (IS_ERR(cdv_backlight_device))
+ return PTR_ERR(cdv_backlight_device);
+
+ cdv_backlight_device->props.brightness =
+ cdv_get_brightness(cdv_backlight_device);
+ backlight_update_status(cdv_backlight_device);
+ dev_priv->backlight_device = cdv_backlight_device;
+ dev_priv->backlight_enabled = true;
+ return 0;
+}
+
+#endif
+
+/*
+ * Provide the Cedarview specific chip logic and low level methods
+ * for power management
+ *
+ * FIXME: we need to implement the apm/ospm base management bits
+ * for this and the MID devices.
+ */
+
+static inline u32 CDV_MSG_READ32(uint port, uint offset)
+{
+ int mcr = (0x10<<24) | (port << 16) | (offset << 8);
+ uint32_t ret_val = 0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_read_config_dword(pci_root, 0xD4, &ret_val);
+ pci_dev_put(pci_root);
+ return ret_val;
+}
+
+static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+ int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD4, value);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_dev_put(pci_root);
+}
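+
+/*
+ * Illustrative note: these helpers drive the Cedarview message bus via
+ * the PCI root bridge; the command word (0x10 for read, 0x11 for
+ * write, plus port and offset) goes into config register 0xD0 and the
+ * payload moves through 0xD4.  cdv_errata() below uses
+ * CDV_MSG_WRITE32(3, 0x30, 0x08027108) to disable "bonus launch".
+ */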
+
+#define PSB_PM_SSC 0x20
+#define PSB_PM_SSS 0x30
+#define PSB_PWRGT_GFX_ON 0x02
+#define PSB_PWRGT_GFX_OFF 0x01
+#define PSB_PWRGT_GFX_D0 0x00
+#define PSB_PWRGT_GFX_D3 0x03
+
+static void cdv_init_pm(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pwr_cnt;
+ int i;
+
+ dev_priv->apm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+ PSB_APMBA) & 0xFFFF;
+ dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+ PSB_OSPMBA) & 0xFFFF;
+
+ /* Power status */
+ pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
+
+ /* Enable the GPU */
+ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
+ pwr_cnt |= PSB_PWRGT_GFX_ON;
+ outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
+
+ /* Wait for the GPU power */
+ for (i = 0; i < 5; i++) {
+ u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
+ if ((pwr_sts & PSB_PWRGT_GFX_MASK) == 0)
+ return;
+ udelay(10);
+ }
+ dev_err(dev->dev, "GPU: power management timed out.\n");
+}
+
+static void cdv_errata(struct drm_device *dev)
+{
+	/* Disable bonus launch.
+	 *	The CPU and GPU compete for memory, so the display misses
+	 *	updates and flickers; it is worst with dual cores and dual
+	 *	displays.
+	 *
+	 *	The Win 7 graphics driver was fixed to disable a feature
+	 *	called Bonus Launch to work around the issue, at the cost
+	 *	of some performance.
+	 */
+ CDV_MSG_WRITE32(3, 0x30, 0x08027108);
+}
+
+/**
+ * cdv_save_display_registers - save registers lost on suspend
+ * @dev: our DRM device
+ *
+ * Save the state we need in order to be able to restore the interface
+ * upon resume from suspend
+ */
+static int cdv_save_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_save_area *regs = &dev_priv->regs;
+ struct drm_connector *connector;
+
+ dev_dbg(dev->dev, "Saving GPU registers.\n");
+
+ pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB);
+
+ regs->cdv.saveDSPCLK_GATE_D = REG_READ(DSPCLK_GATE_D);
+ regs->cdv.saveRAMCLK_GATE_D = REG_READ(RAMCLK_GATE_D);
+
+ regs->cdv.saveDSPARB = REG_READ(DSPARB);
+ regs->cdv.saveDSPFW[0] = REG_READ(DSPFW1);
+ regs->cdv.saveDSPFW[1] = REG_READ(DSPFW2);
+ regs->cdv.saveDSPFW[2] = REG_READ(DSPFW3);
+ regs->cdv.saveDSPFW[3] = REG_READ(DSPFW4);
+ regs->cdv.saveDSPFW[4] = REG_READ(DSPFW5);
+ regs->cdv.saveDSPFW[5] = REG_READ(DSPFW6);
+
+ regs->cdv.saveADPA = REG_READ(ADPA);
+
+ regs->cdv.savePP_CONTROL = REG_READ(PP_CONTROL);
+ regs->cdv.savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
+ regs->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+ regs->saveBLC_PWM_CTL2 = REG_READ(BLC_PWM_CTL2);
+ regs->cdv.saveLVDS = REG_READ(LVDS);
+
+ regs->cdv.savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
+
+ regs->cdv.savePP_ON_DELAYS = REG_READ(PP_ON_DELAYS);
+ regs->cdv.savePP_OFF_DELAYS = REG_READ(PP_OFF_DELAYS);
+ regs->cdv.savePP_CYCLE = REG_READ(PP_CYCLE);
+
+ regs->cdv.saveVGACNTRL = REG_READ(VGACNTRL);
+
+ regs->cdv.saveIER = REG_READ(PSB_INT_ENABLE_R);
+ regs->cdv.saveIMR = REG_READ(PSB_INT_MASK_R);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+
+ return 0;
+}
+
+/**
+ * cdv_restore_display_registers - restore lost register state
+ * @dev: our DRM device
+ *
+ * Restore register state that was lost during suspend and resume.
+ *
+ * FIXME: review
+ */
+static int cdv_restore_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_save_area *regs = &dev_priv->regs;
+ struct drm_connector *connector;
+ u32 temp;
+
+ pci_write_config_byte(dev->pdev, 0xF4, regs->cdv.saveLBB);
+
+ REG_WRITE(DSPCLK_GATE_D, regs->cdv.saveDSPCLK_GATE_D);
+ REG_WRITE(RAMCLK_GATE_D, regs->cdv.saveRAMCLK_GATE_D);
+
+ /* BIOS does below anyway */
+ REG_WRITE(DPIO_CFG, 0);
+ REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
+
+ temp = REG_READ(DPLL_A);
+ if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) {
+ REG_WRITE(DPLL_A, temp | DPLL_SYNCLOCK_ENABLE);
+ REG_READ(DPLL_A);
+ }
+
+ temp = REG_READ(DPLL_B);
+ if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) {
+ REG_WRITE(DPLL_B, temp | DPLL_SYNCLOCK_ENABLE);
+ REG_READ(DPLL_B);
+ }
+
+ udelay(500);
+
+ REG_WRITE(DSPFW1, regs->cdv.saveDSPFW[0]);
+ REG_WRITE(DSPFW2, regs->cdv.saveDSPFW[1]);
+ REG_WRITE(DSPFW3, regs->cdv.saveDSPFW[2]);
+ REG_WRITE(DSPFW4, regs->cdv.saveDSPFW[3]);
+ REG_WRITE(DSPFW5, regs->cdv.saveDSPFW[4]);
+ REG_WRITE(DSPFW6, regs->cdv.saveDSPFW[5]);
+
+ REG_WRITE(DSPARB, regs->cdv.saveDSPARB);
+ REG_WRITE(ADPA, regs->cdv.saveADPA);
+
+ REG_WRITE(BLC_PWM_CTL2, regs->saveBLC_PWM_CTL2);
+ REG_WRITE(LVDS, regs->cdv.saveLVDS);
+ REG_WRITE(PFIT_CONTROL, regs->cdv.savePFIT_CONTROL);
+ REG_WRITE(PFIT_PGM_RATIOS, regs->cdv.savePFIT_PGM_RATIOS);
+ REG_WRITE(BLC_PWM_CTL, regs->saveBLC_PWM_CTL);
+ REG_WRITE(PP_ON_DELAYS, regs->cdv.savePP_ON_DELAYS);
+ REG_WRITE(PP_OFF_DELAYS, regs->cdv.savePP_OFF_DELAYS);
+ REG_WRITE(PP_CYCLE, regs->cdv.savePP_CYCLE);
+ REG_WRITE(PP_CONTROL, regs->cdv.savePP_CONTROL);
+
+ REG_WRITE(VGACNTRL, regs->cdv.saveVGACNTRL);
+
+ REG_WRITE(PSB_INT_ENABLE_R, regs->cdv.saveIER);
+ REG_WRITE(PSB_INT_MASK_R, regs->cdv.saveIMR);
+
+ /* Fix arbitration bug */
+ cdv_errata(dev);
+
+ drm_mode_config_reset(dev);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
+
+ /* Resume the modeset for every activated CRTC */
+ drm_helper_resume_force_mode(dev);
+ return 0;
+}
+
+static int cdv_power_down(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pwr_cnt, pwr_mask, pwr_sts;
+ int tries = 5;
+
+ pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
+ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
+ pwr_cnt |= PSB_PWRGT_GFX_OFF;
+ pwr_mask = PSB_PWRGT_GFX_MASK;
+
+ outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
+
+ while (tries--) {
+ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
+ if ((pwr_sts & pwr_mask) == PSB_PWRGT_GFX_D3)
+ return 0;
+ udelay(10);
+ }
+ return 0;
+}
+
+static int cdv_power_up(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pwr_cnt, pwr_mask, pwr_sts;
+ int tries = 5;
+
+ pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
+ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
+ pwr_cnt |= PSB_PWRGT_GFX_ON;
+ pwr_mask = PSB_PWRGT_GFX_MASK;
+
+ outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
+
+ while (tries--) {
+ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
+ if ((pwr_sts & pwr_mask) == PSB_PWRGT_GFX_D0)
+ return 0;
+ udelay(10);
+ }
+ return 0;
+}
+
+static void cdv_hotplug_work_func(struct work_struct *work)
+{
+ struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
+ hotplug_work);
+ struct drm_device *dev = dev_priv->dev;
+
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(dev);
+}
+
+/*
+ * The core driver has received a hotplug IRQ. We are in IRQ context,
+ * so extract the needed information and kick off queued processing.
+ */
+
+static int cdv_hotplug_event(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ schedule_work(&dev_priv->hotplug_work);
+ REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+ return 1;
+}
+
+static void cdv_hotplug_enable(struct drm_device *dev, bool on)
+{
+ if (on) {
+ u32 hotplug = REG_READ(PORT_HOTPLUG_EN);
+ hotplug |= HDMIB_HOTPLUG_INT_EN | HDMIC_HOTPLUG_INT_EN |
+ HDMID_HOTPLUG_INT_EN | CRT_HOTPLUG_INT_EN;
+ REG_WRITE(PORT_HOTPLUG_EN, hotplug);
+ } else {
+ REG_WRITE(PORT_HOTPLUG_EN, 0);
+ REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+ }
+}
+
+static const char *force_audio_names[] = {
+ "off",
+ "auto",
+ "on",
+};
+
+void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+ int i;
+
+ prop = dev_priv->force_audio_property;
+ if (prop == NULL) {
+ prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "audio",
+ ARRAY_SIZE(force_audio_names));
+ if (prop == NULL)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
+ drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
+
+ dev_priv->force_audio_property = prop;
+ }
+ drm_object_attach_property(&connector->base, prop, 0);
+}
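+
+/*
+ * Note: the loop above deliberately registers entry i with the value
+ * i - 1, so "off", "auto" and "on" map to property values -1, 0 and 1,
+ * and the property is attached with "auto" (0) as its initial value.
+ */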
+
+
+static const char *broadcast_rgb_names[] = {
+ "Full",
+ "Limited 16:235",
+};
+
+void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+ int i;
+
+ prop = dev_priv->broadcast_rgb_property;
+ if (prop == NULL) {
+ prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ ARRAY_SIZE(broadcast_rgb_names));
+ if (prop == NULL)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
+ drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
+
+ dev_priv->broadcast_rgb_property = prop;
+ }
+
+ drm_object_attach_property(&connector->base, prop, 0);
+}
+
+/* Cedarview */
+static const struct psb_offset cdv_regmap[2] = {
+ {
+ .fp0 = FPA0,
+ .fp1 = FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = DPLL_A,
+ .dpll_md = DPLL_A_MD,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .base = DSPABASE,
+ .surf = DSPASURF,
+ .addr = DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = FPB0,
+ .fp1 = FPB1,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = DPLL_B,
+ .dpll_md = DPLL_B_MD,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .base = DSPBBASE,
+ .surf = DSPBSURF,
+ .addr = DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ }
+};
+
+static int cdv_chip_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
+
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->regmap = cdv_regmap;
+ gma_get_core_freq(dev);
+ psb_intel_opregion_init(dev);
+ psb_intel_init_bios(dev);
+ cdv_hotplug_enable(dev, false);
+ return 0;
+}
+
+/* CDV is much like Poulsbo but has MID-like SGX offsets and PM */
+
+const struct psb_ops cdv_chip_ops = {
+ .name = "GMA3600/3650",
+ .accel_2d = 0,
+ .pipes = 2,
+ .crtcs = 2,
+ .hdmi_mask = (1 << 0) | (1 << 1),
+ .lvds_mask = (1 << 1),
+ .sdvo_mask = (1 << 0),
+ .cursor_needs_phys = 0,
+ .sgx_offset = MRST_SGX_OFFSET,
+ .chip_setup = cdv_chip_setup,
+ .errata = cdv_errata,
+
+ .crtc_helper = &cdv_intel_helper_funcs,
+ .crtc_funcs = &cdv_intel_crtc_funcs,
+ .clock_funcs = &cdv_clock_funcs,
+
+ .output_init = cdv_output_init,
+ .hotplug = cdv_hotplug_event,
+ .hotplug_enable = cdv_hotplug_enable,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = cdv_backlight_init,
+#endif
+
+ .init_pm = cdv_init_pm,
+ .save_regs = cdv_save_display_registers,
+ .restore_regs = cdv_restore_display_registers,
+ .power_down = cdv_power_down,
+ .power_up = cdv_power_up,
+ .update_wm = cdv_update_wm,
+ .disable_sr = cdv_disable_sr,
+};
diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h
new file mode 100644
index 00000000000..705c11d47d4
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_device.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
+extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
+extern const struct gma_clock_funcs cdv_clock_funcs;
+extern void cdv_intel_crt_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void cdv_intel_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev,
+ int reg);
+extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc);
+extern void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc);
+extern void cdv_disable_sr(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
new file mode 100644
index 00000000000..c18268cd516
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -0,0 +1,317 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include "cdv_device.h"
+#include <linux/pm_runtime.h>
+
+
+static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ u32 temp, reg;
+ reg = ADPA;
+
+ temp = REG_READ(reg);
+ temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ temp &= ~ADPA_DAC_ENABLE;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ temp |= ADPA_DAC_ENABLE;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ }
+
+ REG_WRITE(reg, temp);
+}
+
+static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+	/* The lowest dot clock for CDV is 20000 kHz (20 MHz) */
+ if (mode->clock < 20000)
+ return MODE_CLOCK_LOW;
+
+	/* The maximum dot clock for CDV is 355 MHz rather than 400 MHz */
+ if (mode->clock > 355000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int dpll_md_reg;
+ u32 adpa, dpll_md;
+ u32 adpa_reg;
+
+ if (gma_crtc->pipe == 0)
+ dpll_md_reg = DPLL_A_MD;
+ else
+ dpll_md_reg = DPLL_B_MD;
+
+ adpa_reg = ADPA;
+
+ /*
+ * Disable separate mode multiplier used when cloning SDVO to CRT
+ * XXX this needs to be adjusted when we really are cloning
+ */
+ {
+ dpll_md = REG_READ(dpll_md_reg);
+ REG_WRITE(dpll_md_reg,
+ dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
+ }
+
+ adpa = 0;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+ if (gma_crtc->pipe == 0)
+ adpa |= ADPA_PIPE_A_SELECT;
+ else
+ adpa |= ADPA_PIPE_B_SELECT;
+
+ REG_WRITE(adpa_reg, adpa);
+}
+
+
+/*
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
+ *
+ * Returns true if a CRT is connected, false otherwise.
+ */
+static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
+ bool force)
+{
+ struct drm_device *dev = connector->dev;
+ u32 hotplug_en;
+ int i, tries = 0, ret = false;
+ u32 orig;
+
+	/*
+	 * On CDV, the CRT detect sequence needs to be done twice
+	 * to get a reliable result.
+	 */
+ tries = 2;
+
+ orig = hotplug_en = REG_READ(PORT_HOTPLUG_EN);
+ hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK);
+ hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
+
+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
+	for (i = 0; i < tries; i++) {
+ unsigned long timeout;
+ /* turn on the FORCE_DETECT */
+ REG_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ timeout = jiffies + msecs_to_jiffies(1000);
+ /* wait for FORCE_DETECT to go off */
+ do {
+ if (!(REG_READ(PORT_HOTPLUG_EN) &
+ CRT_HOTPLUG_FORCE_DETECT))
+ break;
+ msleep(1);
+ } while (time_after(timeout, jiffies));
+ }
+
+ if ((REG_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
+ CRT_HOTPLUG_MONITOR_NONE)
+ ret = true;
+
+ /* clear the interrupt we just generated, if any */
+ REG_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+
+ /* and put the bits back */
+ REG_WRITE(PORT_HOTPLUG_EN, orig);
+ return ret;
+}
+
+static enum drm_connector_status cdv_intel_crt_detect(
+ struct drm_connector *connector, bool force)
+{
+ if (cdv_intel_crt_detect_hotplug(connector, force))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static void cdv_intel_crt_destroy(struct drm_connector *connector)
+{
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+
+ psb_intel_i2c_destroy(gma_encoder->ddc_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static int cdv_intel_crt_get_modes(struct drm_connector *connector)
+{
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ return psb_intel_ddc_get_modes(connector,
+ &gma_encoder->ddc_bus->adapter);
+}
+
+static int cdv_intel_crt_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ return 0;
+}
+
+/*
+ * Routines for controlling the analog (VGA) port
+ */
+
+static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
+ .dpms = cdv_intel_crt_dpms,
+ .mode_fixup = gma_encoder_mode_fixup,
+ .prepare = gma_encoder_prepare,
+ .commit = gma_encoder_commit,
+ .mode_set = cdv_intel_crt_mode_set,
+};
+
+static const struct drm_connector_funcs cdv_intel_crt_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = cdv_intel_crt_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = cdv_intel_crt_destroy,
+ .set_property = cdv_intel_crt_set_property,
+};
+
+static const struct drm_connector_helper_funcs
+ cdv_intel_crt_connector_helper_funcs = {
+ .mode_valid = cdv_intel_crt_mode_valid,
+ .get_modes = cdv_intel_crt_get_modes,
+ .best_encoder = gma_best_encoder,
+};
+
+static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = {
+ .destroy = cdv_intel_crt_enc_destroy,
+};
+
+void cdv_intel_crt_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+
+ struct gma_connector *gma_connector;
+ struct gma_encoder *gma_encoder;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ u32 i2c_reg;
+
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+ if (!gma_encoder)
+ return;
+
+ gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+ if (!gma_connector)
+ goto failed_connector;
+
+ connector = &gma_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ drm_connector_init(dev, connector,
+ &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ encoder = &gma_encoder->base;
+ drm_encoder_init(dev, encoder,
+ &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
+
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
+
+ /* Set up the DDC bus. */
+ i2c_reg = GPIOA;
+ /* Remove the following code for CDV */
+ /*
+ if (dev_priv->crt_ddc_bus != 0)
+ i2c_reg = dev_priv->crt_ddc_bus;
+ }*/
+ gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
+ i2c_reg, "CRTDDC_A");
+ if (!gma_encoder->ddc_bus) {
+		dev_printk(KERN_ERR, &dev->pdev->dev,
+			   "DDC bus registration failed.\n");
+ goto failed_ddc;
+ }
+
+ gma_encoder->type = INTEL_OUTPUT_ANALOG;
+ /*
+ psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
+ psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
+ */
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_encoder_helper_add(encoder, &cdv_intel_crt_helper_funcs);
+ drm_connector_helper_add(connector,
+ &cdv_intel_crt_connector_helper_funcs);
+
+ drm_sysfs_connector_add(connector);
+
+ return;
+failed_ddc:
+ drm_encoder_cleanup(&gma_encoder->base);
+ drm_connector_cleanup(&gma_connector->base);
+ kfree(gma_connector);
+failed_connector:
+ kfree(gma_encoder);
+}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
new file mode 100644
index 00000000000..66727328832
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -0,0 +1,999 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "gma_display.h"
+#include "power.h"
+#include "cdv_device.h"
+
+static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target,
+ int refclk, struct gma_clock_t *best_clock);
+
+
+#define CDV_LIMIT_SINGLE_LVDS_96 0
+#define CDV_LIMIT_SINGLE_LVDS_100 1
+#define CDV_LIMIT_DAC_HDMI_27 2
+#define CDV_LIMIT_DAC_HDMI_96 3
+#define CDV_LIMIT_DP_27 4
+#define CDV_LIMIT_DP_100 5
+
+static const struct gma_limit_t cdv_intel_limits[] = {
+ { /* CDV_SINGLE_LVDS_96MHz */
+ .dot = {.min = 20000, .max = 115500},
+ .vco = {.min = 1800000, .max = 3600000},
+ .n = {.min = 2, .max = 6},
+ .m = {.min = 60, .max = 160},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 58, .max = 158},
+ .p = {.min = 28, .max = 140},
+ .p1 = {.min = 2, .max = 10},
+ .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
+ .find_pll = gma_find_best_pll,
+ },
+ { /* CDV_SINGLE_LVDS_100MHz */
+ .dot = {.min = 20000, .max = 115500},
+ .vco = {.min = 1800000, .max = 3600000},
+ .n = {.min = 2, .max = 6},
+ .m = {.min = 60, .max = 160},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 58, .max = 158},
+ .p = {.min = 28, .max = 140},
+ .p1 = {.min = 2, .max = 10},
+	 /* The single-channel range is 25-112 MHz, and dual-channel
+	  * is 80-224 MHz. Prefer single channel as much as possible.
+	  */
+ .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
+ .find_pll = gma_find_best_pll,
+ },
+ { /* CDV_DAC_HDMI_27MHz */
+ .dot = {.min = 20000, .max = 400000},
+ .vco = {.min = 1809000, .max = 3564000},
+ .n = {.min = 1, .max = 1},
+ .m = {.min = 67, .max = 132},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 65, .max = 130},
+ .p = {.min = 5, .max = 90},
+ .p1 = {.min = 1, .max = 9},
+ .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+ .find_pll = gma_find_best_pll,
+ },
+ { /* CDV_DAC_HDMI_96MHz */
+ .dot = {.min = 20000, .max = 400000},
+ .vco = {.min = 1800000, .max = 3600000},
+ .n = {.min = 2, .max = 6},
+ .m = {.min = 60, .max = 160},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 58, .max = 158},
+ .p = {.min = 5, .max = 100},
+ .p1 = {.min = 1, .max = 10},
+ .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+ .find_pll = gma_find_best_pll,
+ },
+ { /* CDV_DP_27MHz */
+ .dot = {.min = 160000, .max = 272000},
+ .vco = {.min = 1809000, .max = 3564000},
+ .n = {.min = 1, .max = 1},
+ .m = {.min = 67, .max = 132},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 65, .max = 130},
+ .p = {.min = 5, .max = 90},
+ .p1 = {.min = 1, .max = 9},
+ .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
+ .find_pll = cdv_intel_find_dp_pll,
+ },
+ { /* CDV_DP_100MHz */
+ .dot = {.min = 160000, .max = 272000},
+ .vco = {.min = 1800000, .max = 3600000},
+ .n = {.min = 2, .max = 6},
+ .m = {.min = 60, .max = 164},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 58, .max = 162},
+ .p = {.min = 5, .max = 100},
+ .p1 = {.min = 1, .max = 10},
+ .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
+ .find_pll = cdv_intel_find_dp_pll,
+ }
+};
+
+#define _wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+ while (!(COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W && !in_dbg_master()) \
+ msleep(W); \
+ } \
+ ret__; \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
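+
+/*
+ * Usage sketch: wait_for() polls COND for up to MS milliseconds,
+ * sleeping 1 ms between polls (unless a debugger owns the CPU), and
+ * evaluates to 0 on success or -ETIMEDOUT on timeout, e.g.
+ *
+ *	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ */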
+
+
+int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
+{
+ int ret;
+
+ ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ if (ret) {
+ DRM_ERROR("timeout waiting for SB to idle before read\n");
+ return ret;
+ }
+
+ REG_WRITE(SB_ADDR, reg);
+ REG_WRITE(SB_PCKT,
+ SET_FIELD(SB_OPCODE_READ, SB_OPCODE) |
+ SET_FIELD(SB_DEST_DPLL, SB_DEST) |
+ SET_FIELD(0xf, SB_BYTE_ENABLE));
+
+ ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ if (ret) {
+ DRM_ERROR("timeout waiting for SB to idle after read\n");
+ return ret;
+ }
+
+ *val = REG_READ(SB_DATA);
+
+ return 0;
+}
+
+int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
+{
+ int ret;
+ static bool dpio_debug = true;
+ u32 temp;
+
+ if (dpio_debug) {
+ if (cdv_sb_read(dev, reg, &temp) == 0)
+ DRM_DEBUG_KMS("0x%08x: 0x%08x (before)\n", reg, temp);
+ DRM_DEBUG_KMS("0x%08x: 0x%08x\n", reg, val);
+ }
+
+ ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ if (ret) {
+ DRM_ERROR("timeout waiting for SB to idle before write\n");
+ return ret;
+ }
+
+ REG_WRITE(SB_ADDR, reg);
+ REG_WRITE(SB_DATA, val);
+ REG_WRITE(SB_PCKT,
+ SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
+ SET_FIELD(SB_DEST_DPLL, SB_DEST) |
+ SET_FIELD(0xf, SB_BYTE_ENABLE));
+
+ ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ if (ret) {
+ DRM_ERROR("timeout waiting for SB to idle after write\n");
+ return ret;
+ }
+
+ if (dpio_debug) {
+ if (cdv_sb_read(dev, reg, &temp) == 0)
+ DRM_DEBUG_KMS("0x%08x: 0x%08x (after)\n", reg, temp);
+ }
+
+ return 0;
+}
+
+/* Reset the DPIO configuration register. The BIOS does this at every
+ * mode set.
+ */
+void cdv_sb_reset(struct drm_device *dev)
+{
+
+ REG_WRITE(DPIO_CFG, 0);
+ REG_READ(DPIO_CFG);
+ REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
+}
+
+/* Unlike most Intel display engines, on Cedarview the DPLL registers
+ * are behind this sideband bus. They must be programmed while the
+ * DPLL reference clock is on in the DPLL control register, but before
+ * the DPLL is enabled in the DPLL control register.
+ */
+static int
+cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
+ struct gma_clock_t *clock, bool is_lvds, u32 ddi_select)
+{
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
+ u32 m, n_vco, p;
+ int ret = 0;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int ref_sfr = (pipe == 0) ? SB_REF_DPLLA : SB_REF_DPLLB;
+ u32 ref_value;
+ u32 lane_reg, lane_value;
+
+ cdv_sb_reset(dev);
+
+ REG_WRITE(dpll_reg, DPLL_SYNCLOCK_ENABLE | DPLL_VGA_MODE_DIS);
+
+ udelay(100);
+
+ /* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
+ ref_value = 0x68A701;
+
+ cdv_sb_write(dev, SB_REF_SFR(pipe), ref_value);
+
+ /* We don't know what the other fields of these regs are, so
+ * leave them in place.
+ */
+	/*
+	 * Bits 14:13 of 0x8010/0x8030 select the ref clock for pipe A/B.
+	 * Display spec 1.06 has the wrong definition; the correct one is:
+	 *
+	 * refclka means use the clock from the same PLL;
+	 *
+	 * if DPLLA is set to 01 and DPLLB is set to 01, each pipe uses
+	 * the clock from its own PLL;
+	 *
+	 * if DPLLA is set to 01 and DPLLB is set to 02, both use the
+	 * clock from DPLLA.
+	 */
+ ret = cdv_sb_read(dev, ref_sfr, &ref_value);
+ if (ret)
+ return ret;
+ ref_value &= ~(REF_CLK_MASK);
+
+ /* use DPLL_A for pipeB on CRT/HDMI */
+ if (pipe == 1 && !is_lvds && !(ddi_select & DP_MASK)) {
+ DRM_DEBUG_KMS("use DPLLA for pipe B\n");
+ ref_value |= REF_CLK_DPLLA;
+ } else {
+ DRM_DEBUG_KMS("use their DPLL for pipe A/B\n");
+ ref_value |= REF_CLK_DPLL;
+ }
+ ret = cdv_sb_write(dev, ref_sfr, ref_value);
+ if (ret)
+ return ret;
+
+ ret = cdv_sb_read(dev, SB_M(pipe), &m);
+ if (ret)
+ return ret;
+ m &= ~SB_M_DIVIDER_MASK;
+ m |= ((clock->m2) << SB_M_DIVIDER_SHIFT);
+ ret = cdv_sb_write(dev, SB_M(pipe), m);
+ if (ret)
+ return ret;
+
+ ret = cdv_sb_read(dev, SB_N_VCO(pipe), &n_vco);
+ if (ret)
+ return ret;
+
+ /* Follow the BIOS to program the N_DIVIDER REG */
+ n_vco &= 0xFFFF;
+ n_vco |= 0x107;
+ n_vco &= ~(SB_N_VCO_SEL_MASK |
+ SB_N_DIVIDER_MASK |
+ SB_N_CB_TUNE_MASK);
+
+ n_vco |= ((clock->n) << SB_N_DIVIDER_SHIFT);
+
+ if (clock->vco < 2250000) {
+ n_vco |= (2 << SB_N_CB_TUNE_SHIFT);
+ n_vco |= (0 << SB_N_VCO_SEL_SHIFT);
+ } else if (clock->vco < 2750000) {
+ n_vco |= (1 << SB_N_CB_TUNE_SHIFT);
+ n_vco |= (1 << SB_N_VCO_SEL_SHIFT);
+ } else if (clock->vco < 3300000) {
+ n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
+ n_vco |= (2 << SB_N_VCO_SEL_SHIFT);
+ } else {
+ n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
+ n_vco |= (3 << SB_N_VCO_SEL_SHIFT);
+ }
+
+ ret = cdv_sb_write(dev, SB_N_VCO(pipe), n_vco);
+ if (ret)
+ return ret;
+
+ ret = cdv_sb_read(dev, SB_P(pipe), &p);
+ if (ret)
+ return ret;
+ p &= ~(SB_P2_DIVIDER_MASK | SB_P1_DIVIDER_MASK);
+ p |= SET_FIELD(clock->p1, SB_P1_DIVIDER);
+ switch (clock->p2) {
+ case 5:
+ p |= SET_FIELD(SB_P2_5, SB_P2_DIVIDER);
+ break;
+ case 10:
+ p |= SET_FIELD(SB_P2_10, SB_P2_DIVIDER);
+ break;
+ case 14:
+ p |= SET_FIELD(SB_P2_14, SB_P2_DIVIDER);
+ break;
+ case 7:
+ p |= SET_FIELD(SB_P2_7, SB_P2_DIVIDER);
+ break;
+ default:
+ DRM_ERROR("Bad P2 clock: %d\n", clock->p2);
+ return -EINVAL;
+ }
+ ret = cdv_sb_write(dev, SB_P(pipe), p);
+ if (ret)
+ return ret;
+
+ if (ddi_select) {
+ if ((ddi_select & DDI_MASK) == DDI0_SELECT) {
+ lane_reg = PSB_LANE0;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE1;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+ } else {
+ lane_reg = PSB_LANE2;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE3;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+ }
+ }
+ return 0;
+}
+
+static const struct gma_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
+ int refclk)
+{
+ const struct gma_limit_t *limit;
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ /*
+		 * Only single-channel LVDS is currently supported on CDV;
+		 * if that turns out to be wrong, add the dual-channel limits.
+ */
+ if (refclk == 96000)
+ limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
+ else
+ limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
+ } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ gma_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ if (refclk == 27000)
+ limit = &cdv_intel_limits[CDV_LIMIT_DP_27];
+ else
+ limit = &cdv_intel_limits[CDV_LIMIT_DP_100];
+ } else {
+ if (refclk == 27000)
+ limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
+ else
+ limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_96];
+ }
+ return limit;
+}
+
+/* m1 is reserved as 0 in CDV, n is a ring counter */
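+/*
+ * Worked example using the DP PLL table in cdv_intel_find_dp_pll() below:
+ * with the 27 MHz reference, m2 = 98, n = 1, p1 = 1, p2 = 10 gives
+ * m = 100, vco = 27000 * 100 / 1 = 2,700,000 kHz and
+ * dot = 2,700,000 / 10 = 270,000 kHz, i.e. the 2.7 GHz link rate / 10.
+ */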
+static void cdv_intel_clock(int refclk, struct gma_clock_t *clock)
+{
+ clock->m = clock->m2 + 2;
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = (refclk * clock->m) / clock->n;
+ clock->dot = clock->vco / clock->p;
+}
+
+static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target,
+ int refclk,
+ struct gma_clock_t *best_clock)
+{
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct gma_clock_t clock;
+
+ switch (refclk) {
+ case 27000:
+ if (target < 200000) {
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 1;
+ clock.m1 = 0;
+ clock.m2 = 118;
+ } else {
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 1;
+ clock.m1 = 0;
+ clock.m2 = 98;
+ }
+ break;
+
+ case 100000:
+ if (target < 200000) {
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 5;
+ clock.m1 = 0;
+ clock.m2 = 160;
+ } else {
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 5;
+ clock.m1 = 0;
+ clock.m2 = 133;
+ }
+ break;
+
+ default:
+ return false;
+ }
+
+ gma_crtc->clock_funcs->clock(refclk, &clock);
+ memcpy(best_clock, &clock, sizeof(struct gma_clock_t));
+ return true;
+}
+
+#define FIFO_PIPEA (1 << 0)
+#define FIFO_PIPEB (1 << 1)
+
+static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
+{
+ struct drm_crtc *crtc;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = NULL;
+
+ crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ gma_crtc = to_gma_crtc(crtc);
+
+ if (crtc->primary->fb == NULL || !gma_crtc->active)
+ return false;
+ return true;
+}
+
+void cdv_disable_sr(struct drm_device *dev)
+{
+ if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {
+		/* Disable self-refresh before adjusting the watermarks */
+		REG_WRITE(FW_BLC_SELF,
+			  REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+ REG_READ(FW_BLC_SELF);
+
+ gma_wait_for_vblank(dev);
+
+		/* Cedarview workaround: write the overlay plane, which
+		 * forces the hardware to leave the MAX_FIFO state.
+		 */
+ REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/);
+ REG_READ(OV_OVADD);
+
+ gma_wait_for_vblank(dev);
+ }
+}
+
+void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+
+ /* Is only one pipe enabled? */
+ if (cdv_intel_pipe_enabled(dev, 0) ^ cdv_intel_pipe_enabled(dev, 1)) {
+ u32 fw;
+
+ fw = REG_READ(DSPFW1);
+ fw &= ~DSP_FIFO_SR_WM_MASK;
+ fw |= (0x7e << DSP_FIFO_SR_WM_SHIFT);
+ fw &= ~CURSOR_B_FIFO_WM_MASK;
+ fw |= (0x4 << CURSOR_B_FIFO_WM_SHIFT);
+ REG_WRITE(DSPFW1, fw);
+
+ fw = REG_READ(DSPFW2);
+ fw &= ~CURSOR_A_FIFO_WM_MASK;
+ fw |= (0x6 << CURSOR_A_FIFO_WM_SHIFT);
+ fw &= ~DSP_PLANE_C_FIFO_WM_MASK;
+ fw |= (0x8 << DSP_PLANE_C_FIFO_WM_SHIFT);
+ REG_WRITE(DSPFW2, fw);
+
+ REG_WRITE(DSPFW3, 0x36000000);
+
+ /* ignore FW4 */
+
+		/* Is pipe B LVDS? */
+ if (gma_crtc->pipe == 1 &&
+ gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ REG_WRITE(DSPFW5, 0x00040330);
+ } else {
+ fw = (3 << DSP_PLANE_B_FIFO_WM1_SHIFT) |
+ (4 << DSP_PLANE_A_FIFO_WM1_SHIFT) |
+ (3 << CURSOR_B_FIFO_WM1_SHIFT) |
+ (4 << CURSOR_FIFO_SR_WM1_SHIFT);
+ REG_WRITE(DSPFW5, fw);
+ }
+
+ REG_WRITE(DSPFW6, 0x10);
+
+ gma_wait_for_vblank(dev);
+
+ /* enable self-refresh for single pipe active */
+ REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ REG_READ(FW_BLC_SELF);
+ gma_wait_for_vblank(dev);
+
+ } else {
+
+ /* HW team suggested values... */
+ REG_WRITE(DSPFW1, 0x3f880808);
+ REG_WRITE(DSPFW2, 0x0b020202);
+ REG_WRITE(DSPFW3, 0x24000000);
+ REG_WRITE(DSPFW4, 0x08030202);
+ REG_WRITE(DSPFW5, 0x01010101);
+ REG_WRITE(DSPFW6, 0x1d0);
+
+ gma_wait_for_vblank(dev);
+
+ dev_priv->ops->disable_sr(dev);
+ }
+}
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int cdv_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+ u32 pfit_control;
+
+ pfit_control = REG_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
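+	/* bits 30:29 of PFIT_CONTROL select the pipe the fitter serves */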
+ return (pfit_control >> 29) & 0x3;
+}
+
+static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ int refclk;
+ struct gma_clock_t clock;
+ u32 dpll = 0, dspcntr, pipeconf;
+ bool ok;
+ bool is_crt = false, is_lvds = false, is_tv = false;
+ bool is_hdmi = false, is_dp = false;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+ const struct gma_limit_t *limit;
+ u32 ddi_select = 0;
+ bool is_edp = false;
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct gma_encoder *gma_encoder =
+ gma_attached_encoder(connector);
+
+ if (!connector->encoder
+ || connector->encoder->crtc != crtc)
+ continue;
+
+ ddi_select = gma_encoder->ddi_select;
+ switch (gma_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_ANALOG:
+ is_crt = true;
+ break;
+ case INTEL_OUTPUT_HDMI:
+ is_hdmi = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_edp = true;
+ break;
+ default:
+ DRM_ERROR("invalid output type.\n");
+ return 0;
+ }
+ }
+
+	if (dev_priv->dplla_96mhz)
+		/* low-end SKU, 96/100 MHz */
+		refclk = 96000;
+	else
+		/* high-end SKU, 27/100 MHz */
+		refclk = 27000;
+ if (is_dp || is_edp) {
+		/*
+		 * Per the spec the low-end SKU has only CRT/LVDS, so it need
+		 * not be considered for DP/eDP.  The high-end SKU uses the
+		 * 27/100 MHz reference clock for DP/eDP: with the SSC clock
+		 * the reference is 100 MHz, otherwise 27 MHz.  From the VBIOS
+		 * code it appears that pipe A chooses 27 MHz for DP/eDP while
+		 * pipe B chooses 100 MHz.
+		 */
+ if (pipe == 0)
+ refclk = 27000;
+ else
+ refclk = 100000;
+ }
+
+ if (is_lvds && dev_priv->lvds_use_ssc) {
+ refclk = dev_priv->lvds_ssc_freq * 1000;
+ DRM_DEBUG_KMS("Use SSC reference clock %d Mhz\n", dev_priv->lvds_ssc_freq);
+ }
+
+ drm_mode_debug_printmodeline(adjusted_mode);
+
+ limit = gma_crtc->clock_funcs->limit(crtc, refclk);
+
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
+ &clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d",
+ adjusted_mode->clock, clock.dot);
+ return 0;
+ }
+
+ dpll = DPLL_VGA_MODE_DIS;
+ if (is_tv) {
+ /* XXX: just matching BIOS for now */
+/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ }
+/* dpll |= PLL_REF_INPUT_DREFCLK; */
+
+ if (is_dp || is_edp) {
+ cdv_intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ } else {
+ REG_WRITE(PIPE_GMCH_DATA_M(pipe), 0);
+ REG_WRITE(PIPE_GMCH_DATA_N(pipe), 0);
+ REG_WRITE(PIPE_DP_LINK_M(pipe), 0);
+ REG_WRITE(PIPE_DP_LINK_N(pipe), 0);
+ }
+
+ dpll |= DPLL_SYNCLOCK_ENABLE;
+/* if (is_lvds)
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL; */
+ /* dpll |= (2 << 11); */
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(map->conf);
+
+ pipeconf &= ~(PIPE_BPC_MASK);
+ if (is_edp) {
+ switch (dev_priv->edp.bpp) {
+ case 24:
+ pipeconf |= PIPE_8BPC;
+ break;
+ case 18:
+ pipeconf |= PIPE_6BPC;
+ break;
+ case 30:
+ pipeconf |= PIPE_10BPC;
+ break;
+ default:
+ pipeconf |= PIPE_8BPC;
+ break;
+ }
+ } else if (is_lvds) {
+		/* the BPC will be 6 if it is an 18-bit LVDS panel */
+ if ((REG_READ(LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+ pipeconf |= PIPE_8BPC;
+ else
+ pipeconf |= PIPE_6BPC;
+ } else
+ pipeconf |= PIPE_8BPC;
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (pipe == 0)
+ dspcntr |= DISPPLANE_SEL_PIPE_A;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+ pipeconf |= PIPEACONF_ENABLE;
+
+ REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
+ REG_READ(map->dpll);
+
+ cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds, ddi_select);
+
+ udelay(150);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (is_lvds) {
+ u32 lvds = REG_READ(LVDS);
+
+ lvds |=
+ LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
+ LVDS_PIPEB_SELECT;
+ /* Set the B0-B3 data pairs corresponding to
+ * whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (clock.p2 == 7)
+ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more
+ * thoroughly into how panels behave in the two modes.
+ */
+
+ REG_WRITE(LVDS, lvds);
+ REG_READ(LVDS);
+ }
+
+ dpll |= DPLL_VCO_ENABLE;
+
+ /* Disable the panel fitter if it was on our pipe */
+ if (cdv_intel_panel_fitter_pipe(dev) == pipe)
+ REG_WRITE(PFIT_CONTROL, 0);
+
+ DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+ drm_mode_debug_printmodeline(mode);
+
+ REG_WRITE(map->dpll,
+ (REG_READ(map->dpll) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
+ /* Wait for the clocks to stabilize. */
+ udelay(150); /* 42 usec w/o calibration, 110 with. rounded up. */
+
+ if (!(REG_READ(map->dpll) & DPLL_LOCK)) {
+ dev_err(dev->dev, "Failed to get DPLL lock\n");
+ return -EBUSY;
+ }
+
+	{
+		int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+
+		REG_WRITE(map->dpll_md,
+			  (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
+			  ((sdvo_pixel_multiply - 1) <<
+			   DPLL_MD_UDI_MULTIPLIER_SHIFT));
+	}
+
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ REG_WRITE(map->size,
+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(map->pos, 0);
+ REG_WRITE(map->src,
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ REG_WRITE(map->conf, pipeconf);
+ REG_READ(map->conf);
+
+ gma_wait_for_vblank(dev);
+
+ REG_WRITE(map->cntr, dspcntr);
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs =
+ crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ gma_wait_for_vblank(dev);
+
+ return 0;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+
+/* FIXME: why are we using this; should it be cdv_ in this tree? */
+
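+/*
+ * Illustrative figures (not taken from real registers): m1 = 10, m2 = 8,
+ * n = 4, p1 = 2, p2 = 4 with a 48 MHz refclk give m = 5 * 12 + 10 = 70,
+ * vco = 48000 * 70 / 6 = 560,000 kHz and dot = 560,000 / 8 = 70,000 kHz.
+ */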
+static void i8xx_clock(int refclk, struct gma_clock_t *clock)
+{
+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / (clock->n + 2);
+ clock->dot = clock->vco / clock->p;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int cdv_intel_crtc_clock_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ u32 dpll;
+ u32 fp;
+ struct gma_clock_t clock;
+ bool is_lvds;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+
+ if (gma_power_begin(dev, false)) {
+ dpll = REG_READ(map->dpll);
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = REG_READ(map->fp0);
+ else
+ fp = REG_READ(map->fp1);
+ is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
+ gma_power_end(dev);
+ } else {
+ dpll = p->dpll;
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = p->fp0;
+ else
+ fp = p->fp1;
+
+ is_lvds = (pipe == 1) &&
+ (dev_priv->regs.psb.saveLVDS & LVDS_PORT_EN);
+ }
+
+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+
+ if (is_lvds) {
+ clock.p1 =
+ ffs((dpll &
+ DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+ if (clock.p1 == 0) {
+ clock.p1 = 4;
+ dev_err(dev->dev, "PLL %d\n", dpll);
+ }
+ clock.p2 = 14;
+
+ if ((dpll & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ /* XXX: might not be 66MHz */
+ i8xx_clock(66000, &clock);
+ } else
+ i8xx_clock(48000, &clock);
+ } else {
+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
+ clock.p1 = 2;
+ else {
+ clock.p1 =
+ ((dpll &
+ DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+ }
+ if (dpll & PLL_P2_DIVIDE_BY_4)
+ clock.p2 = 4;
+ else
+ clock.p2 = 2;
+
+ i8xx_clock(48000, &clock);
+ }
+
+ /* XXX: It would be nice to validate the clocks, but we can't reuse
+ * i830PllIsValid() because it relies on the xf86_config connector
+ * configuration being accurate, which it isn't necessarily.
+ */
+
+ return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ struct drm_display_mode *mode;
+ int htot;
+ int hsync;
+ int vtot;
+ int vsync;
+
+ if (gma_power_begin(dev, false)) {
+ htot = REG_READ(map->htotal);
+ hsync = REG_READ(map->hsync);
+ vtot = REG_READ(map->vtotal);
+ vsync = REG_READ(map->vsync);
+ gma_power_end(dev);
+ } else {
+ htot = p->htotal;
+ hsync = p->hsync;
+ vtot = p->vtotal;
+ vsync = p->vsync;
+ }
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
+ mode->clock = cdv_intel_crtc_clock_get(dev, crtc);
+ mode->hdisplay = (htot & 0xffff) + 1;
+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+ mode->hsync_start = (hsync & 0xffff) + 1;
+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+ mode->vdisplay = (vtot & 0xffff) + 1;
+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+ mode->vsync_start = (vsync & 0xffff) + 1;
+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ return mode;
+}
+
+const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
+ .dpms = gma_crtc_dpms,
+ .mode_fixup = gma_crtc_mode_fixup,
+ .mode_set = cdv_intel_crtc_mode_set,
+ .mode_set_base = gma_pipe_set_base,
+ .prepare = gma_crtc_prepare,
+ .commit = gma_crtc_commit,
+ .disable = gma_crtc_disable,
+};
+
+const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
+ .save = gma_crtc_save,
+ .restore = gma_crtc_restore,
+ .cursor_set = gma_crtc_cursor_set,
+ .cursor_move = gma_crtc_cursor_move,
+ .gamma_set = gma_crtc_gamma_set,
+ .set_config = gma_crtc_set_config,
+ .destroy = gma_crtc_destroy,
+};
+
+const struct gma_clock_funcs cdv_clock_funcs = {
+ .clock = cdv_intel_clock,
+ .limit = cdv_intel_limit,
+ .pll_is_valid = gma_pll_is_valid,
+};
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
new file mode 100644
index 00000000000..9ff30c2efad
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -0,0 +1,1952 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "gma_display.h"
+#include <drm/drm_dp_helper.h>
+
+#define _wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+	while (!(COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W && !in_dbg_master()) msleep(W); \
+ } \
+ ret__; \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
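+
+/*
+ * Usage sketch: wait_for(REG_READ(PP_STATUS) & PP_ON, 1000) polls the
+ * condition for up to 1000 ms, sleeping 1 ms between reads (no sleep
+ * when running under the kgdb master, where sleeping is not allowed).
+ */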
+
+#define DP_LINK_STATUS_SIZE 6
+#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
+
+#define DP_LINK_CONFIGURATION_SIZE 9
+
+#define CDV_FAST_LINK_TRAIN 1
+
+struct cdv_intel_dp {
+ uint32_t output_reg;
+ uint32_t DP;
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ bool has_audio;
+ int force_audio;
+ uint32_t color_range;
+ uint8_t link_bw;
+ uint8_t lane_count;
+ uint8_t dpcd[4];
+ struct gma_encoder *encoder;
+ struct i2c_adapter adapter;
+ struct i2c_algo_dp_aux_data algo;
+ uint8_t train_set[4];
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ struct drm_display_mode *panel_fixed_mode; /* for eDP */
+ bool panel_on;
+};
+
+struct ddi_regoff {
+ uint32_t PreEmph1;
+ uint32_t PreEmph2;
+ uint32_t VSwing1;
+ uint32_t VSwing2;
+ uint32_t VSwing3;
+ uint32_t VSwing4;
+ uint32_t VSwing5;
+};
+
+static struct ddi_regoff ddi_DP_train_table[] = {
+ {.PreEmph1 = 0x812c, .PreEmph2 = 0x8124, .VSwing1 = 0x8154,
+ .VSwing2 = 0x8148, .VSwing3 = 0x814C, .VSwing4 = 0x8150,
+ .VSwing5 = 0x8158,},
+ {.PreEmph1 = 0x822c, .PreEmph2 = 0x8224, .VSwing1 = 0x8254,
+ .VSwing2 = 0x8248, .VSwing3 = 0x824C, .VSwing4 = 0x8250,
+ .VSwing5 = 0x8258,},
+};
+
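+/*
+ * Each pair in the table below is (VSwing2 word, PreEmph2 word).  The
+ * training code indexes it as (vswing + premph) * 2 for the swing value
+ * and premph * 2 + 1 for the pre-emphasis value; one combination is
+ * special-cased in cdv_intel_dp_set_vswing_premph().
+ */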
+static uint32_t dp_vswing_premph_table[] = {
+ 0x55338954, 0x4000,
+ 0x554d8954, 0x2000,
+ 0x55668954, 0,
+ 0x559ac0d4, 0x6000,
+};
+/**
+ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * @encoder: GMA encoder to check
+ *
+ * If a CPU or PCH DP output is attached to an eDP panel, this function
+ * will return true, and false otherwise.
+ */
+static bool is_edp(struct gma_encoder *encoder)
+{
+ return encoder->type == INTEL_OUTPUT_EDP;
+}
+
+
+static void cdv_intel_dp_start_link_train(struct gma_encoder *encoder);
+static void cdv_intel_dp_complete_link_train(struct gma_encoder *encoder);
+static void cdv_intel_dp_link_down(struct gma_encoder *encoder);
+
+static int
+cdv_intel_dp_max_lane_count(struct gma_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int max_lane_count = 4;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+ max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
+ switch (max_lane_count) {
+ case 1: case 2: case 4:
+ break;
+ default:
+ max_lane_count = 4;
+ }
+ }
+ return max_lane_count;
+}
+
+static int
+cdv_intel_dp_max_link_bw(struct gma_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+
+ switch (max_link_bw) {
+ case DP_LINK_BW_1_62:
+ case DP_LINK_BW_2_7:
+ break;
+ default:
+ max_link_bw = DP_LINK_BW_1_62;
+ break;
+ }
+ return max_link_bw;
+}
+
+static int
+cdv_intel_dp_link_clock(uint8_t link_bw)
+{
+ if (link_bw == DP_LINK_BW_2_7)
+ return 270000;
+ else
+ return 162000;
+}
+
+static int
+cdv_intel_dp_link_required(int pixel_clock, int bpp)
+{
+ return (pixel_clock * bpp + 7) / 8;
+}
+
+static int
+cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+{
+ return (max_link_clock * max_lanes * 19) / 20;
+}
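+
+/*
+ * Worked example: a 148,500 kHz pixel clock at 24 bpp requires
+ * 148500 * 24 / 8 = 445,500 units of link bandwidth, while four lanes
+ * at 2.7 GHz provide 270000 * 4 * 19 / 20 = 1,026,000; the 19/20
+ * factor leaves headroom for link overhead, so the mode fits easily.
+ */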
+
+static void cdv_intel_edp_panel_vdd_on(struct gma_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ u32 pp;
+
+ if (intel_dp->panel_on) {
+ DRM_DEBUG_KMS("Skip VDD on because of panel on\n");
+ return;
+ }
+ DRM_DEBUG_KMS("\n");
+
+ pp = REG_READ(PP_CONTROL);
+
+ pp |= EDP_FORCE_VDD;
+ REG_WRITE(PP_CONTROL, pp);
+ REG_READ(PP_CONTROL);
+ msleep(intel_dp->panel_power_up_delay);
+}
+
+static void cdv_intel_edp_panel_vdd_off(struct gma_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ u32 pp;
+
+ DRM_DEBUG_KMS("\n");
+ pp = REG_READ(PP_CONTROL);
+
+ pp &= ~EDP_FORCE_VDD;
+ REG_WRITE(PP_CONTROL, pp);
+ REG_READ(PP_CONTROL);
+
+}
+
+/* Returns true if the panel was already on when called */
+static bool cdv_intel_edp_panel_on(struct gma_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_NONE;
+
+ if (intel_dp->panel_on)
+ return true;
+
+ DRM_DEBUG_KMS("\n");
+ pp = REG_READ(PP_CONTROL);
+ pp &= ~PANEL_UNLOCK_MASK;
+
+ pp |= (PANEL_UNLOCK_REGS | POWER_TARGET_ON);
+ REG_WRITE(PP_CONTROL, pp);
+ REG_READ(PP_CONTROL);
+
+	if (wait_for(((REG_READ(PP_STATUS) & idle_on_mask) == idle_on_mask),
+		     1000)) {
+		DRM_DEBUG_KMS("Error powering up eDP panel, status %x\n",
+			      REG_READ(PP_STATUS));
+		intel_dp->panel_on = false;
+	} else
+		intel_dp->panel_on = true;
+ msleep(intel_dp->panel_power_up_delay);
+
+ return false;
+}
+
+static void cdv_intel_edp_panel_off(struct gma_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+	u32 pp, idle_off_mask = PP_ON;
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+
+ DRM_DEBUG_KMS("\n");
+
+ pp = REG_READ(PP_CONTROL);
+
+ if ((pp & POWER_TARGET_ON) == 0)
+ return;
+
+ intel_dp->panel_on = false;
+ pp &= ~PANEL_UNLOCK_MASK;
+ /* ILK workaround: disable reset around power sequence */
+
+ pp &= ~POWER_TARGET_ON;
+ pp &= ~EDP_FORCE_VDD;
+ pp &= ~EDP_BLC_ENABLE;
+ REG_WRITE(PP_CONTROL, pp);
+ REG_READ(PP_CONTROL);
+ DRM_DEBUG_KMS("PP_STATUS %x\n", REG_READ(PP_STATUS));
+
+	if (wait_for((REG_READ(PP_STATUS) & idle_off_mask) == 0, 1000))
+		DRM_DEBUG_KMS("Error turning off the panel\n");
+
+ msleep(intel_dp->panel_power_cycle_delay);
+ DRM_DEBUG_KMS("Over\n");
+}
+
+static void cdv_intel_edp_backlight_on(struct gma_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ u32 pp;
+
+ DRM_DEBUG_KMS("\n");
+ /*
+ * If we enable the backlight right away following a panel power
+ * on, we may see slight flicker as the panel syncs with the eDP
+ * link. So delay a bit to make sure the image is solid before
+ * allowing it to appear.
+ */
+ msleep(300);
+ pp = REG_READ(PP_CONTROL);
+
+ pp |= EDP_BLC_ENABLE;
+ REG_WRITE(PP_CONTROL, pp);
+ gma_backlight_enable(dev);
+}
+
+static void cdv_intel_edp_backlight_off(struct gma_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ u32 pp;
+
+ DRM_DEBUG_KMS("\n");
+ gma_backlight_disable(dev);
+ msleep(10);
+ pp = REG_READ(PP_CONTROL);
+
+ pp &= ~EDP_BLC_ENABLE;
+ REG_WRITE(PP_CONTROL, pp);
+ msleep(intel_dp->backlight_off_delay);
+}
+
+static int
+cdv_intel_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct gma_encoder *encoder = gma_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
+ int max_lanes = cdv_intel_dp_max_lane_count(encoder);
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+
+ if (is_edp(encoder) && intel_dp->panel_fixed_mode) {
+ if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ /* only refuse the mode on non eDP since we have seen some weird eDP panels
+ which are outside spec tolerances but somehow work by magic */
+ if (!is_edp(encoder) &&
+ (cdv_intel_dp_link_required(mode->clock, dev_priv->edp.bpp)
+ > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes)))
+ return MODE_CLOCK_HIGH;
+
+ if (is_edp(encoder)) {
+ if (cdv_intel_dp_link_required(mode->clock, 24)
+ > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes))
+ return MODE_CLOCK_HIGH;
+
+ }
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ return MODE_OK;
+}
+
+static uint32_t
+pack_aux(uint8_t *src, int src_bytes)
+{
+ int i;
+ uint32_t v = 0;
+
+ if (src_bytes > 4)
+ src_bytes = 4;
+ for (i = 0; i < src_bytes; i++)
+ v |= ((uint32_t) src[i]) << ((3-i) * 8);
+ return v;
+}
+
+static void
+unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+{
+ int i;
+ if (dst_bytes > 4)
+ dst_bytes = 4;
+ for (i = 0; i < dst_bytes; i++)
+ dst[i] = src >> ((3-i) * 8);
+}
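+
+/*
+ * Example: packing the bytes {0x12, 0x34} yields 0x12340000 -- bytes
+ * fill the AUX data register MSB-first -- and unpack_aux() inverts it.
+ */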
+
+static int
+cdv_intel_dp_aux_ch(struct gma_encoder *encoder,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_size)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ uint32_t output_reg = intel_dp->output_reg;
+ struct drm_device *dev = encoder->base.dev;
+ uint32_t ch_ctl = output_reg + 0x10;
+ uint32_t ch_data = ch_ctl + 4;
+ int i;
+ int recv_bytes;
+ uint32_t status;
+ uint32_t aux_clock_divider;
+ int try, precharge;
+
+	/* The clock divider is based off the hrawclk, and we want the AUX
+	 * channel to run at 2 MHz, so take the hrawclk value and divide
+	 * by 2.  The CDV platform uses a 200 MHz hrawclk.
+	 */
+ aux_clock_divider = 200 / 2;
+
+ precharge = 4;
+ if (is_edp(encoder))
+ precharge = 10;
+
+ if (REG_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
+ DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
+ REG_READ(ch_ctl));
+ return -EBUSY;
+ }
+
+ /* Must try at least 3 times according to DP spec */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4)
+ REG_WRITE(ch_data + i,
+ pack_aux(send + i, send_bytes - i));
+
+ /* Send the command and wait for it to complete */
+ REG_WRITE(ch_ctl,
+ DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+ for (;;) {
+ status = REG_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ udelay(100);
+ }
+
+ /* Clear done status and any errors */
+ REG_WRITE(ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+ if (status & DP_AUX_CH_CTL_DONE)
+ break;
+ }
+
+ if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+ DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+ return -EBUSY;
+ }
+
+ /* Check for timeout or receive error.
+ * Timeouts occur when the sink is not connected
+ */
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+ return -EIO;
+ }
+
+ /* Timeouts occur when the device isn't connected, so they're
+ * "normal" -- don't fill the kernel log with these */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+ DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+ return -ETIMEDOUT;
+ }
+
+ /* Unload any bytes sent back from the other side */
+ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+ if (recv_bytes > recv_size)
+ recv_bytes = recv_size;
+
+ for (i = 0; i < recv_bytes; i += 4)
+ unpack_aux(REG_READ(ch_data + i),
+ recv + i, recv_bytes - i);
+
+ return recv_bytes;
+}
+
+/* Write data to the aux channel in native mode */
+static int
+cdv_intel_dp_aux_native_write(struct gma_encoder *encoder,
+ uint16_t address, uint8_t *send, int send_bytes)
+{
+ int ret;
+ uint8_t msg[20];
+ int msg_bytes;
+ uint8_t ack;
+
+ if (send_bytes > 16)
+ return -1;
+ msg[0] = DP_AUX_NATIVE_WRITE << 4;
+ msg[1] = address >> 8;
+ msg[2] = address & 0xff;
+ msg[3] = send_bytes - 1;
+ memcpy(&msg[4], send, send_bytes);
+ msg_bytes = send_bytes + 4;
+ for (;;) {
+ ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1);
+ if (ret < 0)
+ return ret;
+ ack >>= 4;
+ if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
+ break;
+ else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
+ udelay(100);
+ else
+ return -EIO;
+ }
+ return send_bytes;
+}
+
+/* Write a single byte to the aux channel in native mode */
+static int
+cdv_intel_dp_aux_native_write_1(struct gma_encoder *encoder,
+ uint16_t address, uint8_t byte)
+{
+ return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1);
+}
+
+/* read bytes from a native aux channel */
+static int
+cdv_intel_dp_aux_native_read(struct gma_encoder *encoder,
+ uint16_t address, uint8_t *recv, int recv_bytes)
+{
+ uint8_t msg[4];
+ int msg_bytes;
+ uint8_t reply[20];
+ int reply_bytes;
+ uint8_t ack;
+ int ret;
+
+ msg[0] = DP_AUX_NATIVE_READ << 4;
+ msg[1] = address >> 8;
+ msg[2] = address & 0xff;
+ msg[3] = recv_bytes - 1;
+
+ msg_bytes = 4;
+ reply_bytes = recv_bytes + 1;
+
+ for (;;) {
+ ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret == 0)
+ return -EPROTO;
+ if (ret < 0)
+ return ret;
+ ack = reply[0] >> 4;
+ if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
+ memcpy(recv, reply + 1, ret - 1);
+ return ret - 1;
+ }
+ else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
+ udelay(100);
+ else
+ return -EIO;
+ }
+}
+
+static int
+cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ struct cdv_intel_dp *intel_dp = container_of(adapter,
+ struct cdv_intel_dp,
+ adapter);
+ struct gma_encoder *encoder = intel_dp->encoder;
+ uint16_t address = algo_data->address;
+ uint8_t msg[5];
+ uint8_t reply[2];
+ unsigned retry;
+ int msg_bytes;
+ int reply_bytes;
+ int ret;
+
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[0] = DP_AUX_I2C_READ << 4;
+ else
+ msg[0] = DP_AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[0] |= DP_AUX_I2C_MOT << 4;
+
+ msg[1] = address >> 8;
+ msg[2] = address;
+
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg[3] = 0;
+ msg[4] = write_byte;
+ msg_bytes = 5;
+ reply_bytes = 1;
+ break;
+ case MODE_I2C_READ:
+ msg[3] = 0;
+ msg_bytes = 4;
+ reply_bytes = 2;
+ break;
+ default:
+ msg_bytes = 3;
+ reply_bytes = 1;
+ break;
+ }
+
+ for (retry = 0; retry < 5; retry++) {
+ ret = cdv_intel_dp_aux_ch(encoder,
+ msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+ return ret;
+ }
+
+ switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
+ case DP_AUX_NATIVE_REPLY_ACK:
+ /* I2C-over-AUX Reply field is only valid
+ * when paired with AUX ACK.
+ */
+ break;
+ case DP_AUX_NATIVE_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_ch native nack\n");
+ return -EREMOTEIO;
+ case DP_AUX_NATIVE_REPLY_DEFER:
+ udelay(100);
+ continue;
+ default:
+ DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+ reply[0]);
+ return -EREMOTEIO;
+ }
+
+ switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
+ case DP_AUX_I2C_REPLY_ACK:
+ if (mode == MODE_I2C_READ) {
+ *read_byte = reply[1];
+ }
+ return reply_bytes - 1;
+ case DP_AUX_I2C_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_i2c nack\n");
+ return -EREMOTEIO;
+ case DP_AUX_I2C_REPLY_DEFER:
+ DRM_DEBUG_KMS("aux_i2c defer\n");
+ udelay(100);
+ break;
+ default:
+ DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
+ return -EREMOTEIO;
+ }
+ }
+
+ DRM_ERROR("too many retries, giving up\n");
+ return -EREMOTEIO;
+}
+
+static int
+cdv_intel_dp_i2c_init(struct gma_connector *connector,
+ struct gma_encoder *encoder, const char *name)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int ret;
+
+ DRM_DEBUG_KMS("i2c_init %s\n", name);
+
+ intel_dp->algo.running = false;
+ intel_dp->algo.address = 0;
+ intel_dp->algo.aux_ch = cdv_intel_dp_i2c_aux_ch;
+
+ memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
+ intel_dp->adapter.owner = THIS_MODULE;
+ intel_dp->adapter.class = I2C_CLASS_DDC;
+	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
+ intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
+ intel_dp->adapter.algo_data = &intel_dp->algo;
+ intel_dp->adapter.dev.parent = connector->base.kdev;
+
+ if (is_edp(encoder))
+ cdv_intel_edp_panel_vdd_on(encoder);
+ ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
+ if (is_edp(encoder))
+ cdv_intel_edp_panel_vdd_off(encoder);
+
+ return ret;
+}
+
+static void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ adjusted_mode->hdisplay = fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = fixed_mode->hsync_end;
+ adjusted_mode->htotal = fixed_mode->htotal;
+
+ adjusted_mode->vdisplay = fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = fixed_mode->vsync_end;
+ adjusted_mode->vtotal = fixed_mode->vtotal;
+
+ adjusted_mode->clock = fixed_mode->clock;
+
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+}
+
+static bool
+cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_psb_private *dev_priv = encoder->dev->dev_private;
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ int lane_count, clock;
+ int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder);
+ int max_clock = cdv_intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
+ static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+ int refclock = mode->clock;
+ int bpp = 24;
+
+ if (is_edp(intel_encoder) && intel_dp->panel_fixed_mode) {
+ cdv_intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
+ refclock = intel_dp->panel_fixed_mode->clock;
+ bpp = dev_priv->edp.bpp;
+ }
+
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ for (clock = max_clock; clock >= 0; clock--) {
+ int link_avail = cdv_intel_dp_max_data_rate(cdv_intel_dp_link_clock(bws[clock]), lane_count);
+
+ if (cdv_intel_dp_link_required(refclock, bpp) <= link_avail) {
+ intel_dp->link_bw = bws[clock];
+ intel_dp->lane_count = lane_count;
+ adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
+ DRM_DEBUG_KMS("Display port link bw %02x lane "
+ "count %d clock %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ adjusted_mode->clock);
+ return true;
+ }
+ }
+ }
+ if (is_edp(intel_encoder)) {
+ /* okay we failed just pick the highest */
+ intel_dp->lane_count = max_lane_count;
+ intel_dp->link_bw = bws[max_clock];
+ adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
+ DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+ "count %d clock %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ adjusted_mode->clock);
+
+ return true;
+ }
+ return false;
+}
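+
+/*
+ * Worked example for the search above (illustrative mode, not from this
+ * patch): a 148,500 kHz pixel clock at 24 bpp needs 445,500 units of
+ * bandwidth.  One lane never suffices (2.7 GHz gives only 256,500), but
+ * two lanes at 2.7 GHz give 513,000, so the loop settles on
+ * lane_count = 2, link_bw = DP_LINK_BW_2_7.
+ */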
+
+struct cdv_intel_dp_m_n {
+ uint32_t tu;
+ uint32_t gmch_m;
+ uint32_t gmch_n;
+ uint32_t link_m;
+ uint32_t link_n;
+};
+
+static void
+cdv_intel_reduce_ratio(uint32_t *num, uint32_t *den)
+{
+	uint64_t value;
+
+	/* Rescale the ratio so that the denominator is always 0x800000 */
+	value = (uint64_t)*num * 0x800000;
+	do_div(value, *den);
+	*num = value;
+	*den = 0x800000;
+}
+
+static void
+cdv_intel_dp_compute_m_n(int bpp,
+ int nlanes,
+ int pixel_clock,
+ int link_clock,
+ struct cdv_intel_dp_m_n *m_n)
+{
+ m_n->tu = 64;
+ m_n->gmch_m = (pixel_clock * bpp + 7) >> 3;
+ m_n->gmch_n = link_clock * nlanes;
+ cdv_intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+ m_n->link_m = pixel_clock;
+ m_n->link_n = link_clock;
+ cdv_intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+}
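+
+/*
+ * Worked example: bpp = 24, 4 lanes, a 148,500 kHz pixel clock and a
+ * 270,000 kHz link clock give gmch_m = 445,500 and gmch_n = 1,080,000,
+ * which cdv_intel_reduce_ratio() rescales to roughly
+ * 3,460,300 / 0x800000.
+ */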
+
+void
+cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_encoder *encoder;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int lane_count = 4, bpp = 24;
+ struct cdv_intel_dp_m_n m_n;
+ int pipe = gma_crtc->pipe;
+
+ /*
+ * Find the lane count in the intel_encoder private
+ */
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct gma_encoder *intel_encoder;
+ struct cdv_intel_dp *intel_dp;
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ intel_encoder = to_gma_encoder(encoder);
+ intel_dp = intel_encoder->dev_priv;
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ lane_count = intel_dp->lane_count;
+ break;
+ } else if (is_edp(intel_encoder)) {
+ lane_count = intel_dp->lane_count;
+ bpp = dev_priv->edp.bpp;
+ break;
+ }
+ }
+
+ /*
+ * Compute the GMCH and Link ratios. The '3' here is
+ * the number of bytes_per_pixel post-LUT, which we always
+ * set up for 8-bits of R/G/B, or 3 bytes total.
+ */
+ cdv_intel_dp_compute_m_n(bpp, lane_count,
+ mode->clock, adjusted_mode->clock, &m_n);
+
+	REG_WRITE(PIPE_GMCH_DATA_M(pipe),
+		  ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+		  m_n.gmch_m);
+	REG_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
+	REG_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
+	REG_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
+}
+
+static void
+cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
+ struct drm_crtc *crtc = encoder->crtc;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ struct drm_device *dev = encoder->dev;
+
+ intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ intel_dp->DP |= intel_dp->color_range;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+ switch (intel_dp->lane_count) {
+ case 1:
+ intel_dp->DP |= DP_PORT_WIDTH_1;
+ break;
+ case 2:
+ intel_dp->DP |= DP_PORT_WIDTH_2;
+ break;
+ case 4:
+ intel_dp->DP |= DP_PORT_WIDTH_4;
+ break;
+ }
+ if (intel_dp->has_audio)
+ intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+
+ memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ intel_dp->link_configuration[0] = intel_dp->link_bw;
+ intel_dp->link_configuration[1] = intel_dp->lane_count;
+
+ /*
+ * Check for DPCD version > 1.1 and enhanced framing support
+ */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+ intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+ }
+
+ /* CPT DP's pipe select is decided in TRANS_DP_CTL */
+ if (gma_crtc->pipe == 1)
+ intel_dp->DP |= DP_PIPEB_SELECT;
+
+ REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN));
+ DRM_DEBUG_KMS("DP expected reg is %x\n", intel_dp->DP);
+ if (is_edp(intel_encoder)) {
+ uint32_t pfit_control;
+ cdv_intel_edp_panel_on(intel_encoder);
+
+ if (mode->hdisplay != adjusted_mode->hdisplay ||
+ mode->vdisplay != adjusted_mode->vdisplay)
+ pfit_control = PFIT_ENABLE;
+ else
+ pfit_control = 0;
+
+ pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT;
+
+ REG_WRITE(PFIT_CONTROL, pfit_control);
+ }
+}
+
+
+/* If the sink supports it, try to set the power state appropriately */
+static void cdv_intel_dp_sink_dpms(struct gma_encoder *encoder, int mode)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int ret, i;
+
+ /* Should have a valid DPCD by this point */
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ ret = cdv_intel_dp_aux_native_write_1(encoder, DP_SET_POWER,
+ DP_SET_POWER_D3);
+ if (ret != 1)
+ DRM_DEBUG_DRIVER("failed to write sink power state\n");
+ } else {
+		/*
+		 * When turning on, retry a few times with a 1 ms delay
+		 * between attempts to give the sink time to wake up.
+		 */
+ for (i = 0; i < 3; i++) {
+ ret = cdv_intel_dp_aux_native_write_1(encoder,
+ DP_SET_POWER,
+ DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ udelay(1000);
+ }
+ }
+}
+
+static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
+{
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
+ int edp = is_edp(intel_encoder);
+
+ if (edp) {
+ cdv_intel_edp_backlight_off(intel_encoder);
+ cdv_intel_edp_panel_off(intel_encoder);
+ cdv_intel_edp_panel_vdd_on(intel_encoder);
+ }
+ /* Wake up the sink first */
+ cdv_intel_dp_sink_dpms(intel_encoder, DRM_MODE_DPMS_ON);
+ cdv_intel_dp_link_down(intel_encoder);
+ if (edp)
+ cdv_intel_edp_panel_vdd_off(intel_encoder);
+}
+
+static void cdv_intel_dp_commit(struct drm_encoder *encoder)
+{
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
+ int edp = is_edp(intel_encoder);
+
+ if (edp)
+ cdv_intel_edp_panel_on(intel_encoder);
+ cdv_intel_dp_start_link_train(intel_encoder);
+ cdv_intel_dp_complete_link_train(intel_encoder);
+ if (edp)
+ cdv_intel_edp_backlight_on(intel_encoder);
+}
+
+static void
+cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ struct drm_device *dev = encoder->dev;
+ uint32_t dp_reg = REG_READ(intel_dp->output_reg);
+ int edp = is_edp(intel_encoder);
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ if (edp) {
+ cdv_intel_edp_backlight_off(intel_encoder);
+ cdv_intel_edp_panel_vdd_on(intel_encoder);
+ }
+ cdv_intel_dp_sink_dpms(intel_encoder, mode);
+ cdv_intel_dp_link_down(intel_encoder);
+ if (edp) {
+ cdv_intel_edp_panel_vdd_off(intel_encoder);
+ cdv_intel_edp_panel_off(intel_encoder);
+ }
+ } else {
+ if (edp)
+ cdv_intel_edp_panel_on(intel_encoder);
+ cdv_intel_dp_sink_dpms(intel_encoder, mode);
+ if (!(dp_reg & DP_PORT_EN)) {
+ cdv_intel_dp_start_link_train(intel_encoder);
+ cdv_intel_dp_complete_link_train(intel_encoder);
+ }
+ if (edp)
+ cdv_intel_edp_backlight_on(intel_encoder);
+ }
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+cdv_intel_dp_aux_native_read_retry(struct gma_encoder *encoder, uint16_t address,
+ uint8_t *recv, int recv_bytes)
+{
+ int ret, i;
+
+ /*
+ * Sinks are *supposed* to come up within 1ms from an off state,
+ * but we're also supposed to retry 3 times per the spec.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = cdv_intel_dp_aux_native_read(encoder, address, recv,
+ recv_bytes);
+ if (ret == recv_bytes)
+ return true;
+ udelay(1000);
+ }
+
+ return false;
+}
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+static bool
+cdv_intel_dp_get_link_status(struct gma_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ return cdv_intel_dp_aux_native_read_retry(encoder,
+ DP_LANE0_1_STATUS,
+ intel_dp->link_status,
+ DP_LINK_STATUS_SIZE);
+}
+
+static uint8_t
+cdv_intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static uint8_t
+cdv_intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+ return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static uint8_t
+cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+ return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
+
+#if 0
+static char *voltage_names[] = {
+ "0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+ "0dB", "3.5dB", "6dB", "9.5dB"
+};
+static char *link_train_names[] = {
+ "pattern 1", "pattern 2", "idle", "off"
+};
+#endif
+
+#define CDV_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
+/*
+static uint8_t
+cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+{
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+}
+*/
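+/*
+ * Take the highest voltage-swing/pre-emphasis the sink requested on any
+ * active lane, clamp it to the CDV maximum, and program that one value
+ * on all four lanes.
+ */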
+static void
+cdv_intel_get_adjust_train(struct gma_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ uint8_t v = 0;
+ uint8_t p = 0;
+ int lane;
+
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ uint8_t this_v = cdv_intel_get_adjust_request_voltage(intel_dp->link_status, lane);
+ uint8_t this_p = cdv_intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
+
+ if (this_v > v)
+ v = this_v;
+ if (this_p > p)
+ p = this_p;
+ }
+
+ if (v >= CDV_DP_VOLTAGE_MAX)
+ v = CDV_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+
+ if (p == DP_TRAIN_PRE_EMPHASIS_MASK)
+ p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (lane = 0; lane < 4; lane++)
+ intel_dp->train_set[lane] = v | p;
+}
+
+
+static uint8_t
+cdv_intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+ return (l >> s) & 0xf;
+}
+
+/* Check for clock recovery is done on all channels */
+static bool
+cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+ int lane;
+ uint8_t lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = cdv_intel_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+
+/* Check to see if channel eq is done on all channels */
+#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
+ DP_LANE_CHANNEL_EQ_DONE|\
+ DP_LANE_SYMBOL_LOCKED)
+static bool
+cdv_intel_channel_eq_ok(struct gma_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ uint8_t lane_align;
+ uint8_t lane_status;
+ int lane;
+
+ lane_align = cdv_intel_dp_link_status(intel_dp->link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ lane_status = cdv_intel_get_lane_status(intel_dp->link_status, lane);
+ if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+
+static bool
+cdv_intel_dp_set_link_train(struct gma_encoder *encoder,
+ uint32_t dp_reg_value,
+ uint8_t dp_train_pat)
+{
+
+ struct drm_device *dev = encoder->base.dev;
+ int ret;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+
+ REG_WRITE(intel_dp->output_reg, dp_reg_value);
+ REG_READ(intel_dp->output_reg);
+
+ ret = cdv_intel_dp_aux_native_write_1(encoder,
+ DP_TRAINING_PATTERN_SET,
+ dp_train_pat);
+
+ if (ret != 1) {
+ DRM_DEBUG_KMS("Failure in setting link pattern %x\n",
+ dp_train_pat);
+ return false;
+ }
+
+ return true;
+}
+
+
+static bool
+cdv_intel_dplink_set_level(struct gma_encoder *encoder,
+ uint8_t dp_train_pat)
+{
+
+ int ret;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+
+ ret = cdv_intel_dp_aux_native_write(encoder,
+ DP_TRAINING_LANE0_SET,
+ intel_dp->train_set,
+ intel_dp->lane_count);
+
+ if (ret != intel_dp->lane_count) {
+ DRM_DEBUG_KMS("Failure in setting level %d, lane_cnt= %d\n",
+ intel_dp->train_set[0], intel_dp->lane_count);
+ return false;
+ }
+ return true;
+}
+
+static void
+cdv_intel_dp_set_vswing_premph(struct gma_encoder *encoder, uint8_t signal_level)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ struct ddi_regoff *ddi_reg;
+ int vswing, premph, index;
+
+ if (intel_dp->output_reg == DP_B)
+ ddi_reg = &ddi_DP_train_table[0];
+ else
+ ddi_reg = &ddi_DP_train_table[1];
+
+ vswing = (signal_level & DP_TRAIN_VOLTAGE_SWING_MASK);
+ premph = ((signal_level & DP_TRAIN_PRE_EMPHASIS_MASK)) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ if (vswing + premph > 3)
+ return;
+#ifdef CDV_FAST_LINK_TRAIN
+ return;
+#endif
+ DRM_DEBUG_KMS("Test2\n");
+ //return ;
+ cdv_sb_reset(dev);
+ /* ;Swing voltage programming
+ ;gfx_dpio_set_reg(0xc058, 0x0505313A) */
+ cdv_sb_write(dev, ddi_reg->VSwing5, 0x0505313A);
+
+ /* ;gfx_dpio_set_reg(0x8154, 0x43406055) */
+ cdv_sb_write(dev, ddi_reg->VSwing1, 0x43406055);
+
+	/* ;gfx_dpio_set_reg(0x8148, 0x55338954)
+	 * The VSwing/PreEmph table is indexed by the vswing/premph level
+	 */
+ index = (vswing + premph) * 2;
+	if (premph == 1 && vswing == 1)
+		cdv_sb_write(dev, ddi_reg->VSwing2, 0x55738954);
+	else
+		cdv_sb_write(dev, ddi_reg->VSwing2,
+			     dp_vswing_premph_table[index]);
+
+ /* ;gfx_dpio_set_reg(0x814c, 0x40802040) */
+ if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_1200)
+ cdv_sb_write(dev, ddi_reg->VSwing3, 0x70802040);
+ else
+ cdv_sb_write(dev, ddi_reg->VSwing3, 0x40802040);
+
+ /* ;gfx_dpio_set_reg(0x8150, 0x2b405555) */
+ /* cdv_sb_write(dev, ddi_reg->VSwing4, 0x2b405555); */
+
+ /* ;gfx_dpio_set_reg(0x8154, 0xc3406055) */
+ cdv_sb_write(dev, ddi_reg->VSwing1, 0xc3406055);
+
+ /* ;Pre emphasis programming
+ * ;gfx_dpio_set_reg(0xc02c, 0x1f030040)
+ */
+ cdv_sb_write(dev, ddi_reg->PreEmph1, 0x1f030040);
+
+ /* ;gfx_dpio_set_reg(0x8124, 0x00004000) */
+ index = 2 * premph + 1;
+ cdv_sb_write(dev, ddi_reg->PreEmph2, dp_vswing_premph_table[index]);
+}
+
+
+/* Enable corresponding port and start training pattern 1 */
+static void
+cdv_intel_dp_start_link_train(struct gma_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int i;
+ uint8_t voltage;
+ bool clock_recovery = false;
+ int tries;
+ u32 reg;
+ uint32_t DP = intel_dp->DP;
+
+ DP |= DP_PORT_EN;
+ DP &= ~DP_LINK_TRAIN_MASK;
+
+ reg = DP;
+ reg |= DP_LINK_TRAIN_PAT_1;
+ /* Enable output, wait for it to become active */
+ REG_WRITE(intel_dp->output_reg, reg);
+ REG_READ(intel_dp->output_reg);
+ gma_wait_for_vblank(dev);
+
+ DRM_DEBUG_KMS("Link config\n");
+ /* Write the link configuration data */
+ cdv_intel_dp_aux_native_write(encoder, DP_LINK_BW_SET,
+ intel_dp->link_configuration,
+ 2);
+
+ memset(intel_dp->train_set, 0, 4);
+ voltage = 0;
+ tries = 0;
+ clock_recovery = false;
+
+ DRM_DEBUG_KMS("Start train\n");
+ reg = DP | DP_LINK_TRAIN_PAT_1;
+
+ for (;;) {
+ /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+ DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
+ intel_dp->train_set[0],
+ intel_dp->link_configuration[0],
+ intel_dp->link_configuration[1]);
+
+ if (!cdv_intel_dp_set_link_train(encoder, reg, DP_TRAINING_PATTERN_1)) {
+ DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 1\n");
+ }
+ cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
+ /* Set training pattern 1 */
+
+ cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_1);
+
+ udelay(200);
+ if (!cdv_intel_dp_get_link_status(encoder))
+ break;
+
+ DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
+ intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
+ intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
+
+ if (cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+ DRM_DEBUG_KMS("PT1 train is done\n");
+ clock_recovery = true;
+ break;
+ }
+
+ /* Check to see if we've tried the max voltage */
+ for (i = 0; i < intel_dp->lane_count; i++)
+ if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ break;
+ if (i == intel_dp->lane_count)
+ break;
+
+ /* Check to see if we've tried the same voltage 5 times */
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++tries;
+ if (tries == 5)
+ break;
+ } else
+ tries = 0;
+ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* Compute new intel_dp->train_set as requested by target */
+ cdv_intel_get_adjust_train(encoder);
+
+ }
+
+	if (!clock_recovery)
+		DRM_DEBUG_KMS("failure in DP pattern 1 training, train set %x\n",
+			      intel_dp->train_set[0]);
+
+ intel_dp->DP = DP;
+}
+
+static void
+cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ bool channel_eq = false;
+ int tries, cr_tries;
+ u32 reg;
+ uint32_t DP = intel_dp->DP;
+
+ /* channel equalization */
+ tries = 0;
+ cr_tries = 0;
+ channel_eq = false;
+
+ DRM_DEBUG_KMS("\n");
+ reg = DP | DP_LINK_TRAIN_PAT_2;
+
+ for (;;) {
+
+ DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
+ intel_dp->train_set[0],
+ intel_dp->link_configuration[0],
+ intel_dp->link_configuration[1]);
+ /* channel eq pattern */
+
+ if (!cdv_intel_dp_set_link_train(encoder, reg,
+ DP_TRAINING_PATTERN_2)) {
+ DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 2\n");
+ }
+ /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+
+ if (cr_tries > 5) {
+ DRM_ERROR("failed to train DP, aborting\n");
+ cdv_intel_dp_link_down(encoder);
+ break;
+ }
+
+ cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
+
+ cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_2);
+
+ udelay(1000);
+ if (!cdv_intel_dp_get_link_status(encoder))
+ break;
+
+ DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
+ intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
+ intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
+
+ /* Make sure clock is still ok */
+ if (!cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+ cdv_intel_dp_start_link_train(encoder);
+ cr_tries++;
+ continue;
+ }
+
+ if (cdv_intel_channel_eq_ok(encoder)) {
+ DRM_DEBUG_KMS("PT2 train is done\n");
+ channel_eq = true;
+ break;
+ }
+
+ /* Try 5 times, then try clock recovery if that fails */
+ if (tries > 5) {
+ cdv_intel_dp_link_down(encoder);
+ cdv_intel_dp_start_link_train(encoder);
+ tries = 0;
+ cr_tries++;
+ continue;
+ }
+
+ /* Compute new intel_dp->train_set as requested by target */
+ cdv_intel_get_adjust_train(encoder);
+ ++tries;
+
+ }
+
+ reg = DP | DP_LINK_TRAIN_OFF;
+
+ REG_WRITE(intel_dp->output_reg, reg);
+ REG_READ(intel_dp->output_reg);
+ cdv_intel_dp_aux_native_write_1(encoder,
+ DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+}
+
+static void
+cdv_intel_dp_link_down(struct gma_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ uint32_t DP = intel_dp->DP;
+
+ if ((REG_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+ return;
+
+ DRM_DEBUG_KMS("\n");
+
+
+ {
+ DP &= ~DP_LINK_TRAIN_MASK;
+ REG_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+ }
+ REG_READ(intel_dp->output_reg);
+
+ msleep(17);
+
+ REG_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+ REG_READ(intel_dp->output_reg);
+}
+
+static enum drm_connector_status cdv_dp_detect(struct gma_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ enum drm_connector_status status;
+
+ status = connector_status_disconnected;
+	if (cdv_intel_dp_aux_native_read(encoder, 0x000, intel_dp->dpcd,
+					 sizeof(intel_dp->dpcd)) ==
+	    sizeof(intel_dp->dpcd)) {
+		if (intel_dp->dpcd[DP_DPCD_REV] != 0)
+			status = connector_status_connected;
+	}
+ if (status == connector_status_connected)
+ DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
+ intel_dp->dpcd[0], intel_dp->dpcd[1],
+ intel_dp->dpcd[2], intel_dp->dpcd[3]);
+ return status;
+}
+
+/**
+ * Detect the DP/eDP connection by reading the sink's DPCD over the AUX
+ * channel; a readable, non-zero DPCD revision indicates a connected sink.
+ *
+ * \return connector_status_connected if a DP sink is present.
+ * \return connector_status_disconnected otherwise.
+ */
+static enum drm_connector_status
+cdv_intel_dp_detect(struct drm_connector *connector, bool force)
+{
+ struct gma_encoder *encoder = gma_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ enum drm_connector_status status;
+ struct edid *edid = NULL;
+ int edp = is_edp(encoder);
+
+ intel_dp->has_audio = false;
+
+ if (edp)
+ cdv_intel_edp_panel_vdd_on(encoder);
+ status = cdv_dp_detect(encoder);
+ if (status != connector_status_connected) {
+ if (edp)
+ cdv_intel_edp_panel_vdd_off(encoder);
+ return status;
+ }
+
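+	/* force_audio: negative forces audio off, zero auto-detects from the
+	   EDID, positive forces audio on */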
+ if (intel_dp->force_audio) {
+ intel_dp->has_audio = intel_dp->force_audio > 0;
+ } else {
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ intel_dp->has_audio = drm_detect_monitor_audio(edid);
+ kfree(edid);
+ }
+ }
+ if (edp)
+ cdv_intel_edp_panel_vdd_off(encoder);
+
+ return connector_status_connected;
+}
+
+static int cdv_intel_dp_get_modes(struct drm_connector *connector)
+{
+ struct gma_encoder *intel_encoder = gma_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ struct edid *edid = NULL;
+ int ret = 0;
+ int edp = is_edp(intel_encoder);
+
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ if (is_edp(intel_encoder)) {
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ cdv_intel_edp_panel_vdd_off(intel_encoder);
+ if (ret) {
+ if (edp && !intel_dp->panel_fixed_mode) {
+ struct drm_display_mode *newmode;
+ list_for_each_entry(newmode, &connector->probed_modes,
+ head) {
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ intel_dp->panel_fixed_mode =
+ drm_mode_duplicate(dev, newmode);
+ break;
+ }
+ }
+ }
+
+ return ret;
+ }
+ if (!intel_dp->panel_fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
+ intel_dp->panel_fixed_mode =
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (intel_dp->panel_fixed_mode) {
+ intel_dp->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ }
+ }
+ if (intel_dp->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+ }
+
+ return ret;
+}
+
+static bool
+cdv_intel_dp_detect_audio(struct drm_connector *connector)
+{
+ struct gma_encoder *encoder = gma_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ struct edid *edid;
+ bool has_audio = false;
+ int edp = is_edp(encoder);
+
+ if (edp)
+ cdv_intel_edp_panel_vdd_on(encoder);
+
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ has_audio = drm_detect_monitor_audio(edid);
+ kfree(edid);
+ }
+ if (edp)
+ cdv_intel_edp_panel_vdd_off(encoder);
+
+ return has_audio;
+}
+
+static int
+cdv_intel_dp_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct gma_encoder *encoder = gma_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int ret;
+
+ ret = drm_object_property_set_value(&connector->base, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ int i = val;
+ bool has_audio;
+
+ if (i == intel_dp->force_audio)
+ return 0;
+
+ intel_dp->force_audio = i;
+
+ if (i == 0)
+ has_audio = cdv_intel_dp_detect_audio(connector);
+ else
+ has_audio = i > 0;
+
+ if (has_audio == intel_dp->has_audio)
+ return 0;
+
+ intel_dp->has_audio = has_audio;
+ goto done;
+ }
+
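+	/* Broadcast RGB: zero selects full range output, non-zero the CEA
+	   limited 16-235 range */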
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_dp->color_range)
+ return 0;
+
+ intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
+ return -EINVAL;
+
+done:
+ if (encoder->base.crtc) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y,
+ crtc->primary->fb);
+ }
+
+ return 0;
+}
+
+static void
+cdv_intel_dp_destroy(struct drm_connector *connector)
+{
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = gma_encoder->dev_priv;
+
+ if (is_edp(gma_encoder)) {
+ /* cdv_intel_panel_destroy_backlight(connector->dev); */
+ if (intel_dp->panel_fixed_mode) {
+ kfree(intel_dp->panel_fixed_mode);
+ intel_dp->panel_fixed_mode = NULL;
+ }
+ }
+ i2c_del_adapter(&intel_dp->adapter);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = {
+ .dpms = cdv_intel_dp_dpms,
+ .mode_fixup = cdv_intel_dp_mode_fixup,
+ .prepare = cdv_intel_dp_prepare,
+ .mode_set = cdv_intel_dp_mode_set,
+ .commit = cdv_intel_dp_commit,
+};
+
+static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = cdv_intel_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = cdv_intel_dp_set_property,
+ .destroy = cdv_intel_dp_destroy,
+};
+
+static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = {
+ .get_modes = cdv_intel_dp_get_modes,
+ .mode_valid = cdv_intel_dp_mode_valid,
+ .best_encoder = gma_best_encoder,
+};
+
+static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
+ .destroy = cdv_intel_dp_encoder_destroy,
+};
+
+
+static void cdv_intel_dp_add_properties(struct drm_connector *connector)
+{
+ cdv_intel_attach_force_audio_property(connector);
+ cdv_intel_attach_broadcast_rgb_property(connector);
+}
+
+/* check the VBT to see whether the eDP is on DP-D port */
+static bool cdv_intel_dpc_is_edp(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i;
+
+ if (!dev_priv->child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+
+ if (p_child->dvo_port == PORT_IDPC &&
+ p_child->device_type == DEVICE_TYPE_eDP)
+ return true;
+ }
+ return false;
+}
+
+/* Cedarview display clock gating
+
+   We need this disabled to get correct behaviour while enabling
+   DP/eDP. TODO - investigate whether we can return it to normal
+   after enabling */
+static void cdv_disable_intel_clock_gating(struct drm_device *dev)
+{
+	u32 reg_value;
+
+	reg_value = REG_READ(DSPCLK_GATE_D);
+
+ reg_value |= (DPUNIT_PIPEB_GATE_DISABLE |
+ DPUNIT_PIPEA_GATE_DISABLE |
+ DPCUNIT_CLOCK_GATE_DISABLE |
+ DPLSUNIT_CLOCK_GATE_DISABLE |
+ DPOUNIT_CLOCK_GATE_DISABLE |
+ DPIOUNIT_CLOCK_GATE_DISABLE);
+
+ REG_WRITE(DSPCLK_GATE_D, reg_value);
+
+ udelay(500);
+}
+
+void
+cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg)
+{
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct cdv_intel_dp *intel_dp;
+ const char *name = NULL;
+ int type = DRM_MODE_CONNECTOR_DisplayPort;
+
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+ if (!gma_encoder)
+ return;
+ gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+ if (!gma_connector)
+ goto err_connector;
+ intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL);
+ if (!intel_dp)
+ goto err_priv;
+
+ if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev))
+ type = DRM_MODE_CONNECTOR_eDP;
+
+ connector = &gma_connector->base;
+ encoder = &gma_encoder->base;
+
+ drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
+ drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS);
+
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
+
+ if (type == DRM_MODE_CONNECTOR_DisplayPort)
+ gma_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ else
+ gma_encoder->type = INTEL_OUTPUT_EDP;
+
+	gma_encoder->dev_priv = intel_dp;
+ intel_dp->encoder = gma_encoder;
+ intel_dp->output_reg = output_reg;
+
+ drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs);
+ drm_connector_helper_add(connector, &cdv_intel_dp_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_sysfs_connector_add(connector);
+
+ /* Set up the DDC bus. */
+ switch (output_reg) {
+ case DP_B:
+ name = "DPDDC-B";
+ gma_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
+ break;
+ case DP_C:
+ name = "DPDDC-C";
+ gma_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
+ break;
+ }
+
+ cdv_disable_intel_clock_gating(dev);
+
+ cdv_intel_dp_i2c_init(gma_connector, gma_encoder, name);
+	/* FIXME: check for failure */
+ cdv_intel_dp_add_properties(connector);
+
+ if (is_edp(gma_encoder)) {
+ int ret;
+ struct edp_power_seq cur;
+ u32 pp_on, pp_off, pp_div;
+ u32 pwm_ctrl;
+
+ pp_on = REG_READ(PP_CONTROL);
+ pp_on &= ~PANEL_UNLOCK_MASK;
+ pp_on |= PANEL_UNLOCK_REGS;
+
+ REG_WRITE(PP_CONTROL, pp_on);
+
+ pwm_ctrl = REG_READ(BLC_PWM_CTL2);
+ pwm_ctrl |= PWM_PIPE_B;
+ REG_WRITE(BLC_PWM_CTL2, pwm_ctrl);
+
+ pp_on = REG_READ(PP_ON_DELAYS);
+ pp_off = REG_READ(PP_OFF_DELAYS);
+ pp_div = REG_READ(PP_DIVISOR);
+
+ /* Pull timing values out of registers */
+ cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+ PANEL_POWER_UP_DELAY_SHIFT;
+
+ cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+ PANEL_LIGHT_ON_DELAY_SHIFT;
+
+ cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+ PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+ cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+ PANEL_POWER_DOWN_DELAY_SHIFT;
+
+ cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ PANEL_POWER_CYCLE_DELAY_SHIFT);
+
+ DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+
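+	/* The delay fields count in 100 us units, so dividing by 10 converts
+	   them to ms; the power cycle field counts in 100 ms units */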
+ intel_dp->panel_power_up_delay = cur.t1_t3 / 10;
+ intel_dp->backlight_on_delay = cur.t8 / 10;
+ intel_dp->backlight_off_delay = cur.t9 / 10;
+ intel_dp->panel_power_down_delay = cur.t10 / 10;
+ intel_dp->panel_power_cycle_delay = (cur.t11_t12 - 1) * 100;
+
+ DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
+
+ DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+
+ cdv_intel_edp_panel_vdd_on(gma_encoder);
+ ret = cdv_intel_dp_aux_native_read(gma_encoder, DP_DPCD_REV,
+ intel_dp->dpcd,
+ sizeof(intel_dp->dpcd));
+ cdv_intel_edp_panel_vdd_off(gma_encoder);
+ if (ret == 0) {
+ /* if this fails, presume the device is a ghost */
+ DRM_INFO("failed to retrieve link info, disabling eDP\n");
+ cdv_intel_dp_encoder_destroy(encoder);
+ cdv_intel_dp_destroy(connector);
+ goto err_priv;
+ } else {
+ DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
+ intel_dp->dpcd[0], intel_dp->dpcd[1],
+ intel_dp->dpcd[2], intel_dp->dpcd[3]);
+
+ }
+	/* The CDV reference driver moves panel backlight setup into the
+	   displays that have a backlight: this is a good idea and one we
+	   should probably adopt, but we need to migrate all the drivers
+	   before we can do that */
+ /*cdv_intel_panel_setup_backlight(dev); */
+ }
+ return;
+
+err_priv:
+ kfree(gma_connector);
+err_connector:
+ kfree(gma_encoder);
+}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
new file mode 100644
index 00000000000..b99084b3f70
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -0,0 +1,369 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ *
+ * FIXME:
+ * We should probably make this generic and share it with Medfield
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "psb_intel_drv.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "cdv_device.h"
+#include <linux/pm_runtime.h>
+
+/* hdmi control bits */
+#define HDMI_NULL_PACKETS_DURING_VSYNC (1 << 9)
+#define HDMI_BORDER_ENABLE (1 << 7)
+#define HDMI_AUDIO_ENABLE (1 << 6)
+#define HDMI_VSYNC_ACTIVE_HIGH (1 << 4)
+#define HDMI_HSYNC_ACTIVE_HIGH (1 << 3)
+/* hdmi-b control bits */
+#define HDMIB_PIPE_B_SELECT (1 << 30)
+
+
+struct mid_intel_hdmi_priv {
+ u32 hdmi_reg;
+ u32 save_HDMIB;
+ bool has_hdmi_sink;
+ bool has_hdmi_audio;
+	/* Should be set when a hotplug event is detected */
+ bool hdmi_device_connected;
+ struct mdfld_hdmi_i2c *i2c_bus;
+ struct i2c_adapter *hdmi_i2c_adapter; /* for control functions */
+ struct drm_device *dev;
+};
+
+static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
+ struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
+ u32 hdmib;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+
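+	/* (2 << 10) selects HDMI encoding in the SDVO/HDMI port register */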
+ hdmib = (2 << 10);
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ hdmib |= HDMI_VSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ hdmib |= HDMI_HSYNC_ACTIVE_HIGH;
+
+ if (gma_crtc->pipe == 1)
+ hdmib |= HDMIB_PIPE_B_SELECT;
+
+ if (hdmi_priv->has_hdmi_audio) {
+ hdmib |= HDMI_AUDIO_ENABLE;
+ hdmib |= HDMI_NULL_PACKETS_DURING_VSYNC;
+ }
+
+ REG_WRITE(hdmi_priv->hdmi_reg, hdmib);
+ REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
+ struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
+ u32 hdmib;
+
+ hdmib = REG_READ(hdmi_priv->hdmi_reg);
+
+ if (mode != DRM_MODE_DPMS_ON)
+ REG_WRITE(hdmi_priv->hdmi_reg, hdmib & ~HDMIB_PORT_EN);
+ else
+ REG_WRITE(hdmi_priv->hdmi_reg, hdmib | HDMIB_PORT_EN);
+ REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static void cdv_hdmi_save(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
+
+ hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static void cdv_hdmi_restore(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
+
+ REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
+ REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static enum drm_connector_status cdv_hdmi_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
+ struct edid *edid = NULL;
+ enum drm_connector_status status = connector_status_disconnected;
+
+ edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
+
+ hdmi_priv->has_hdmi_sink = false;
+ hdmi_priv->has_hdmi_audio = false;
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ hdmi_priv->has_hdmi_sink =
+ drm_detect_hdmi_monitor(edid);
+ hdmi_priv->has_hdmi_audio =
+ drm_detect_monitor_audio(edid);
+ }
+ kfree(edid);
+ }
+ return status;
+}
+
+static int cdv_hdmi_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct drm_encoder *encoder = connector->encoder;
+
+ if (!strcmp(property->name, "scaling mode") && encoder) {
+ struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
+ bool centre;
+ uint64_t curValue;
+
+ if (!crtc)
+ return -1;
+
+ switch (value) {
+ case DRM_MODE_SCALE_FULLSCREEN:
+ break;
+ case DRM_MODE_SCALE_NO_SCALE:
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ return -1;
+ }
+
+ if (drm_object_property_get_value(&connector->base,
+ property, &curValue))
+ return -1;
+
+ if (curValue == value)
+ return 0;
+
+ if (drm_object_property_set_value(&connector->base,
+ property, value))
+ return -1;
+
+ centre = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
+ (value == DRM_MODE_SCALE_NO_SCALE);
+
+ if (crtc->saved_mode.hdisplay != 0 &&
+ crtc->saved_mode.vdisplay != 0) {
+ if (centre) {
+ if (!drm_crtc_helper_set_mode(encoder->crtc, &crtc->saved_mode,
+ encoder->crtc->x, encoder->crtc->y, encoder->crtc->primary->fb))
+ return -1;
+ } else {
+ struct drm_encoder_helper_funcs *helpers
+ = encoder->helper_private;
+ helpers->mode_set(encoder, &crtc->saved_mode,
+ &crtc->saved_adjusted_mode);
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Return the list of HDMI DDC modes if available.
+ */
+static int cdv_hdmi_get_modes(struct drm_connector *connector)
+{
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct edid *edid = NULL;
+ int ret = 0;
+
+ edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+ return ret;
+}
+
+static int cdv_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
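+	/* 165 MHz is the single-link TMDS limit; below ~20 MHz the display
+	   PLL presumably cannot lock */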
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+ if (mode->clock < 20000)
+		return MODE_CLOCK_LOW;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ return MODE_OK;
+}
+
+static void cdv_hdmi_destroy(struct drm_connector *connector)
+{
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+
+ if (gma_encoder->i2c_bus)
+ psb_intel_i2c_destroy(gma_encoder->i2c_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
+ .dpms = cdv_hdmi_dpms,
+ .mode_fixup = gma_encoder_mode_fixup,
+ .prepare = gma_encoder_prepare,
+ .mode_set = cdv_hdmi_mode_set,
+ .commit = gma_encoder_commit,
+};
+
+static const struct drm_connector_helper_funcs
+ cdv_hdmi_connector_helper_funcs = {
+ .get_modes = cdv_hdmi_get_modes,
+ .mode_valid = cdv_hdmi_mode_valid,
+ .best_encoder = gma_best_encoder,
+};
+
+static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = cdv_hdmi_save,
+ .restore = cdv_hdmi_restore,
+ .detect = cdv_hdmi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = cdv_hdmi_set_property,
+ .destroy = cdv_hdmi_destroy,
+};
+
+void cdv_hdmi_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev, int reg)
+{
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct mid_intel_hdmi_priv *hdmi_priv;
+ int ddc_bus;
+
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+
+ if (!gma_encoder)
+ return;
+
+ gma_connector = kzalloc(sizeof(struct gma_connector),
+ GFP_KERNEL);
+
+ if (!gma_connector)
+ goto err_connector;
+
+ hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
+
+ if (!hdmi_priv)
+ goto err_priv;
+
+ connector = &gma_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ encoder = &gma_encoder->base;
+ drm_connector_init(dev, connector,
+ &cdv_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_DVID);
+
+ drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
+ gma_encoder->type = INTEL_OUTPUT_HDMI;
+ hdmi_priv->hdmi_reg = reg;
+ hdmi_priv->has_hdmi_sink = false;
+ gma_encoder->dev_priv = hdmi_priv;
+
+ drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
+ drm_connector_helper_add(connector,
+ &cdv_hdmi_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+
+ switch (reg) {
+ case SDVOB:
+ ddc_bus = GPIOE;
+ gma_encoder->ddi_select = DDI0_SELECT;
+ break;
+ case SDVOC:
+ ddc_bus = GPIOD;
+ gma_encoder->ddi_select = DDI1_SELECT;
+ break;
+ default:
+ DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
+ goto failed_ddc;
+ break;
+ }
+
+ gma_encoder->i2c_bus = psb_intel_i2c_create(dev,
+ ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
+
+ if (!gma_encoder->i2c_bus) {
+ dev_err(dev->dev, "No ddc adapter available!\n");
+ goto failed_ddc;
+ }
+
+ hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter);
+ hdmi_priv->dev = dev;
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed_ddc:
+ drm_encoder_cleanup(encoder);
+ drm_connector_cleanup(connector);
+err_priv:
+ kfree(gma_connector);
+err_connector:
+ kfree(gma_encoder);
+}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
new file mode 100644
index 00000000000..8ecc920fc26
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -0,0 +1,798 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/dmi.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+#include "cdv_device.h"
+
+/**
+ * LVDS I2C backlight control macros
+ */
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK 0xFF
+#define BLC_I2C_TYPE 0x01
+#define BLC_PWM_TYPT 0x02
+
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+
+#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
+#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
+#define PSB_BLC_PWM_PRECISION_FACTOR (10)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+
+struct cdv_intel_lvds_priv {
+ /**
+	 * Saved LVDS output states
+ */
+ uint32_t savePP_ON;
+ uint32_t savePP_OFF;
+ uint32_t saveLVDS;
+ uint32_t savePP_CONTROL;
+ uint32_t savePP_CYCLE;
+ uint32_t savePFIT_CONTROL;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t saveBLC_PWM_CTL;
+};
+
+/*
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 retval;
+
+ if (gma_power_begin(dev, false)) {
+ retval = ((REG_READ(BLC_PWM_CTL) &
+ BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ gma_power_end(dev);
+ } else
+ retval = ((dev_priv->regs.saveBLC_PWM_CTL &
+ BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ return retval;
+}
+
+#if 0
+/*
+ * Set LVDS backlight level by I2C command
+ */
+static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
+ unsigned int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+ u8 out_buf[2];
+ unsigned int blc_i2c_brightness;
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = lvds_i2c_bus->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ }
+ };
+
+ blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
+ BRIGHTNESS_MASK /
+ BRIGHTNESS_MAX_LEVEL);
+
+ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+ blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
+
+ out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
+ out_buf[1] = (u8)blc_i2c_brightness;
+
+ if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
+ return 0;
+
+ DRM_ERROR("I2C transfer error\n");
+ return -1;
+}
+
+
+static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ u32 max_pwm_blc;
+ u32 blc_pwm_duty_cycle;
+
+ max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);
+
+	/* BLC_PWM_CTL should have been initialised during backlight device init */
+ BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
+
+ blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
+
+ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
+
+ blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+ REG_WRITE(BLC_PWM_CTL,
+ (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+ (blc_pwm_duty_cycle));
+
+ return 0;
+}
+
+/*
+ * Set LVDS backlight level either by I2C or PWM
+ */
+void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->lvds_bl) {
+ DRM_ERROR("NO LVDS Backlight Info\n");
+ return;
+ }
+
+ if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
+ cdv_lvds_i2c_set_brightness(dev, level);
+ else
+ cdv_lvds_pwm_set_brightness(dev, level);
+}
+#endif
+
+/**
+ * Sets the backlight level.
+ *
+ * @level: backlight level, from 0 to cdv_intel_lvds_get_max_backlight().
+ */
+static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 blc_pwm_ctl;
+
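+	/* Replace only the duty cycle field of BLC_PWM_CTL, preserving the
+	   PWM modulation frequency in the upper bits */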
+ if (gma_power_begin(dev, false)) {
+ blc_pwm_ctl =
+ REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ REG_WRITE(BLC_PWM_CTL,
+ (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+ gma_power_end(dev);
+ } else {
+ blc_pwm_ctl = dev_priv->regs.saveBLC_PWM_CTL &
+ ~BACKLIGHT_DUTY_CYCLE_MASK;
+ dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+ }
+}
+
+/**
+ * Sets the power state for the panel.
+ */
+static void cdv_intel_lvds_set_power(struct drm_device *dev,
+ struct drm_encoder *encoder, bool on)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pp_status;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ if (on) {
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+ POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & PP_ON) == 0);
+
+ cdv_intel_lvds_set_backlight(dev,
+ dev_priv->mode_dev.backlight_duty_cycle);
+ } else {
+ cdv_intel_lvds_set_backlight(dev, 0);
+
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+ ~POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while (pp_status & PP_ON);
+ }
+ gma_power_end(dev);
+}
+
+static void cdv_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ if (mode == DRM_MODE_DPMS_ON)
+ cdv_intel_lvds_set_power(dev, encoder, true);
+ else
+ cdv_intel_lvds_set_power(dev, encoder, false);
+ /* XXX: We never power down the LVDS pairs. */
+}
+
+static void cdv_intel_lvds_save(struct drm_connector *connector)
+{
+}
+
+static void cdv_intel_lvds_restore(struct drm_connector *connector)
+{
+}
+
+static int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *fixed_mode =
+ dev_priv->mode_dev.panel_fixed_mode;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ if (fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+ return MODE_OK;
+}
+
+static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct drm_encoder *tmp_encoder;
+ struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
+
+ /* Should never happen!! */
+ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
+ head) {
+ if (tmp_encoder != encoder
+ && tmp_encoder->crtc == encoder->crtc) {
+ printk(KERN_ERR "Can't enable LVDS and another "
+ "encoder on the same pipe\n");
+ return false;
+ }
+ }
+
+ /*
+ * If we have timings from the BIOS for the panel, put them in
+ * to the adjusted mode. The CRTC will be set up for this mode,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+ if (panel_fixed_mode != NULL) {
+ adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
+ adjusted_mode->htotal = panel_fixed_mode->htotal;
+ adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
+ adjusted_mode->vtotal = panel_fixed_mode->vtotal;
+ adjusted_mode->clock = panel_fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode,
+ CRTC_INTERLACE_HALVE_V);
+ }
+
+ /*
+ * XXX: It would be nice to support lower refresh rates on the
+ * panels to reduce power consumption, and perhaps match the
+ * user's requested refresh rate.
+ */
+
+ return true;
+}
+
+static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+
+ cdv_intel_lvds_set_power(dev, encoder, false);
+
+ gma_power_end(dev);
+}
+
+static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (mode_dev->backlight_duty_cycle == 0)
+ mode_dev->backlight_duty_cycle =
+ cdv_intel_lvds_get_max_backlight(dev);
+
+ cdv_intel_lvds_set_power(dev, encoder, true);
+}
+
+static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
+ u32 pfit_control;
+
+ /*
+ * The LVDS pin pair will already have been turned on in the
+ * cdv_intel_crtc_mode_set since it has a large impact on the DPLL
+ * settings.
+ */
+
+ /*
+ * Enable automatic panel scaling so that non-native modes fill the
+ * screen. Should be enabled before the pipe is enabled, according to
+ * register description and PRM.
+ */
+ if (mode->hdisplay != adjusted_mode->hdisplay ||
+ mode->vdisplay != adjusted_mode->vdisplay)
+ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ else
+ pfit_control = 0;
+
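+	/* Route the panel fitter output to the pipe driving the LVDS */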
+ pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT;
+
+ if (dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+ REG_WRITE(PFIT_CONTROL, pfit_control);
+}
+
+/**
+ * Detect the LVDS connection.
+ *
+ * This always returns connector_status_connected; the connector should
+ * only have been set up if the LVDS was actually connected anyway.
+ */
+static enum drm_connector_status cdv_intel_lvds_detect(
+ struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+/**
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ int ret;
+
+ ret = psb_intel_ddc_get_modes(connector, &gma_encoder->i2c_bus->adapter);
+
+ if (ret)
+ return ret;
+
+	/* Didn't get an EDID, so set wide sync ranges so that all modes
+	 * get handed to valid_mode for checking
+	 */
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+ if (mode_dev->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode =
+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * cdv_intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+static void cdv_intel_lvds_destroy(struct drm_connector *connector)
+{
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+
+ if (gma_encoder->i2c_bus)
+ psb_intel_i2c_destroy(gma_encoder->i2c_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static int cdv_intel_lvds_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct drm_encoder *encoder = connector->encoder;
+
+ if (!strcmp(property->name, "scaling mode") && encoder) {
+ struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
+ uint64_t curValue;
+
+ if (!crtc)
+ return -1;
+
+ switch (value) {
+ case DRM_MODE_SCALE_FULLSCREEN:
+ break;
+ case DRM_MODE_SCALE_NO_SCALE:
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ return -1;
+ }
+
+ if (drm_object_property_get_value(&connector->base,
+ property,
+ &curValue))
+ return -1;
+
+ if (curValue == value)
+ return 0;
+
+ if (drm_object_property_set_value(&connector->base,
+ property,
+ value))
+ return -1;
+
+ if (crtc->saved_mode.hdisplay != 0 &&
+ crtc->saved_mode.vdisplay != 0) {
+ if (!drm_crtc_helper_set_mode(encoder->crtc,
+ &crtc->saved_mode,
+ encoder->crtc->x,
+ encoder->crtc->y,
+ encoder->crtc->primary->fb))
+ return -1;
+ }
+ } else if (!strcmp(property->name, "backlight") && encoder) {
+ if (drm_object_property_set_value(&connector->base,
+ property,
+ value))
+ return -1;
+ else
+ gma_backlight_set(encoder->dev, value);
+ } else if (!strcmp(property->name, "DPMS") && encoder) {
+ struct drm_encoder_helper_funcs *helpers =
+ encoder->helper_private;
+ helpers->dpms(encoder, value);
+ }
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs
+ cdv_intel_lvds_helper_funcs = {
+ .dpms = cdv_intel_lvds_encoder_dpms,
+ .mode_fixup = cdv_intel_lvds_mode_fixup,
+ .prepare = cdv_intel_lvds_prepare,
+ .mode_set = cdv_intel_lvds_mode_set,
+ .commit = cdv_intel_lvds_commit,
+};
+
+static const struct drm_connector_helper_funcs
+ cdv_intel_lvds_connector_helper_funcs = {
+ .get_modes = cdv_intel_lvds_get_modes,
+ .mode_valid = cdv_intel_lvds_mode_valid,
+ .best_encoder = gma_best_encoder,
+};
+
+static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = cdv_intel_lvds_save,
+ .restore = cdv_intel_lvds_restore,
+ .detect = cdv_intel_lvds_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = cdv_intel_lvds_set_property,
+ .destroy = cdv_intel_lvds_destroy,
+};
+
+
+static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
+ .destroy = cdv_intel_lvds_enc_destroy,
+};
+
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the LVDS is present.
+ * If it is present, return true.
+ * If it is not present, return false.
+ * If no child dev is parsed from VBT, it assumes that the LVDS is present.
+ */
+static bool lvds_is_present_in_vbt(struct drm_device *dev,
+ u8 *i2c_pin)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int i;
+
+ if (!dev_priv->child_dev_num)
+ return true;
+
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ struct child_device_config *child = dev_priv->child_dev + i;
+
+ /* If the device type is not LFP, continue.
+ * We have to check both the new identifiers as well as the
+ * old for compatibility with some BIOSes.
+ */
+ if (child->device_type != DEVICE_TYPE_INT_LFP &&
+ child->device_type != DEVICE_TYPE_LFP)
+ continue;
+
+ if (child->i2c_pin)
+ *i2c_pin = child->i2c_pin;
+
+ /* However, we cannot trust the BIOS writers to populate
+ * the VBT correctly. Since LVDS requires additional
+ * information from AIM blocks, a non-zero addin offset is
+ * a good indicator that the LVDS is actually present.
+ */
+ if (child->addin_offset)
+ return true;
+
+ /* But even then some BIOS writers perform some black magic
+ * and instantiate the device without reference to any
+ * additional data. Trust that if the VBT was written into
+ * the OpRegion then they have validated the LVDS's existence.
+ */
+ if (dev_priv->opregion.vbt)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * cdv_intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void cdv_intel_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
+ struct cdv_intel_lvds_priv *lvds_priv;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_display_mode *scan;
+ struct drm_crtc *crtc;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 lvds;
+ int pipe;
+ u8 pin;
+
+ pin = GMBUS_PORT_PANEL;
+ if (!lvds_is_present_in_vbt(dev, &pin)) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ return;
+ }
+
+ gma_encoder = kzalloc(sizeof(struct gma_encoder),
+ GFP_KERNEL);
+ if (!gma_encoder)
+ return;
+
+ gma_connector = kzalloc(sizeof(struct gma_connector),
+ GFP_KERNEL);
+ if (!gma_connector)
+ goto failed_connector;
+
+ lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
+ if (!lvds_priv)
+ goto failed_lvds_priv;
+
+ gma_encoder->dev_priv = lvds_priv;
+
+ connector = &gma_connector->base;
+ encoder = &gma_encoder->base;
+
+
+ drm_connector_init(dev, connector,
+ &cdv_intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ drm_encoder_init(dev, encoder,
+ &cdv_intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS);
+
+
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
+ gma_encoder->type = INTEL_OUTPUT_LVDS;
+
+ drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
+ drm_connector_helper_add(connector,
+ &cdv_intel_lvds_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /*Attach connector properties*/
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ drm_object_attach_property(&connector->base,
+ dev_priv->backlight_property,
+ BRIGHTNESS_MAX_LEVEL);
+
+	/*
+	 * Set up the I2C bus.
+	 * FIXME: destroy i2c_bus on exit
+	 */
+ gma_encoder->i2c_bus = psb_intel_i2c_create(dev,
+ GPIOB,
+ "LVDSBLC_B");
+ if (!gma_encoder->i2c_bus) {
+ dev_printk(KERN_ERR,
+ &dev->pdev->dev, "I2C bus registration failed.\n");
+ goto failed_blc_i2c;
+ }
+ gma_encoder->i2c_bus->slave_addr = 0x2C;
+ dev_priv->lvds_i2c_bus = gma_encoder->i2c_bus;
+
+ /*
+ * LVDS discovery:
+ * 1) check for EDID on DDC
+ * 2) check for VBT data
+ * 3) check to see if LVDS is already on
+ * if none of the above, no panel
+ * 4) make sure lid is open
+ * if closed, act like it's not there for now
+ */
+
+ /* Set up the DDC bus. */
+ gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
+ GPIOC,
+ "LVDSDDC_C");
+ if (!gma_encoder->ddc_bus) {
+ dev_printk(KERN_ERR, &dev->pdev->dev,
+			   "DDC bus registration failed.\n");
+ goto failed_ddc;
+ }
+
+ /*
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+ mutex_lock(&dev->mode_config.mutex);
+ psb_intel_ddc_get_modes(connector,
+ &gma_encoder->ddc_bus->adapter);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, scan);
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+
+	/* Failed to get EDID, what about VBT? Do we need this? */
+ if (dev_priv->lfp_lvds_vbt_mode) {
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+ /*
+ * If we didn't get EDID, try checking if the panel is already turned
+ * on. If so, assume that whatever is currently programmed is the
+ * correct mode.
+ */
+ lvds = REG_READ(LVDS);
+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+ crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
+
+ if (crtc && (lvds & LVDS_PORT_EN)) {
+ mode_dev->panel_fixed_mode =
+ cdv_intel_crtc_mode_get(dev, crtc);
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+
+ /* If we still don't have a mode after all that, give up. */
+ if (!mode_dev->panel_fixed_mode) {
+		DRM_DEBUG("Found no modes on the LVDS, ignoring the LVDS\n");
+ goto failed_find;
+ }
+
+	/* Set up the backlight PWM: route it to the panel's pipe and enable it */
+ {
+ u32 pwm;
+
+ pwm = REG_READ(BLC_PWM_CTL2);
+ if (pipe == 1)
+ pwm |= PWM_PIPE_B;
+ else
+ pwm &= ~PWM_PIPE_B;
+ pwm |= PWM_ENABLE;
+ REG_WRITE(BLC_PWM_CTL2, pwm);
+ }
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed_find:
+ mutex_unlock(&dev->mode_config.mutex);
+	printk(KERN_ERR "Failed to find an LVDS panel mode\n");
+ if (gma_encoder->ddc_bus)
+ psb_intel_i2c_destroy(gma_encoder->ddc_bus);
+failed_ddc:
+	printk(KERN_ERR "Failed to register the DDC bus\n");
+ if (gma_encoder->i2c_bus)
+ psb_intel_i2c_destroy(gma_encoder->i2c_bus);
+failed_blc_i2c:
+	printk(KERN_ERR "Failed to register the backlight I2C bus\n");
+ drm_encoder_cleanup(encoder);
+ drm_connector_cleanup(connector);
+ kfree(lvds_priv);
+failed_lvds_priv:
+ kfree(gma_connector);
+failed_connector:
+ kfree(gma_encoder);
+}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
new file mode 100644
index 00000000000..e7fcc148f33
--- /dev/null
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -0,0 +1,795 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_drv.h"
+#include "framebuffer.h"
+#include "gtt.h"
+
+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle);
+
+static const struct drm_framebuffer_funcs psb_fb_funcs = {
+ .destroy = psb_user_framebuffer_destroy,
+ .create_handle = psb_user_framebuffer_create_handle,
+};
+
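+/* Scale a 16-bit colour component down to _width bits, rounding to nearest */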
+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
+
+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp,
+ struct fb_info *info)
+{
+ struct psb_fbdev *fbdev = info->par;
+ struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+ uint32_t v;
+
+ if (!fb)
+ return -ENOMEM;
+
+ if (regno > 255)
+ return 1;
+
+ red = CMAP_TOHW(red, info->var.red.length);
+ blue = CMAP_TOHW(blue, info->var.blue.length);
+ green = CMAP_TOHW(green, info->var.green.length);
+ transp = CMAP_TOHW(transp, info->var.transp.length);
+
+ v = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset) |
+ (transp << info->var.transp.offset);
+
+ if (regno < 16) {
+ switch (fb->bits_per_pixel) {
+ case 16:
+ ((uint32_t *) info->pseudo_palette)[regno] = v;
+ break;
+ case 24:
+ case 32:
+ ((uint32_t *) info->pseudo_palette)[regno] = v;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct psb_fbdev *fbdev = info->par;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_device *dev = psbfb->base.dev;
+
+ /*
+ * We have to poke our nose in here. The core fb code assumes
+ * panning is part of the hardware that can be invoked before
+ * the actual fb is mapped. In our case that isn't quite true.
+ */
+ if (psbfb->gtt->npage) {
+		/* GTT rolling works in 4K pages: convert the line length to
+		   pages per scanline so we roll by whole lines */
+ int pages = info->fix.line_length >> 12;
+ psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
+ }
+ return 0;
+}
+
+static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct psb_framebuffer *psbfb = vma->vm_private_data;
+ struct drm_device *dev = psbfb->base.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int page_num;
+ int i;
+ unsigned long address;
+ int ret;
+ unsigned long pfn;
+ unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
+ psbfb->gtt->offset;
+
+ page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
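+	/* Map every page of the stolen-memory framebuffer on the first fault
+	   rather than faulting one page at a time */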
+ for (i = 0; i < page_num; i++) {
+ pfn = (phys_addr >> PAGE_SHIFT);
+
+ ret = vm_insert_mixed(vma, address, pfn);
+ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+ break;
+ else if (unlikely(ret != 0)) {
+ ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+ return ret;
+ }
+ address += PAGE_SIZE;
+ phys_addr += PAGE_SIZE;
+ }
+ return VM_FAULT_NOPAGE;
+}
+
+static void psbfb_vm_open(struct vm_area_struct *vma)
+{
+}
+
+static void psbfb_vm_close(struct vm_area_struct *vma)
+{
+}
+
+static const struct vm_operations_struct psbfb_vm_ops = {
+ .fault = psbfb_vm_fault,
+ .open = psbfb_vm_open,
+ .close = psbfb_vm_close
+};
+
+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct psb_fbdev *fbdev = info->par;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+
+ if (vma->vm_pgoff != 0)
+ return -EINVAL;
+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+ return -EINVAL;
+
+ if (!psbfb->addr_space)
+ psbfb->addr_space = vma->vm_file->f_mapping;
+ /*
+ * If this is a GEM object then info->screen_base is the virtual
+ * kernel remapping of the object. FIXME: Review if this is
+ * suitable for our mmap work
+ */
+ vma->vm_ops = &psbfb_vm_ops;
+ vma->vm_private_data = (void *)psbfb;
+ vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ return 0;
+}
+
+static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg)
+{
+ return -ENOTTY;
+}
+
+static struct fb_ops psbfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcolreg = psbfb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = psbfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_mmap = psbfb_mmap,
+ .fb_sync = psbfb_sync,
+ .fb_ioctl = psbfb_ioctl,
+};
+
+static struct fb_ops psbfb_roll_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcolreg = psbfb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_pan_display = psbfb_pan,
+ .fb_mmap = psbfb_mmap,
+ .fb_ioctl = psbfb_ioctl,
+};
+
+static struct fb_ops psbfb_unaccel_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcolreg = psbfb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_mmap = psbfb_mmap,
+ .fb_ioctl = psbfb_ioctl,
+};
+
+/**
+ * psb_framebuffer_init - initialize a framebuffer
+ * @dev: our DRM device
+ * @fb: framebuffer to set up
+ * @mode_cmd: mode description
+ * @gt: backing object
+ *
+ * Configure and fill in the boilerplate for our frame buffer. Return
+ * 0 on success or an error code if we fail.
+ */
+static int psb_framebuffer_init(struct drm_device *dev,
+ struct psb_framebuffer *fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct gtt_range *gt)
+{
+ u32 bpp, depth;
+ int ret;
+
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
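+	/* The display engine requires a 64-byte aligned pitch */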
+ if (mode_cmd->pitches[0] & 63)
+ return -EINVAL;
+ switch (bpp) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ return -EINVAL;
+ }
+ drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+ fb->gtt = gt;
+ ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
+ if (ret) {
+ dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * psb_framebuffer_create - create a framebuffer backed by gt
+ * @dev: our DRM device
+ * @mode_cmd: the description of the requested mode
+ * @gt: the backing object
+ *
+ * Create a framebuffer object backed by the gt, and fill in the
+ * boilerplate required
+ *
+ * TODO: review object references
+ */
+
+static struct drm_framebuffer *psb_framebuffer_create
+ (struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct gtt_range *gt)
+{
+ struct psb_framebuffer *fb;
+ int ret;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
+ if (ret) {
+ kfree(fb);
+ return ERR_PTR(ret);
+ }
+ return &fb->base;
+}
+
+/**
+ * psbfb_alloc - allocate frame buffer memory
+ * @dev: the DRM device
+ * @aligned_size: space needed
+ *
+ * Allocate the frame buffer. In the usual case we get a GTT range that
+ * is stolen memory backed and life is simple. If there isn't sufficient
+ * stolen memory we fail, as we don't have the virtual mapping space to
+ * vmap an alternative and the kernel console code can't handle
+ * non-linear framebuffers.
+ *
+ * Re-address this as and if the framebuffer layer grows this ability.
+ */
+static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
+{
+ struct gtt_range *backing;
+ /* Begin by trying to use stolen memory backing */
+ backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
+ if (backing) {
+ drm_gem_private_object_init(dev, &backing->gem, aligned_size);
+ return backing;
+ }
+ return NULL;
+}
+
+/**
+ * psbfb_create - create a framebuffer
+ * @fbdev: the framebuffer device
+ * @sizes: specification of the layout
+ *
+ * Create a framebuffer to the specifications provided
+ */
+static int psbfb_create(struct psb_fbdev *fbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = fbdev->psb_fb_helper.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct device *device = &dev->pdev->dev;
+ int size;
+ int ret;
+ struct gtt_range *backing;
+ u32 bpp, depth;
+ int gtt_roll = 0;
+ int pitch_lines = 0;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ bpp = sizes->surface_bpp;
+ depth = sizes->surface_depth;
+
+ /* No 24bit packed */
+ if (bpp == 24)
+ bpp = 32;
+
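+	/* Try to fit the framebuffer into stolen memory, progressively
+	   relaxing the power-of-two pitch alignment from 4 KiB downward */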
+ do {
+ /*
+ * Acceleration via the GTT requires pitch to be
+ * power of two aligned. Preferably page but less
+ * is ok with some fonts
+ */
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ /* Allocate the fb in the GTT with stolen page backing */
+ backing = psbfb_alloc(dev, size);
+
+ if (pitch_lines)
+ pitch_lines *= 2;
+ else
+ pitch_lines = 1;
+ gtt_roll++;
+ } while (backing == NULL && pitch_lines <= 16);
+
+	/* Undo the last doubling: pitch_lines is now the alignment we
+	   actually accepted (if we succeeded) */
+	pitch_lines /= 2;
+
+ if (backing == NULL) {
+ /*
+ * We couldn't get the space we wanted, fall back to the
+ * display engine requirement instead. The HW requires
+ * the pitch to be 64 byte aligned
+ */
+
+ gtt_roll = 0; /* Don't use GTT accelerated scrolling */
+ pitch_lines = 64;
+
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ /* Allocate the framebuffer in the GTT with stolen page backing */
+ backing = psbfb_alloc(dev, size);
+ if (backing == NULL)
+ return -ENOMEM;
+ }
+
+ memset(dev_priv->vram_addr + backing->offset, 0, size);
+
+ mutex_lock(&dev->struct_mutex);
+
+ info = framebuffer_alloc(0, device);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+ info->par = fbdev;
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+ ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
+ if (ret)
+ goto out_unref;
+
+ fb = &psbfb->base;
+ psbfb->fbdev = info;
+
+ fbdev->psb_fb_helper.fb = fb;
+ fbdev->psb_fb_helper.fbdev = info;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ strcpy(info->fix.id, "psbdrmfb");
+
+ info->flags = FBINFO_DEFAULT;
+ if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
+ info->fbops = &psbfb_ops;
+ else if (gtt_roll) { /* GTT rolling seems best */
+ info->fbops = &psbfb_roll_ops;
+ info->flags |= FBINFO_HWACCEL_YPAN;
+ } else /* Software */
+ info->fbops = &psbfb_unaccel_ops;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
+ info->fix.smem_start = dev->mode_config.fb_base;
+ info->fix.smem_len = size;
+ info->fix.ywrapstep = gtt_roll;
+ info->fix.ypanstep = 0;
+
+	/* Access stolen memory directly */
+ info->screen_base = dev_priv->vram_addr + backing->offset;
+ info->screen_size = size;
+
+ if (dev_priv->gtt.stolen_size) {
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+ info->apertures->ranges[0].base = dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
+ }
+
+ drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
+ sizes->fb_width, sizes->fb_height);
+
+ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
+ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
+
+ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+
+ dev_dbg(dev->dev, "allocated %dx%d fb\n",
+ psbfb->base.width, psbfb->base.height);
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+out_unref:
+	if (backing->stolen)
+		psb_gtt_free_range(dev, backing);
+	else
+		drm_gem_object_unreference(&backing->gem);
+	/* backing has been released; do not fall through and free it again */
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+out_err1:
+	mutex_unlock(&dev->struct_mutex);
+	psb_gtt_free_range(dev, backing);
+	return ret;
+}
+
+/**
+ * psb_user_framebuffer_create - create framebuffer
+ * @dev: our DRM device
+ * @filp: client file
+ * @cmd: mode request
+ *
+ * Create a new framebuffer backed by a userspace GEM object
+ */
+static struct drm_framebuffer *psb_user_framebuffer_create
+ (struct drm_device *dev, struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *cmd)
+{
+ struct gtt_range *r;
+ struct drm_gem_object *obj;
+
+ /*
+ * Find the GEM object and thus the gtt range object that is
+ * to back this space
+ */
+ obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ /* Let the core code do all the work */
+ r = container_of(obj, struct gtt_range, gem);
+ return psb_framebuffer_create(dev, cmd, r);
+}
+
+static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+
+ gma_crtc->lut_r[regno] = red >> 8;
+ gma_crtc->lut_g[regno] = green >> 8;
+ gma_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
+ u16 *green, u16 *blue, int regno)
+{
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+
+ *red = gma_crtc->lut_r[regno] << 8;
+ *green = gma_crtc->lut_g[regno] << 8;
+ *blue = gma_crtc->lut_b[regno] << 8;
+}
+
+static int psbfb_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
+ struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int bytespp;
+
+ bytespp = sizes->surface_bpp / 8;
+ if (bytespp == 3) /* no 24bit packed */
+ bytespp = 4;
+
+	/* If the mode will not fit at 32bpp then switch to 16bpp to get
+	   a console at full resolution. The X mode setting server will
+	   allocate its own 32bpp GEM framebuffer */
+ if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
+ dev_priv->vram_stolen_size) {
+ sizes->surface_bpp = 16;
+ sizes->surface_depth = 16;
+ }
+
+ return psbfb_create(psb_fbdev, sizes);
+}
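+
+/*
+ * Worked example (illustrative, not part of the original patch): a
+ * 1920x1080 console at 32bpp needs ALIGN(1920 * 4, 64) * 1080 bytes,
+ * just under 8 MiB; with less stolen memory than that the probe above
+ * falls back to a 16bpp console of roughly 4 MiB.
+ */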
+
+static struct drm_fb_helper_funcs psb_fb_helper_funcs = {
+ .gamma_set = psbfb_gamma_set,
+ .gamma_get = psbfb_gamma_get,
+ .fb_probe = psbfb_probe,
+};
+
+static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
+{
+ struct fb_info *info;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+
+ if (fbdev->psb_fb_helper.fbdev) {
+ info = fbdev->psb_fb_helper.fbdev;
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+ drm_fb_helper_fini(&fbdev->psb_fb_helper);
+ drm_framebuffer_unregister_private(&psbfb->base);
+ drm_framebuffer_cleanup(&psbfb->base);
+
+ if (psbfb->gtt)
+ drm_gem_object_unreference(&psbfb->gtt->gem);
+ return 0;
+}
+
+int psb_fbdev_init(struct drm_device *dev)
+{
+ struct psb_fbdev *fbdev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
+ if (!fbdev) {
+ dev_err(dev->dev, "no memory\n");
+ return -ENOMEM;
+ }
+
+ dev_priv->fbdev = fbdev;
+ fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
+
+ drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
+ INTELFB_CONN_LIMIT);
+
+ drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
+ drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
+ return 0;
+}
+
+static void psb_fbdev_fini(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->fbdev)
+ return;
+
+ psb_fbdev_destroy(dev, dev_priv->fbdev);
+ kfree(dev_priv->fbdev);
+ dev_priv->fbdev = NULL;
+}
+
+static void psbfb_output_poll_changed(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev;
+ drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
+}
+
+/**
+ * psb_user_framebuffer_create_handle - add handle to a framebuffer
+ * @fb: framebuffer
+ * @file_priv: our DRM file
+ * @handle: returned handle
+ *
+ * Our framebuffer object is a GTT range which also contains a GEM
+ * object. We need to turn it into a handle for userspace. GEM will do
+ * the work for us
+ */
+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
+ struct gtt_range *r = psbfb->gtt;
+ return drm_gem_handle_create(file_priv, &r->gem, handle);
+}
+
+/**
+ * psb_user_framebuffer_destroy - destruct user created fb
+ * @fb: framebuffer
+ *
+ * User framebuffers are backed by GEM objects so all we have to do is
+ * clean up a bit and drop the reference; GEM will handle the fallout
+ */
+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
+ struct gtt_range *r = psbfb->gtt;
+
+ /* Let DRM do its clean up */
+ drm_framebuffer_cleanup(fb);
+ /* We are no longer using the resource in GEM */
+ drm_gem_object_unreference_unlocked(&r->gem);
+ kfree(fb);
+}
+
+static const struct drm_mode_config_funcs psb_mode_funcs = {
+ .fb_create = psb_user_framebuffer_create,
+ .output_poll_changed = psbfb_output_poll_changed,
+};
+
+static int psb_create_backlight_property(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_property *backlight;
+
+ if (dev_priv->backlight_property)
+ return 0;
+
+ backlight = drm_property_create_range(dev, 0, "backlight", 0, 100);
+
+ dev_priv->backlight_property = backlight;
+
+ return 0;
+}
+
+static void psb_setup_outputs(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+
+ drm_mode_create_scaling_mode_property(dev);
+ psb_create_backlight_property(dev);
+
+ dev_priv->ops->output_init(dev);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct drm_encoder *encoder = &gma_encoder->base;
+ int crtc_mask = 0, clone_mask = 0;
+
+ /* valid crtcs */
+ switch (gma_encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ crtc_mask = (1 << 0);
+ clone_mask = (1 << INTEL_OUTPUT_ANALOG);
+ break;
+ case INTEL_OUTPUT_SDVO:
+ crtc_mask = dev_priv->ops->sdvo_mask;
+ clone_mask = (1 << INTEL_OUTPUT_SDVO);
+ break;
+ case INTEL_OUTPUT_LVDS:
+ crtc_mask = dev_priv->ops->lvds_mask;
+ clone_mask = (1 << INTEL_OUTPUT_LVDS);
+ break;
+ case INTEL_OUTPUT_MIPI:
+ crtc_mask = (1 << 0);
+ clone_mask = (1 << INTEL_OUTPUT_MIPI);
+ break;
+ case INTEL_OUTPUT_MIPI2:
+ crtc_mask = (1 << 2);
+ clone_mask = (1 << INTEL_OUTPUT_MIPI2);
+ break;
+ case INTEL_OUTPUT_HDMI:
+ crtc_mask = dev_priv->ops->hdmi_mask;
+ clone_mask = (1 << INTEL_OUTPUT_HDMI);
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ crtc_mask = (1 << 0) | (1 << 1);
+ clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
+ break;
+		case INTEL_OUTPUT_EDP:
+			crtc_mask = (1 << 1);
+			clone_mask = (1 << INTEL_OUTPUT_EDP);
+			break;
+		}
+ encoder->possible_crtcs = crtc_mask;
+ encoder->possible_clones =
+ gma_connector_clones(dev, clone_mask);
+ }
+}
+
+void psb_modeset_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ int i;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ dev->mode_config.funcs = &psb_mode_funcs;
+
+ /* set memory base */
+	/* Oaktrail and Poulsbo should use BAR 2 */
+ pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
+ &(dev->mode_config.fb_base));
+
+ /* num pipes is 2 for PSB but 1 for Mrst */
+ for (i = 0; i < dev_priv->num_pipe; i++)
+ psb_intel_crtc_init(dev, i, mode_dev);
+
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+
+ psb_setup_outputs(dev);
+
+ if (dev_priv->ops->errata)
+ dev_priv->ops->errata(dev);
+
+ dev_priv->modeset = true;
+}
+
+void psb_modeset_cleanup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (dev_priv->modeset) {
+ mutex_lock(&dev->struct_mutex);
+
+ drm_kms_helper_poll_fini(dev);
+ psb_fbdev_fini(dev);
+ drm_mode_config_cleanup(dev);
+
+ mutex_unlock(&dev->struct_mutex);
+ }
+}
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
new file mode 100644
index 00000000000..395f20b07aa
--- /dev/null
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2008-2011, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _FRAMEBUFFER_H_
+#define _FRAMEBUFFER_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+
+#include "psb_drv.h"
+
+struct psb_framebuffer {
+ struct drm_framebuffer base;
+ struct address_space *addr_space;
+ struct fb_info *fbdev;
+ struct gtt_range *gtt;
+};
+
+struct psb_fbdev {
+ struct drm_fb_helper psb_fb_helper;
+ struct psb_framebuffer pfb;
+};
+
+#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
+
+extern int gma_connector_clones(struct drm_device *dev, int type_mask);
+
+#endif
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
new file mode 100644
index 00000000000..c707fa6fca8
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -0,0 +1,229 @@
+/*
+ * psb GEM interface
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Alan Cox
+ *
+ * TODO:
+ * - we need to work out if the MMU is relevant (eg for
+ * accelerated operations on a GEM object)
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/gma_drm.h>
+#include <drm/drm_vma_manager.h>
+#include "psb_drv.h"
+
+void psb_gem_free_object(struct drm_gem_object *obj)
+{
+ struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
+
+ /* Remove the list map if one is present */
+ drm_gem_free_mmap_offset(obj);
+ drm_gem_object_release(obj);
+
+ /* This must occur last as it frees up the memory of the GEM object */
+ psb_gtt_free_range(obj->dev, gtt);
+}
+
+int psb_gem_get_aperture(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return -EINVAL;
+}
+
+/**
+ * psb_gem_dumb_map_gtt - buffer mapping for dumb interface
+ * @file: our drm client file
+ * @dev: drm device
+ * @handle: GEM handle to the object (from dumb_create)
+ *
+ * Do the necessary setup to allow the mapping of the frame buffer
+ * into user memory. We don't have to do much here at the moment.
+ */
+int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ int ret = 0;
+ struct drm_gem_object *obj;
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* GEM does all our handle to object mapping */
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+	/* What validation is needed here? */
+
+ /* Make it mmapable */
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+out:
+ drm_gem_object_unreference(obj);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
+ * psb_gem_create - create a mappable object
+ * @file: the DRM file of the client
+ * @dev: our device
+ * @size: the size requested
+ * @handlep: returned handle (opaque number)
+ *
+ * Create a GEM object, fill in the boilerplate and attach a handle to
+ * it so that userspace can speak about it. This does the core work
+ * for the various methods that do/will create GEM objects for things
+ */
+int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
+ u32 *handlep, int stolen, u32 align)
+{
+ struct gtt_range *r;
+ int ret;
+ u32 handle;
+
+ size = roundup(size, PAGE_SIZE);
+
+ /* Allocate our object - for now a direct gtt range which is not
+ stolen memory backed */
+ r = psb_gtt_alloc_range(dev, size, "gem", 0, PAGE_SIZE);
+ if (r == NULL) {
+		dev_err(dev->dev, "no memory for %llu byte GEM object\n", size);
+ return -ENOSPC;
+ }
+ /* Initialize the extra goodies GEM needs to do all the hard work */
+ if (drm_gem_object_init(dev, &r->gem, size) != 0) {
+ psb_gtt_free_range(dev, r);
+ /* GEM doesn't give an error code so use -ENOMEM */
+		dev_err(dev->dev, "GEM init failed for %llu\n", size);
+ return -ENOMEM;
+ }
+ /* Limit the object to 32bit mappings */
+ mapping_set_gfp_mask(r->gem.filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
+ /* Give the object a handle so we can carry it more easily */
+ ret = drm_gem_handle_create(file, &r->gem, &handle);
+ if (ret) {
+		dev_err(dev->dev, "GEM handle failed for %p, %llu\n",
+			&r->gem, size);
+ drm_gem_object_release(&r->gem);
+ psb_gtt_free_range(dev, r);
+ return ret;
+ }
+ /* We have the initial and handle reference but need only one now */
+ drm_gem_object_unreference(&r->gem);
+ *handlep = handle;
+ return 0;
+}
+
+/**
+ * psb_gem_dumb_create - create a dumb buffer
+ * @drm_file: our client file
+ * @dev: our device
+ * @args: the requested arguments copied from userspace
+ *
+ * Allocate a buffer suitable for use for a frame buffer of the
+ * form described by user space. Give userspace a handle by which
+ * to reference it.
+ */
+int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+ args->size = args->pitch * args->height;
+ return psb_gem_create(file, dev, args->size, &args->handle, 0,
+ PAGE_SIZE);
+}
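+
+/*
+ * Example (illustrative): a 1024x768 dumb buffer at 32bpp gets
+ * pitch = ALIGN(1024 * 4, 64) = 4096 bytes and size = 4096 * 768,
+ * exactly 3 MiB, allocated as a shmem backed (non stolen) object.
+ */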
+
+/**
+ * psb_gem_fault - pagefault handler for GEM objects
+ * @vma: the VMA of the GEM object
+ * @vmf: fault detail
+ *
+ * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
+ * does most of the work for us including the actual map/unmap calls
+ * but we need to do the actual page work.
+ *
+ * This code eventually needs to handle faulting objects in and out
+ * of the GTT and repacking it when we run out of space. We can put
+ * that off for now and for our simple uses
+ *
+ * The VMA was set up by GEM. In doing so it also ensured that the
+ * vma->vm_private_data points to the GEM object that is backing this
+ * mapping.
+ */
+int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *obj;
+ struct gtt_range *r;
+ int ret;
+ unsigned long pfn;
+ pgoff_t page_offset;
+ struct drm_device *dev;
+ struct drm_psb_private *dev_priv;
+
+ obj = vma->vm_private_data; /* GEM object */
+ dev = obj->dev;
+ dev_priv = dev->dev_private;
+
+ r = container_of(obj, struct gtt_range, gem); /* Get the gtt range */
+
+ /* Make sure we don't parallel update on a fault, nor move or remove
+ something from beneath our feet */
+ mutex_lock(&dev->struct_mutex);
+
+ /* For now the mmap pins the object and it stays pinned. As things
+ stand that will do us no harm */
+ if (r->mmapping == 0) {
+ ret = psb_gtt_pin(r);
+ if (ret < 0) {
+ dev_err(dev->dev, "gma500: pin failed: %d\n", ret);
+ goto fail;
+ }
+ r->mmapping = 1;
+ }
+
+ /* Page relative to the VMA start - we must calculate this ourselves
+ because vmf->pgoff is the fake GEM offset */
+ page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
+ >> PAGE_SHIFT;
+
+ /* CPU view of the page, don't go via the GART for CPU writes */
+ if (r->stolen)
+ pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
+ else
+ pfn = page_to_pfn(r->pages[page_offset]);
+ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+fail:
+ mutex_unlock(&dev->struct_mutex);
+ switch (ret) {
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
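+
+/*
+ * Example (illustrative): a fault at vma->vm_start + 3 * PAGE_SIZE
+ * yields page_offset = 3 and, for a shmem backed object, inserts the
+ * pfn of r->pages[3]; a stolen range instead maps from the pfn
+ * derived from stolen_base + r->offset.
+ */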
diff --git a/drivers/gpu/drm/gma500/gem.h b/drivers/gpu/drm/gma500/gem.h
new file mode 100644
index 00000000000..1381c5190f4
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gem.h
@@ -0,0 +1,21 @@
+/**************************************************************************
+ * Copyright (c) 2014 Patrik Jakobsson
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ **************************************************************************/
+
+#ifndef _GEM_H
+#define _GEM_H
+
+extern int psb_gem_create(struct drm_file *file, struct drm_device *dev,
+ u64 size, u32 *handlep, int stolen, u32 align);
+#endif
diff --git a/drivers/gpu/drm/gma500/gma_device.c b/drivers/gpu/drm/gma500/gma_device.c
new file mode 100644
index 00000000000..4a295f9ba06
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_device.c
@@ -0,0 +1,56 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+
+void gma_get_core_freq(struct drm_device *dev)
+{
+ uint32_t clock;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
+ /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
+
+ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+ pci_read_config_dword(pci_root, 0xD4, &clock);
+ pci_dev_put(pci_root);
+
+ switch (clock & 0x07) {
+ case 0:
+ dev_priv->core_freq = 100;
+ break;
+ case 1:
+ dev_priv->core_freq = 133;
+ break;
+ case 2:
+ dev_priv->core_freq = 150;
+ break;
+ case 3:
+ dev_priv->core_freq = 178;
+ break;
+ case 4:
+ dev_priv->core_freq = 200;
+ break;
+ case 5:
+ case 6:
+ case 7:
+ dev_priv->core_freq = 266;
+ break;
+ default:
+ dev_priv->core_freq = 0;
+ }
+}
diff --git a/drivers/gpu/drm/gma500/gma_device.h b/drivers/gpu/drm/gma500/gma_device.h
new file mode 100644
index 00000000000..e1dbb007b82
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_device.h
@@ -0,0 +1,21 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ **************************************************************************/
+
+#ifndef _GMA_DEVICE_H
+#define _GMA_DEVICE_H
+
+extern void gma_get_core_freq(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
new file mode 100644
index 00000000000..9bb9bddd881
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -0,0 +1,791 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+ */
+
+#include <drm/drmP.h>
+#include "gma_display.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_drv.h"
+#include "framebuffer.h"
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *l_entry;
+
+ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+ if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+ struct gma_encoder *gma_encoder =
+ gma_attached_encoder(l_entry);
+ if (gma_encoder->type == type)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void gma_wait_for_vblank(struct drm_device *dev)
+{
+	/* Wait for 20ms, i.e. one cycle at 50 Hz. */
+ mdelay(20);
+}
+
+int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb);
+ int pipe = gma_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ unsigned long start, offset;
+ u32 dspcntr;
+ int ret = 0;
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ /* no fb bound */
+ if (!crtc->primary->fb) {
+ dev_err(dev->dev, "No FB bound\n");
+ goto gma_pipe_cleaner;
+ }
+
+	/* We are displaying this buffer; make sure it is actually loaded
+	   into the GTT */
+ ret = psb_gtt_pin(psbfb->gtt);
+ if (ret < 0)
+ goto gma_pipe_set_base_exit;
+ start = psbfb->gtt->offset;
+	offset = y * crtc->primary->fb->pitches[0] +
+		 x * (crtc->primary->fb->bits_per_pixel / 8);
+
+ REG_WRITE(map->stride, crtc->primary->fb->pitches[0]);
+
+ dspcntr = REG_READ(map->cntr);
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+ switch (crtc->primary->fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (crtc->primary->fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown color depth\n");
+ ret = -EINVAL;
+ goto gma_pipe_set_base_exit;
+ }
+ REG_WRITE(map->cntr, dspcntr);
+
+ dev_dbg(dev->dev,
+ "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
+
+ /* FIXME: Investigate whether this really is the base for psb and why
+ the linear offset is named base for the other chips. map->surf
+ should be the base and map->linoff the offset for all chips */
+ if (IS_PSB(dev)) {
+ REG_WRITE(map->base, offset + start);
+ REG_READ(map->base);
+ } else {
+ REG_WRITE(map->base, offset);
+ REG_READ(map->base);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
+ }
+
+gma_pipe_cleaner:
+ /* If there was a previous display we can now unpin it */
+ if (old_fb)
+ psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+
+gma_pipe_set_base_exit:
+ gma_power_end(dev);
+ return ret;
+}
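+
+/*
+ * Example (illustrative): panning to (x, y) = (8, 2) on a 32bpp
+ * framebuffer with a 4096 byte pitch gives
+ * offset = 2 * 4096 + 8 * 4 = 8224 bytes into the pinned GTT range.
+ */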
+
+/* Loads the palette/gamma unit for the CRTC with the prepared values */
+void gma_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
+ int palreg = map->palette;
+ int i;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled)
+ return;
+
+ if (gma_power_begin(dev, false)) {
+ for (i = 0; i < 256; i++) {
+ REG_WRITE(palreg + 4 * i,
+ ((gma_crtc->lut_r[i] +
+ gma_crtc->lut_adj[i]) << 16) |
+ ((gma_crtc->lut_g[i] +
+ gma_crtc->lut_adj[i]) << 8) |
+ (gma_crtc->lut_b[i] +
+ gma_crtc->lut_adj[i]));
+ }
+ gma_power_end(dev);
+ } else {
+ for (i = 0; i < 256; i++) {
+ /* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
+ dev_priv->regs.pipe[0].palette[i] =
+ ((gma_crtc->lut_r[i] +
+ gma_crtc->lut_adj[i]) << 16) |
+ ((gma_crtc->lut_g[i] +
+ gma_crtc->lut_adj[i]) << 8) |
+ (gma_crtc->lut_b[i] +
+ gma_crtc->lut_adj[i]);
+ }
+
+ }
+}
+
+void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
+ u32 start, u32 size)
+{
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int i;
+ int end = (start + size > 256) ? 256 : start + size;
+
+ for (i = start; i < end; i++) {
+ gma_crtc->lut_r[i] = red[i] >> 8;
+ gma_crtc->lut_g[i] = green[i] >> 8;
+ gma_crtc->lut_b[i] = blue[i] >> 8;
+ }
+
+ gma_crtc_load_lut(crtc);
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ u32 temp;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+
+ if (IS_CDV(dev))
+ dev_priv->ops->disable_sr(dev);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ if (gma_crtc->active)
+ break;
+
+ gma_crtc->active = true;
+
+ /* Enable the DPLL */
+ temp = REG_READ(map->dpll);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ }
+
+ /* Enable the plane */
+ temp = REG_READ(map->cntr);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(map->cntr,
+ temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(map->base, REG_READ(map->base));
+ }
+
+ udelay(150);
+
+ /* Enable the pipe */
+ temp = REG_READ(map->conf);
+ if ((temp & PIPEACONF_ENABLE) == 0)
+ REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
+
+ temp = REG_READ(map->status);
+ temp &= ~(0xFFFF);
+ temp |= PIPE_FIFO_UNDERRUN;
+ REG_WRITE(map->status, temp);
+ REG_READ(map->status);
+
+ gma_crtc_load_lut(crtc);
+
+ /* Give the overlay scaler a chance to enable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+ break;
+ case DRM_MODE_DPMS_OFF:
+ if (!gma_crtc->active)
+ break;
+
+ gma_crtc->active = false;
+
+ /* Give the overlay scaler a chance to disable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Turn off vblank interrupts */
+ drm_vblank_off(dev, pipe);
+
+ /* Wait for vblank for the disable to take effect */
+ gma_wait_for_vblank(dev);
+
+ /* Disable plane */
+ temp = REG_READ(map->cntr);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(map->cntr,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
+ }
+
+ /* Disable pipe */
+ temp = REG_READ(map->conf);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+ REG_READ(map->conf);
+ }
+
+ /* Wait for vblank for the disable to take effect. */
+ gma_wait_for_vblank(dev);
+
+ udelay(150);
+
+ /* Disable DPLL */
+ temp = REG_READ(map->dpll);
+ if ((temp & DPLL_VCO_ENABLE) != 0) {
+ REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
+ }
+
+ /* Wait for the clocks to turn off. */
+ udelay(150);
+ break;
+ }
+
+ if (IS_CDV(dev))
+ dev_priv->ops->update_wm(dev, crtc);
+
+ /* Set FIFO watermarks */
+ REG_WRITE(DSPARB, 0x3F3E);
+}
+
+int gma_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
+ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+ uint32_t temp;
+ size_t addr = 0;
+ struct gtt_range *gt;
+ struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
+ struct drm_gem_object *obj;
+ void *tmp_dst, *tmp_src;
+ int ret = 0, i, cursor_pages;
+
+ /* If we didn't get a handle then turn the cursor off */
+ if (!handle) {
+ temp = CURSOR_MODE_DISABLE;
+ mutex_lock(&dev->struct_mutex);
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, 0);
+ gma_power_end(dev);
+ }
+
+ /* Unpin the old GEM object */
+ if (gma_crtc->cursor_obj) {
+ gt = container_of(gma_crtc->cursor_obj,
+ struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(gma_crtc->cursor_obj);
+ gma_crtc->cursor_obj = NULL;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+ }
+
+ /* Currently we only support 64x64 cursors */
+ if (width != 64 || height != 64) {
+ dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ if (obj->size < width * height * 4) {
+ dev_dbg(dev->dev, "Buffer is too small\n");
+ ret = -ENOMEM;
+ goto unref_cursor;
+ }
+
+ gt = container_of(obj, struct gtt_range, gem);
+
+ /* Pin the memory into the GTT */
+ ret = psb_gtt_pin(gt);
+ if (ret) {
+ dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+ goto unref_cursor;
+ }
+
+ if (dev_priv->ops->cursor_needs_phys) {
+		if (cursor_gt == NULL) {
+			dev_err(dev->dev, "No hardware cursor mem available\n");
+			ret = -ENOMEM;
+			/* Drop the pin taken above before bailing out */
+			psb_gtt_unpin(gt);
+			goto unref_cursor;
+		}
+
+ /* Prevent overflow */
+ if (gt->npage > 4)
+ cursor_pages = 4;
+ else
+ cursor_pages = gt->npage;
+
+ /* Copy the cursor to cursor mem */
+ tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
+ for (i = 0; i < cursor_pages; i++) {
+ tmp_src = kmap(gt->pages[i]);
+ memcpy(tmp_dst, tmp_src, PAGE_SIZE);
+ kunmap(gt->pages[i]);
+ tmp_dst += PAGE_SIZE;
+ }
+
+ addr = gma_crtc->cursor_addr;
+ } else {
+ addr = gt->offset;
+ gma_crtc->cursor_addr = addr;
+ }
+
+ temp = 0;
+ /* set the pipe for the cursor */
+ temp |= (pipe << 28);
+ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, addr);
+ gma_power_end(dev);
+ }
+
+ /* unpin the old bo */
+ if (gma_crtc->cursor_obj) {
+ gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(gma_crtc->cursor_obj);
+ }
+
+ gma_crtc->cursor_obj = obj;
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+unref_cursor:
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
+ uint32_t temp = 0;
+ uint32_t addr;
+
+ if (x < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+ x = -x;
+ }
+ if (y < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+ y = -y;
+ }
+
+ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+ addr = gma_crtc->cursor_addr;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+ REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
+ gma_power_end(dev);
+ }
+ return 0;
+}
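+
+/*
+ * Example (illustrative): moving the cursor to (-5, 10) sets the sign
+ * bit in the X field and programs magnitudes 5 and 10, since the
+ * position registers use sign magnitude rather than two's complement.
+ */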
+
+bool gma_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+void gma_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+void gma_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+void gma_crtc_disable(struct drm_crtc *crtc)
+{
+ struct gtt_range *gt;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (crtc->primary->fb) {
+ gt = to_psb_fb(crtc->primary->fb)->gtt;
+ psb_gtt_unpin(gt);
+ }
+}
+
+void gma_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+
+ kfree(gma_crtc->crtc_state);
+ drm_crtc_cleanup(crtc);
+ kfree(gma_crtc);
+}
+
+int gma_crtc_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev = set->crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!dev_priv->rpm_enabled)
+ return drm_crtc_helper_set_config(set);
+
+ pm_runtime_forbid(&dev->pdev->dev);
+ ret = drm_crtc_helper_set_config(set);
+ pm_runtime_allow(&dev->pdev->dev);
+
+ return ret;
+}
+
+/**
+ * Save HW states of the given crtc
+ */
+void gma_crtc_save(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
+ const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
+ uint32_t palette_reg;
+ int i;
+
+ if (!crtc_state) {
+ dev_err(dev->dev, "No CRTC state found\n");
+ return;
+ }
+
+ crtc_state->saveDSPCNTR = REG_READ(map->cntr);
+ crtc_state->savePIPECONF = REG_READ(map->conf);
+ crtc_state->savePIPESRC = REG_READ(map->src);
+ crtc_state->saveFP0 = REG_READ(map->fp0);
+ crtc_state->saveFP1 = REG_READ(map->fp1);
+ crtc_state->saveDPLL = REG_READ(map->dpll);
+ crtc_state->saveHTOTAL = REG_READ(map->htotal);
+ crtc_state->saveHBLANK = REG_READ(map->hblank);
+ crtc_state->saveHSYNC = REG_READ(map->hsync);
+ crtc_state->saveVTOTAL = REG_READ(map->vtotal);
+ crtc_state->saveVBLANK = REG_READ(map->vblank);
+ crtc_state->saveVSYNC = REG_READ(map->vsync);
+ crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
+
+ /* NOTE: DSPSIZE DSPPOS only for psb */
+ crtc_state->saveDSPSIZE = REG_READ(map->size);
+ crtc_state->saveDSPPOS = REG_READ(map->pos);
+
+ crtc_state->saveDSPBASE = REG_READ(map->base);
+
+ palette_reg = map->palette;
+ for (i = 0; i < 256; ++i)
+ crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
+}
+
+/**
+ * Restore HW states of the given crtc
+ */
+void gma_crtc_restore(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
+ const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
+ uint32_t palette_reg;
+ int i;
+
+ if (!crtc_state) {
+ dev_err(dev->dev, "No crtc state\n");
+ return;
+ }
+
+ if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+ REG_WRITE(map->dpll,
+ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
+ udelay(150);
+ }
+
+ REG_WRITE(map->fp0, crtc_state->saveFP0);
+ REG_READ(map->fp0);
+
+ REG_WRITE(map->fp1, crtc_state->saveFP1);
+ REG_READ(map->fp1);
+
+ REG_WRITE(map->dpll, crtc_state->saveDPLL);
+ REG_READ(map->dpll);
+ udelay(150);
+
+ REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
+ REG_WRITE(map->hblank, crtc_state->saveHBLANK);
+ REG_WRITE(map->hsync, crtc_state->saveHSYNC);
+ REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
+ REG_WRITE(map->vblank, crtc_state->saveVBLANK);
+ REG_WRITE(map->vsync, crtc_state->saveVSYNC);
+ REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
+
+ REG_WRITE(map->size, crtc_state->saveDSPSIZE);
+ REG_WRITE(map->pos, crtc_state->saveDSPPOS);
+
+ REG_WRITE(map->src, crtc_state->savePIPESRC);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
+ REG_WRITE(map->conf, crtc_state->savePIPECONF);
+
+ gma_wait_for_vblank(dev);
+
+ REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
+
+ gma_wait_for_vblank(dev);
+
+ palette_reg = map->palette;
+ for (i = 0; i < 256; ++i)
+ REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
+}
+
+void gma_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+	/* LVDS has its own version of prepare; see psb_intel_lvds_prepare */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void gma_encoder_commit(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+	/* LVDS has its own version of commit; see psb_intel_lvds_commit */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+void gma_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
+/* Currently there is only a 1:1 mapping of encoders and connectors */
+struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
+{
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+
+ return &gma_encoder->base;
+}
+
+void gma_connector_attach_encoder(struct gma_connector *connector,
+ struct gma_encoder *encoder)
+{
+ connector->encoder = encoder;
+ drm_mode_connector_attach_encoder(&connector->base,
+ &encoder->base);
+}
+
+#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }
+
+bool gma_pll_is_valid(struct drm_crtc *crtc,
+ const struct gma_limit_t *limit,
+ struct gma_clock_t *clock)
+{
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ GMA_PLL_INVALID("p1 out of range");
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ GMA_PLL_INVALID("p out of range");
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ GMA_PLL_INVALID("m2 out of range");
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ GMA_PLL_INVALID("m1 out of range");
+ /* On CDV m1 is always 0 */
+ if (clock->m1 <= clock->m2 && clock->m1 != 0)
+ GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ GMA_PLL_INVALID("m out of range");
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ GMA_PLL_INVALID("n out of range");
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ GMA_PLL_INVALID("vco out of range");
+ /* XXX: We may need to be checking "Dot clock"
+ * depending on the multiplier, connector, etc.,
+ * rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ GMA_PLL_INVALID("dot out of range");
+
+ return true;
+}
+
+bool gma_find_best_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target, int refclk,
+ struct gma_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ const struct gma_clock_funcs *clock_funcs =
+ to_gma_crtc(crtc)->clock_funcs;
+ struct gma_clock_t clock;
+ int err = target;
+
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+ /*
+ * For LVDS, if the panel is on, just rely on its current
+ * settings for dual-channel. We haven't figured out how to
+ * reliably set up different single/dual channel state, if we
+ * even can.
+ */
+ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+	/* m1 is always 0 on CDV so the outermost loop will run just once */
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ (clock.m2 < clock.m1 || clock.m1 == 0) &&
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max;
+ clock.p1++) {
+ int this_err;
+
+ clock_funcs->clock(refclk, &clock);
+
+ if (!clock_funcs->pll_is_valid(crtc,
+ limit, &clock))
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return err != target;
+}
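+
+/*
+ * Illustrative note: this is a brute force sweep of the m1, m2, n and
+ * p1 ranges; for each candidate the chip specific ->clock() callback
+ * derives the dot clock, ->pll_is_valid() filters out-of-range
+ * settings, and the candidate with the smallest |dot - target| wins.
+ */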
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
new file mode 100644
index 00000000000..ed569d8a6af
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+ */
+
+#ifndef _GMA_DISPLAY_H_
+#define _GMA_DISPLAY_H_
+
+#include <linux/pm_runtime.h>
+
+struct gma_clock_t {
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
+};
+
+struct gma_range_t {
+ int min, max;
+};
+
+struct gma_p2_t {
+ int dot_limit;
+ int p2_slow, p2_fast;
+};
+
+struct gma_limit_t {
+ struct gma_range_t dot, vco, n, m, m1, m2, p, p1;
+ struct gma_p2_t p2;
+ bool (*find_pll)(const struct gma_limit_t *, struct drm_crtc *,
+ int target, int refclk,
+ struct gma_clock_t *best_clock);
+};
+
+struct gma_clock_funcs {
+ void (*clock)(int refclk, struct gma_clock_t *clock);
+ const struct gma_limit_t *(*limit)(struct drm_crtc *crtc, int refclk);
+ bool (*pll_is_valid)(struct drm_crtc *crtc,
+ const struct gma_limit_t *limit,
+ struct gma_clock_t *clock);
+};
+
+/* Common pipe related functions */
+extern bool gma_pipe_has_type(struct drm_crtc *crtc, int type);
+extern void gma_wait_for_vblank(struct drm_device *dev);
+extern int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+extern int gma_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width, uint32_t height);
+extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+extern void gma_crtc_load_lut(struct drm_crtc *crtc);
+extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, u32 start, u32 size);
+extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
+extern bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern void gma_crtc_prepare(struct drm_crtc *crtc);
+extern void gma_crtc_commit(struct drm_crtc *crtc);
+extern void gma_crtc_disable(struct drm_crtc *crtc);
+extern void gma_crtc_destroy(struct drm_crtc *crtc);
+extern int gma_crtc_set_config(struct drm_mode_set *set);
+
+extern void gma_crtc_save(struct drm_crtc *crtc);
+extern void gma_crtc_restore(struct drm_crtc *crtc);
+
+extern void gma_encoder_prepare(struct drm_encoder *encoder);
+extern void gma_encoder_commit(struct drm_encoder *encoder);
+extern void gma_encoder_destroy(struct drm_encoder *encoder);
+extern bool gma_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+/* Common clock related functions */
+extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk);
+extern void gma_clock(int refclk, struct gma_clock_t *clock);
+extern bool gma_pll_is_valid(struct drm_crtc *crtc,
+ const struct gma_limit_t *limit,
+ struct gma_clock_t *clock);
+extern bool gma_find_best_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target, int refclk,
+ struct gma_clock_t *best_clock);
+#endif
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
new file mode 100644
index 00000000000..592d205a008
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -0,0 +1,587 @@
+/*
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
+ * Alan Cox <alan@linux.intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <linux/shmem_fs.h>
+#include "psb_drv.h"
+#include "blitter.h"
+
+
+/*
+ * GTT resource allocator - manage page mappings in GTT space
+ */
+
+/**
+ * psb_gtt_mask_pte - generate GTT pte entry
+ * @pfn: page number to encode
+ * @type: type of memory in the GTT
+ *
+ * Set the GTT entry for the appropriate memory type.
+ */
+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
+{
+ uint32_t mask = PSB_PTE_VALID;
+
+ /* Ensure we explode rather than put an invalid low mapping of
+ a high mapping page into the gtt */
+ BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));
+
+ if (type & PSB_MMU_CACHED_MEMORY)
+ mask |= PSB_PTE_CACHED;
+ if (type & PSB_MMU_RO_MEMORY)
+ mask |= PSB_PTE_RO;
+ if (type & PSB_MMU_WO_MEMORY)
+ mask |= PSB_PTE_WO;
+
+ return (pfn << PAGE_SHIFT) | mask;
+}
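+
+/*
+ * Example (illustrative): a cached page at pfn 0x1000 encodes as
+ * (0x1000 << PAGE_SHIFT) | PSB_PTE_VALID | PSB_PTE_CACHED, i.e. the
+ * physical address 0x1000000 with the valid and cached bits set.
+ */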
+
+/**
+ * psb_gtt_entry - find the GTT entries for a gtt_range
+ * @dev: our DRM device
+ * @r: our GTT range
+ *
+ * Given a gtt_range object return the GTT offset of the page table
+ * entries for this gtt_range
+ */
+static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long offset;
+
+ offset = r->resource.start - dev_priv->gtt_mem->start;
+
+ return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
+}
+
+/**
+ * psb_gtt_insert - put an object into the GTT
+ * @dev: our DRM device
+ * @r: our GTT range
+ *
+ * Take our preallocated GTT range and insert the GEM object into
+ * the GTT. This is protected via the gtt mutex which the caller
+ * must hold.
+ */
+static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
+ int resume)
+{
+ u32 __iomem *gtt_slot;
+ u32 pte;
+ struct page **pages;
+ int i;
+
+ if (r->pages == NULL) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ WARN_ON(r->stolen); /* refcount these maybe ? */
+
+ gtt_slot = psb_gtt_entry(dev, r);
+ pages = r->pages;
+
+ if (!resume) {
+ /* Make sure changes are visible to the GPU */
+ set_pages_array_wc(pages, r->npage);
+ }
+
+ /* Write our page entries into the GTT itself */
+ for (i = r->roll; i < r->npage; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
+ PSB_MMU_CACHED_MEMORY);
+ iowrite32(pte, gtt_slot++);
+ }
+ for (i = 0; i < r->roll; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
+ PSB_MMU_CACHED_MEMORY);
+ iowrite32(pte, gtt_slot++);
+ }
+ /* Make sure all the entries are set before we return */
+ ioread32(gtt_slot - 1);
+
+ return 0;
+}
+
+/**
+ * psb_gtt_remove - remove an object from the GTT
+ * @dev: our DRM device
+ * @r: our GTT range
+ *
+ * Remove a preallocated GTT range from the GTT. Overwrite all the
+ * page table entries with the dummy page. This is protected via the gtt
+ * mutex which the caller must hold.
+ */
+void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 __iomem *gtt_slot;
+ u32 pte;
+ int i;
+
+ WARN_ON(r->stolen);
+
+ gtt_slot = psb_gtt_entry(dev, r);
+ pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page),
+ PSB_MMU_CACHED_MEMORY);
+
+ for (i = 0; i < r->npage; i++)
+ iowrite32(pte, gtt_slot++);
+ ioread32(gtt_slot - 1);
+ set_pages_array_wb(r->pages, r->npage);
+}
+
+/**
+ * psb_gtt_roll - set scrolling position
+ * @dev: our DRM device
+ * @r: the gtt mapping we are using
+ * @roll: roll offset
+ *
+ * Roll an existing pinned mapping by moving the pages through the GTT.
+ * This allows us to implement hardware scrolling on the consoles without
+ * a 2D engine
+ */
+void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
+{
+ u32 __iomem *gtt_slot;
+ u32 pte;
+ int i;
+
+ if (roll >= r->npage) {
+ WARN_ON(1);
+ return;
+ }
+
+ r->roll = roll;
+
+	/* Not currently in the GTT - no worries, we will write the mapping
+	   in at the right position when it gets pinned */
+ if (!r->stolen && !r->in_gart)
+ return;
+
+ gtt_slot = psb_gtt_entry(dev, r);
+
+ for (i = r->roll; i < r->npage; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
+ PSB_MMU_CACHED_MEMORY);
+ iowrite32(pte, gtt_slot++);
+ }
+ for (i = 0; i < r->roll; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
+ PSB_MMU_CACHED_MEMORY);
+ iowrite32(pte, gtt_slot++);
+ }
+ ioread32(gtt_slot - 1);
+}
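+
+/*
+ * Example (illustrative): for a 100 page framebuffer a roll of 25
+ * rewrites the GTT so that pages 25..99 come first, followed by pages
+ * 0..24, scrolling the console by a quarter of the buffer without
+ * copying any pixel data.
+ */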
+
+/**
+ * psb_gtt_attach_pages - attach and pin GEM pages
+ * @gt: the gtt range
+ *
+ * Pin and build an in kernel list of the pages that back our GEM object.
+ * While we hold this the pages cannot be swapped out. This is protected
+ * via the gtt mutex which the caller must hold.
+ */
+static int psb_gtt_attach_pages(struct gtt_range *gt)
+{
+ struct page **pages;
+
+ WARN_ON(gt->pages);
+
+ pages = drm_gem_get_pages(&gt->gem, 0);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ gt->npage = gt->gem.size / PAGE_SIZE;
+ gt->pages = pages;
+
+ return 0;
+}
+
+/**
+ * psb_gtt_detach_pages - detach and unpin GEM pages
+ * @gt: the gtt range
+ *
+ * Undo the effect of psb_gtt_attach_pages. At this point the pages
+ * must have been removed from the GTT as they could now be paged out
+ * and their bus addresses may change. This is protected via the gtt
+ * mutex which the caller must hold.
+ */
+static void psb_gtt_detach_pages(struct gtt_range *gt)
+{
+ drm_gem_put_pages(&gt->gem, gt->pages, true, false);
+ gt->pages = NULL;
+}
+
+/**
+ * psb_gtt_pin - pin pages into the GTT
+ * @gt: range to pin
+ *
+ * Pin a set of pages into the GTT. The pins are refcounted so that
+ * multiple pins need multiple unpins to undo.
+ *
+ * Non GEM backed objects treat this as a no-op as they are always GTT
+ * backed objects.
+ */
+int psb_gtt_pin(struct gtt_range *gt)
+{
+ int ret = 0;
+ struct drm_device *dev = gt->gem.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 gpu_base = dev_priv->gtt.gatt_start;
+
+ mutex_lock(&dev_priv->gtt_mutex);
+
+ if (gt->in_gart == 0 && gt->stolen == 0) {
+ ret = psb_gtt_attach_pages(gt);
+ if (ret < 0)
+ goto out;
+ ret = psb_gtt_insert(dev, gt, 0);
+ if (ret < 0) {
+ psb_gtt_detach_pages(gt);
+ goto out;
+ }
+ psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
+ gt->pages, (gpu_base + gt->offset),
+ gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
+ }
+ gt->in_gart++;
+out:
+ mutex_unlock(&dev_priv->gtt_mutex);
+ return ret;
+}
+
+/**
+ * psb_gtt_unpin - Drop a GTT pin requirement
+ * @gt: range to pin
+ *
+ * Undoes the effect of psb_gtt_pin. On the last drop the GEM object
+ * will be removed from the GTT which will also drop the page references
+ * and allow the VM to clean up or page out the backing store.
+ *
+ * Non GEM backed objects treat this as a no-op as they are always GTT
+ * backed objects.
+ */
+void psb_gtt_unpin(struct gtt_range *gt)
+{
+ struct drm_device *dev = gt->gem.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 gpu_base = dev_priv->gtt.gatt_start;
+ int ret;
+
+ /* While holding the gtt_mutex no new blits can be initiated */
+ mutex_lock(&dev_priv->gtt_mutex);
+
+ /* Wait for any possible usage of the memory to be finished */
+ ret = gma_blt_wait_idle(dev_priv);
+ if (ret) {
+		DRM_ERROR("Failed to idle the blitter, unpin failed!\n");
+ goto out;
+ }
+
+ WARN_ON(!gt->in_gart);
+
+ gt->in_gart--;
+ if (gt->in_gart == 0 && gt->stolen == 0) {
+ psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
+ (gpu_base + gt->offset), gt->npage, 0, 0);
+ psb_gtt_remove(dev, gt);
+ psb_gtt_detach_pages(gt);
+ }
+
+out:
+ mutex_unlock(&dev_priv->gtt_mutex);
+}
+
+/*
+ * GTT resource allocator - allocate and manage GTT address space
+ */
+
+/**
+ * psb_gtt_alloc_range - allocate GTT address space
+ * @dev: Our DRM device
+ * @len: length (bytes) of address space required
+ * @name: resource name
+ * @backed: resource should be backed by stolen pages
+ * @align: requested alignment of the allocation
+ *
+ * Ask the kernel core to find us a suitable range of addresses
+ * to use for a GTT mapping.
+ *
+ * Returns a gtt_range structure describing the object, or NULL on
+ * error. On successful return the resource is both allocated and marked
+ * as in use.
+ */
+struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
+ const char *name, int backed, u32 align)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gtt_range *gt;
+ struct resource *r = dev_priv->gtt_mem;
+ int ret;
+ unsigned long start, end;
+
+ if (backed) {
+ /* The start of the GTT is the stolen pages */
+ start = r->start;
+ end = r->start + dev_priv->gtt.stolen_size - 1;
+ } else {
+ /* The rest we will use for GEM backed objects */
+ start = r->start + dev_priv->gtt.stolen_size;
+ end = r->end;
+ }
+
+ gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
+ if (gt == NULL)
+ return NULL;
+ gt->resource.name = name;
+ gt->stolen = backed;
+ gt->in_gart = backed;
+ gt->roll = 0;
+ /* Ensure this is set for non GEM objects */
+ gt->gem.dev = dev;
+ ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
+ len, start, end, align, NULL, NULL);
+ if (ret == 0) {
+ gt->offset = gt->resource.start - r->start;
+ return gt;
+ }
+ kfree(gt);
+ return NULL;
+}
+
+/**
+ * psb_gtt_free_range - release GTT address space
+ * @dev: our DRM device
+ * @gt: a mapping created with psb_gtt_alloc_range
+ *
+ * Release a resource that was allocated with psb_gtt_alloc_range. If the
+ * object is still pinned by an mmap user, that pin is dropped here first.
+ */
+void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
+{
+ /* Undo the mmap pin if we are destroying the object */
+ if (gt->mmapping) {
+ psb_gtt_unpin(gt);
+ gt->mmapping = 0;
+ }
+ WARN_ON(gt->in_gart && !gt->stolen);
+ release_resource(&gt->resource);
+ kfree(gt);
+}
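
Allocation and release bracket a range's lifetime. A hedged sketch of creating a page-aligned, GEM-backed (non-stolen) range and releasing it; the resource name is illustrative:

	/* Illustrative fragment, inside a caller that has dev and size: */
	struct gtt_range *gt;

	gt = psb_gtt_alloc_range(dev, PAGE_ALIGN(size), "example", 0, PAGE_SIZE);
	if (gt == NULL)
		return -ENOMEM;
	/* ... psb_gtt_pin()/unpin() and use the range here ... */
	psb_gtt_free_range(dev, gt);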
+
+static void psb_gtt_alloc(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ init_rwsem(&dev_priv->gtt.sem);
+}
+
+void psb_gtt_takedown(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->gtt_map) {
+ iounmap(dev_priv->gtt_map);
+ dev_priv->gtt_map = NULL;
+ }
+ if (dev_priv->gtt_initialized) {
+ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+ dev_priv->gmch_ctrl);
+ PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
+ (void) PSB_RVDC32(PSB_PGETBL_CTL);
+ }
+	if (dev_priv->vram_addr)
+		iounmap(dev_priv->vram_addr);
+}
+
+int psb_gtt_init(struct drm_device *dev, int resume)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned gtt_pages;
+ unsigned long stolen_size, vram_stolen_size;
+ unsigned i, num_pages;
+ unsigned pfn_base;
+ struct psb_gtt *pg;
+
+ int ret = 0;
+ uint32_t pte;
+
+ if (!resume) {
+ mutex_init(&dev_priv->gtt_mutex);
+ psb_gtt_alloc(dev);
+ }
+
+ pg = &dev_priv->gtt;
+
+ /* Enable the GTT */
+ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
+ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+ dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
+
+ dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
+ PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+ (void) PSB_RVDC32(PSB_PGETBL_CTL);
+
+ /* The root resource we allocate address space from */
+ dev_priv->gtt_initialized = 1;
+
+ pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
+
+ /*
+	 * The video mmu has a hw bug when accessing 0xD0000000.
+	 * Make gatt start at 0xE0000000. This doesn't actually
+ * matter for us but may do if the video acceleration ever
+ * gets opened up.
+ */
+ pg->mmu_gatt_start = 0xE0000000;
+
+ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
+ gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
+ >> PAGE_SHIFT;
+	/* CDV doesn't report this; in that case the system has 64 GTT pages */
+ if (pg->gtt_start == 0 || gtt_pages == 0) {
+ dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
+ gtt_pages = 64;
+ pg->gtt_start = dev_priv->pge_ctl;
+ }
+
+ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
+ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
+ >> PAGE_SHIFT;
+ dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
+
+ if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
+ static struct resource fudge; /* Preferably peppermint */
+ /* This can occur on CDV systems. Fudge it in this case.
+ We really don't care what imaginary space is being allocated
+ at this point */
+ dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
+ pg->gatt_start = 0x40000000;
+ pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
+ /* This is a little confusing but in fact the GTT is providing
+ a view from the GPU into memory and not vice versa. As such
+ this is really allocating space that is not the same as the
+ CPU address space on CDV */
+ fudge.start = 0x40000000;
+ fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
+ fudge.name = "fudge";
+ fudge.flags = IORESOURCE_MEM;
+ dev_priv->gtt_mem = &fudge;
+ }
+
+ pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
+ vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
+ - PAGE_SIZE;
+
+ stolen_size = vram_stolen_size;
+
+ dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
+ dev_priv->stolen_base, vram_stolen_size / 1024);
+
+	if (resume && ((gtt_pages != pg->gtt_pages) ||
+		       (stolen_size != pg->stolen_size))) {
+ dev_err(dev->dev, "GTT resume error.\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ pg->gtt_pages = gtt_pages;
+ pg->stolen_size = stolen_size;
+ dev_priv->vram_stolen_size = vram_stolen_size;
+
+ /*
+ * Map the GTT and the stolen memory area
+ */
+ if (!resume)
+ dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+ gtt_pages << PAGE_SHIFT);
+ if (!dev_priv->gtt_map) {
+ dev_err(dev->dev, "Failure to map gtt.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ if (!resume)
+ dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
+ stolen_size);
+
+ if (!dev_priv->vram_addr) {
+ dev_err(dev->dev, "Failure to map stolen base.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ /*
+ * Insert vram stolen pages into the GTT
+ */
+
+ pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
+ num_pages = vram_stolen_size >> PAGE_SHIFT;
+ dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
+ num_pages, pfn_base << PAGE_SHIFT, 0);
+ for (i = 0; i < num_pages; ++i) {
+ pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
+ iowrite32(pte, dev_priv->gtt_map + i);
+ }
+
+ /*
+ * Init rest of GTT to the scratch page to avoid accidents or scribbles
+ */
+
+ pfn_base = page_to_pfn(dev_priv->scratch_page);
+ pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);
+ for (; i < gtt_pages; ++i)
+ iowrite32(pte, dev_priv->gtt_map + i);
+
+ (void) ioread32(dev_priv->gtt_map + i - 1);
+ return 0;
+
+out_err:
+ psb_gtt_takedown(dev);
+ return ret;
+}
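
Each GTT entry is a 32-bit PTE, one per page, written through the ioremapped gtt_map exactly as in the two loops above. A hedged sketch of rewriting a single slot (example_write_pte is hypothetical; psb_gtt_mask_pte is defined elsewhere in this driver and is assumed to fold the caching type into the entry):

	/* Illustrative sketch: repoint one GTT slot at a new page frame. */
	static void example_write_pte(struct drm_psb_private *dev_priv,
				      unsigned int slot, unsigned long pfn)
	{
		u32 pte = psb_gtt_mask_pte(pfn, PSB_MMU_CACHED_MEMORY);

		iowrite32(pte, dev_priv->gtt_map + slot);
		(void) ioread32(dev_priv->gtt_map + slot);	/* post the write */
	}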
+
+int psb_gtt_restore(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct resource *r = dev_priv->gtt_mem->child;
+ struct gtt_range *range;
+ unsigned int restored = 0, total = 0, size = 0;
+
+ /* On resume, the gtt_mutex is already initialized */
+ mutex_lock(&dev_priv->gtt_mutex);
+ psb_gtt_init(dev, 1);
+
+ while (r != NULL) {
+ range = container_of(r, struct gtt_range, resource);
+ if (range->pages) {
+ psb_gtt_insert(dev, range, 1);
+			size += resource_size(&range->resource);
+ restored++;
+ }
+ r = r->sibling;
+ total++;
+ }
+ mutex_unlock(&dev_priv->gtt_mutex);
+ DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)", restored,
+ total, (size / 1024));
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
new file mode 100644
index 00000000000..f5860a739bd
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -0,0 +1,65 @@
+/**************************************************************************
+ * Copyright (c) 2007-2008, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_GTT_H_
+#define _PSB_GTT_H_
+
+#include <drm/drmP.h>
+
+/* This wants cleaning up with respect to the psb_dev and un-needed stuff */
+struct psb_gtt {
+ uint32_t gatt_start;
+ uint32_t mmu_gatt_start;
+ uint32_t gtt_start;
+ uint32_t gtt_phys_start;
+ unsigned gtt_pages;
+ unsigned gatt_pages;
+ unsigned long stolen_size;
+ unsigned long vram_stolen_size;
+ struct rw_semaphore sem;
+};
+
+/* Exported functions */
+extern int psb_gtt_init(struct drm_device *dev, int resume);
+extern void psb_gtt_takedown(struct drm_device *dev);
+
+/* Each gtt_range describes an allocation in the GTT area */
+struct gtt_range {
+ struct resource resource; /* Resource for our allocation */
+ u32 offset; /* GTT offset of our object */
+ struct drm_gem_object gem; /* GEM high level stuff */
+ int in_gart; /* Currently in the GART (ref ct) */
+ bool stolen; /* Backed from stolen RAM */
+ bool mmapping; /* Is mmappable */
+ struct page **pages; /* Backing pages if present */
+ int npage; /* Number of backing pages */
+ int roll; /* Roll applied to the GTT entries */
+};
+
+extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
+ const char *name, int backed,
+ u32 align);
+extern void psb_gtt_kref_put(struct gtt_range *gt);
+extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
+extern int psb_gtt_pin(struct gtt_range *gt);
+extern void psb_gtt_unpin(struct gtt_range *gt);
+extern void psb_gtt_roll(struct drm_device *dev,
+ struct gtt_range *gt, int roll);
+extern int psb_gtt_restore(struct drm_device *dev);
+#endif
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
new file mode 100644
index 00000000000..d3497348c4d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -0,0 +1,597 @@
+/*
+ * Copyright (c) 2006 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/gma_drm.h>
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+
+#define SLAVE_ADDR1 0x70
+#define SLAVE_ADDR2 0x72
+
+static void *find_section(struct bdb_header *bdb, int section_id)
+{
+ u8 *base = (u8 *)bdb;
+ int index = 0;
+ u16 total, current_size;
+ u8 current_id;
+
+ /* skip to first section */
+ index += bdb->header_size;
+ total = bdb->bdb_size;
+
+ /* walk the sections looking for section_id */
+ while (index < total) {
+ current_id = *(base + index);
+ index++;
+ current_size = *((u16 *)(base + index));
+ index += 2;
+ if (current_id == section_id)
+ return base + index;
+ index += current_size;
+ }
+
+ return NULL;
+}
+
+static void
+parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
+{
+ struct bdb_edp *edp;
+ struct edp_power_seq *edp_pps;
+ struct edp_link_params *edp_link_params;
+ uint8_t panel_type;
+
+ edp = find_section(bdb, BDB_EDP);
+
+ dev_priv->edp.bpp = 18;
+ if (!edp) {
+ if (dev_priv->edp.support) {
+ DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported, assume %dbpp panel color depth.\n",
+ dev_priv->edp.bpp);
+ }
+ return;
+ }
+
+ panel_type = dev_priv->panel_type;
+ switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+ case EDP_18BPP:
+ dev_priv->edp.bpp = 18;
+ break;
+ case EDP_24BPP:
+ dev_priv->edp.bpp = 24;
+ break;
+ case EDP_30BPP:
+ dev_priv->edp.bpp = 30;
+ break;
+ }
+
+ /* Get the eDP sequencing and link info */
+ edp_pps = &edp->power_seqs[panel_type];
+ edp_link_params = &edp->link_params[panel_type];
+
+ dev_priv->edp.pps = *edp_pps;
+
+ DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8,
+ dev_priv->edp.pps.t9, dev_priv->edp.pps.t10,
+ dev_priv->edp.pps.t11_t12);
+
+ dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+ DP_LINK_BW_1_62;
+ switch (edp_link_params->lanes) {
+ case 0:
+ dev_priv->edp.lanes = 1;
+ break;
+ case 1:
+ dev_priv->edp.lanes = 2;
+ break;
+ case 3:
+ default:
+ dev_priv->edp.lanes = 4;
+ break;
+ }
+ DRM_DEBUG_KMS("VBT reports EDP: Lane_count %d, Lane_rate %d, Bpp %d\n",
+ dev_priv->edp.lanes, dev_priv->edp.rate, dev_priv->edp.bpp);
+
+ switch (edp_link_params->preemphasis) {
+ case 0:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+ break;
+ case 1:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+ break;
+ case 2:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+ break;
+ case 3:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+ break;
+ }
+ switch (edp_link_params->vswing) {
+ case 0:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+ break;
+ case 1:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+ break;
+ case 2:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+ break;
+ case 3:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+ break;
+ }
+ DRM_DEBUG_KMS("VBT reports EDP: VSwing %d, Preemph %d\n",
+ dev_priv->edp.vswing, dev_priv->edp.preemphasis);
+}
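
The VBT packs one 2-bit colour-depth code per panel into the 32-bit color_depth word, which is why the switch above shifts by panel_type * 2. A worked fragment with illustrative values:

	/* Illustrative: color_depth = 0x00000009, panel_type = 1 */
	int code = (0x00000009 >> (1 * 2)) & 3;	/* == 2 == EDP_30BPP, 30 bpp */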
+
+static u16
+get_blocksize(void *p)
+{
+ u16 *block_ptr, block_size;
+
+ block_ptr = (u16 *)((char *)p - 2);
+ block_size = *block_ptr;
+ return block_size;
+}
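
find_section() returns a pointer just past a block's 3-byte ID/size header, so get_blocksize() can read the payload size back from the two bytes immediately before it. A hedged lookup sketch:

	/* Illustrative fragment: look up a block and keep its size for checks. */
	struct bdb_lvds_options *opts = find_section(bdb, BDB_LVDS_OPTIONS);

	if (opts) {
		u16 size = get_blocksize(opts);	/* payload size from the header */
		/* ... validate offsets against 'size' before dereferencing ... */
	}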
+
+static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
+ struct lvds_dvo_timing *dvo_timing)
+{
+ panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+ dvo_timing->hactive_lo;
+ panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+ panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+ dvo_timing->hsync_pulse_width;
+ panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+ panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+ dvo_timing->vactive_lo;
+ panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+ dvo_timing->vsync_off;
+ panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+ dvo_timing->vsync_pulse_width;
+ panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+ ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+ panel_fixed_mode->clock = dvo_timing->clock * 10;
+ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+ if (dvo_timing->hsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dvo_timing->vsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
+ /* Some VBTs have bogus h/vtotal values */
+ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
+ panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
+ if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
+ panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+
+ drm_mode_set_name(panel_fixed_mode);
+}
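
The DTD splits each timing value into lo/hi parts that the helper above reassembles, and the clock is stored in 10 kHz units. A hedged numeric sketch (example_decode_dtd and its values are illustrative) for a 1024-wide panel:

	static void example_decode_dtd(void)
	{
		struct lvds_dvo_timing t = {
			.clock = 4500,		/* 4500 * 10 kHz = 45 MHz */
			.hactive_hi = 0x4,
			.hactive_lo = 0x00,	/* hactive = 0x400 = 1024 */
		};
		int hdisplay = (t.hactive_hi << 8) | t.hactive_lo;	/* 1024 */
		int clock_khz = t.clock * 10;				/* 45000 */

		(void)hdisplay; (void)clock_khz;	/* silence unused warnings */
	}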
+
+static void parse_backlight_data(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_lvds_backlight *vbt_lvds_bl = NULL;
+ struct bdb_lvds_backlight *lvds_bl;
+ u8 p_type = 0;
+ void *bl_start = NULL;
+ struct bdb_lvds_options *lvds_opts
+ = find_section(bdb, BDB_LVDS_OPTIONS);
+
+ dev_priv->lvds_bl = NULL;
+
+	if (!lvds_opts)
+		return;
+
+	p_type = lvds_opts->panel_type;
+
+	bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
+	if (!bl_start)
+		return;
+	vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
+
+ lvds_bl = kmemdup(vbt_lvds_bl, sizeof(*vbt_lvds_bl), GFP_KERNEL);
+ if (!lvds_bl) {
+ dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
+ return;
+ }
+ dev_priv->lvds_bl = lvds_bl;
+}
+
+/* Try to find integrated panel data */
+static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_lvds_options *lvds_options;
+ struct bdb_lvds_lfp_data *lvds_lfp_data;
+ struct bdb_lvds_lfp_data_entry *entry;
+ struct lvds_dvo_timing *dvo_timing;
+ struct drm_display_mode *panel_fixed_mode;
+
+ /* Defaults if we can't find VBT info */
+ dev_priv->lvds_dither = 0;
+ dev_priv->lvds_vbt = 0;
+
+ lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+ if (!lvds_options)
+ return;
+
+ dev_priv->lvds_dither = lvds_options->pixel_dither;
+ dev_priv->panel_type = lvds_options->panel_type;
+
+ if (lvds_options->panel_type == 0xff)
+ return;
+
+ lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+ if (!lvds_lfp_data)
+ return;
+
+ entry = &lvds_lfp_data->data[lvds_options->panel_type];
+ dvo_timing = &entry->dvo_timing;
+
+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
+ GFP_KERNEL);
+ if (panel_fixed_mode == NULL) {
+ dev_err(dev_priv->dev->dev, "out of memory for fixed panel mode\n");
+ return;
+ }
+
+ dev_priv->lvds_vbt = 1;
+ fill_detail_timing_data(panel_fixed_mode, dvo_timing);
+
+ if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
+ dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+ } else {
+ dev_dbg(dev_priv->dev->dev, "ignoring invalid LVDS VBT\n");
+ dev_priv->lvds_vbt = 0;
+ kfree(panel_fixed_mode);
+ }
+}
+
+/* Try to find sdvo panel data */
+static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+ struct lvds_dvo_timing *dvo_timing;
+ struct drm_display_mode *panel_fixed_mode;
+
+ dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+ if (!sdvo_lvds_options)
+ return;
+
+ dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+ if (!dvo_timing)
+ return;
+
+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+
+ if (!panel_fixed_mode)
+ return;
+
+ fill_detail_timing_data(panel_fixed_mode,
+ dvo_timing + sdvo_lvds_options->panel_type);
+
+ dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
+}
+
+static void parse_general_features(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_features *general;
+
+ /* Set sensible defaults in case we can't find the general block */
+ dev_priv->int_tv_support = 1;
+ dev_priv->int_crt_support = 1;
+
+ general = find_section(bdb, BDB_GENERAL_FEATURES);
+ if (general) {
+ dev_priv->int_tv_support = general->int_tv_support;
+ dev_priv->int_crt_support = general->int_crt_support;
+ dev_priv->lvds_use_ssc = general->enable_ssc;
+
+ if (dev_priv->lvds_use_ssc) {
+ dev_priv->lvds_ssc_freq
+ = general->ssc_freq ? 100 : 96;
+ }
+ }
+}
+
+static void
+parse_sdvo_device_mapping(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct sdvo_device_mapping *p_mapping;
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
+ return;
+ }
+	/* Check whether the child device size meets the requirements.
+	 * If the child device size obtained from the general definition
+	 * block differs from sizeof(struct child_device_config), skip
+	 * the parsing of the sdvo device info.
+	 */
+ if (p_defs->child_dev_size != sizeof(*p_child)) {
+		/* Different child dev size. Ignore it. */
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+ /* get the number of child device */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ if (p_child->slave_addr != SLAVE_ADDR1 &&
+ p_child->slave_addr != SLAVE_ADDR2) {
+ /*
+ * If the slave address is neither 0x70 nor 0x72,
+ * it is not a SDVO device. Skip it.
+ */
+ continue;
+ }
+ if (p_child->dvo_port != DEVICE_PORT_DVOB &&
+ p_child->dvo_port != DEVICE_PORT_DVOC) {
+ /* skip the incorrect SDVO port */
+ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
+ continue;
+ }
+ DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
+ " %s port\n",
+ p_child->slave_addr,
+ (p_child->dvo_port == DEVICE_PORT_DVOB) ?
+ "SDVOB" : "SDVOC");
+ p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
+ if (!p_mapping->initialized) {
+ p_mapping->dvo_port = p_child->dvo_port;
+ p_mapping->slave_addr = p_child->slave_addr;
+ p_mapping->dvo_wiring = p_child->dvo_wiring;
+ p_mapping->ddc_pin = p_child->ddc_pin;
+ p_mapping->i2c_pin = p_child->i2c_pin;
+ p_mapping->initialized = 1;
+ DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+ p_mapping->dvo_port,
+ p_mapping->slave_addr,
+ p_mapping->dvo_wiring,
+ p_mapping->ddc_pin,
+ p_mapping->i2c_pin);
+ } else {
+			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
+					"two SDVO devices.\n");
+ }
+ if (p_child->slave2_addr) {
+			/* Maybe this is an SDVO device with multiple inputs,
+			 * and the mapping info has not been added. */
+			DRM_DEBUG_KMS("slave2_addr is set; this may be an SDVO"
+				" device with multiple inputs.\n");
+ }
+ count++;
+ }
+
+ if (!count) {
+ /* No SDVO device info is found */
+ DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
+ }
+}
+
+static void
+parse_driver_features(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_driver_features *driver;
+
+ driver = find_section(bdb, BDB_DRIVER_FEATURES);
+ if (!driver)
+ return;
+
+ if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ dev_priv->edp.support = 1;
+
+ /* This bit means to use 96Mhz for DPLL_A or not */
+ if (driver->primary_lfp_id)
+ dev_priv->dplla_96mhz = true;
+ else
+ dev_priv->dplla_96mhz = false;
+}
+
+static void
+parse_device_mapping(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child, *child_dev_ptr;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
+ return;
+ }
+	/* Check whether the child device size meets the requirements.
+	 * If the child device size obtained from the general definition
+	 * block differs from sizeof(struct child_device_config), skip
+	 * the parsing of the device info.
+	 */
+ if (p_defs->child_dev_size != sizeof(*p_child)) {
+		/* Different child dev size. Ignore it. */
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+ /* get the number of child device */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ /* get the number of child devices that are present */
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ count++;
+ }
+ if (!count) {
+ DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
+ return;
+ }
+ dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
+ if (!dev_priv->child_dev) {
+ DRM_DEBUG_KMS("No memory space for child devices\n");
+ return;
+ }
+
+ dev_priv->child_dev_num = count;
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ child_dev_ptr = dev_priv->child_dev + count;
+ count++;
+ memcpy((void *)child_dev_ptr, (void *)p_child,
+ sizeof(*p_child));
+ }
+}
+
+/**
+ * psb_intel_init_bios - initialize VBIOS settings & find VBT
+ * @dev: DRM device
+ *
+ * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
+ * to appropriate values.
+ *
+ * VBT existence is a sanity check that is relied on by other i830_bios.c code.
+ * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
+ * feed an updated VBT back through that, compared to what we'll fetch using
+ * this method of groping around in the BIOS data.
+ *
+ * Returns 0 on success, nonzero on failure.
+ */
+int psb_intel_init_bios(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ struct vbt_header *vbt = NULL;
+ struct bdb_header *bdb = NULL;
+ u8 __iomem *bios = NULL;
+ size_t size;
+ int i;
+
+ dev_priv->panel_type = 0xff;
+
+ /* XXX Should this validation be moved to intel_opregion.c? */
+ if (dev_priv->opregion.vbt) {
+ struct vbt_header *vbt = dev_priv->opregion.vbt;
+ if (memcmp(vbt->signature, "$VBT", 4) == 0) {
+ DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
+ vbt->signature);
+ bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
+ } else
+ dev_priv->opregion.vbt = NULL;
+ }
+
+ if (bdb == NULL) {
+ bios = pci_map_rom(pdev, &size);
+ if (!bios)
+ return -1;
+
+ /* Scour memory looking for the VBT signature */
+ for (i = 0; i + 4 < size; i++) {
+ if (!memcmp(bios + i, "$VBT", 4)) {
+ vbt = (struct vbt_header *)(bios + i);
+ break;
+ }
+ }
+
+ if (!vbt) {
+ dev_err(dev->dev, "VBT signature missing\n");
+ pci_unmap_rom(pdev, bios);
+ return -1;
+ }
+ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+ }
+
+	/* Grab useful general definitions */
+ parse_general_features(dev_priv, bdb);
+ parse_driver_features(dev_priv, bdb);
+ parse_lfp_panel_data(dev_priv, bdb);
+ parse_sdvo_panel_data(dev_priv, bdb);
+ parse_sdvo_device_mapping(dev_priv, bdb);
+ parse_device_mapping(dev_priv, bdb);
+ parse_backlight_data(dev_priv, bdb);
+ parse_edp(dev_priv, bdb);
+
+ if (bios)
+ pci_unmap_rom(pdev, bios);
+
+ return 0;
+}
+
+/**
+ * psb_intel_destroy_bios - destroy and free VBT data
+ * @dev: DRM device
+ */
+void psb_intel_destroy_bios(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ kfree(dev_priv->sdvo_lvds_vbt_mode);
+ kfree(dev_priv->lfp_lvds_vbt_mode);
+ kfree(dev_priv->lvds_bl);
+}
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
new file mode 100644
index 00000000000..978ae4b25e8
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -0,0 +1,621 @@
+/*
+ * Copyright (c) 2006 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _INTEL_BIOS_H_
+#define _INTEL_BIOS_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_dp_helper.h>
+
+struct vbt_header {
+	u8 signature[20];		/**< Always starts with "$VBT" */
+ u16 version; /**< decimal */
+ u16 header_size; /**< in bytes */
+ u16 vbt_size; /**< in bytes */
+ u8 vbt_checksum;
+ u8 reserved0;
+ u32 bdb_offset; /**< from beginning of VBT */
+ u32 aim_offset[4]; /**< from beginning of VBT */
+} __attribute__((packed));
+
+struct bdb_header {
+ u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
+ u16 version; /**< decimal */
+ u16 header_size; /**< in bytes */
+ u16 bdb_size; /**< in bytes */
+};
+
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+struct vbios_data {
+ u8 type; /* 0 == desktop, 1 == mobile */
+ u8 relstage;
+ u8 chipset;
+ u8 lvds_present:1;
+ u8 tv_present:1;
+ u8 rsvd2:6; /* finish byte */
+ u8 rsvd3[4];
+ u8 signon[155];
+ u8 copyright[61];
+ u16 code_segment;
+ u8 dos_boot_mode;
+ u8 bandwidth_percent;
+ u8 rsvd4; /* popup memory size */
+ u8 resize_pci_bios;
+ u8 rsvd5; /* is crt already on ddc2 */
+} __attribute__((packed));
+
+/*
+ * There are several types of BIOS data blocks (BDBs), each block has
+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
+ * Known types are listed below.
+ */
+#define BDB_GENERAL_FEATURES 1
+#define BDB_GENERAL_DEFINITIONS 2
+#define BDB_OLD_TOGGLE_LIST 3
+#define BDB_MODE_SUPPORT_LIST 4
+#define BDB_GENERIC_MODE_TABLE 5
+#define BDB_EXT_MMIO_REGS 6
+#define BDB_SWF_IO 7
+#define BDB_SWF_MMIO 8
+#define BDB_DOT_CLOCK_TABLE 9
+#define BDB_MODE_REMOVAL_TABLE 10
+#define BDB_CHILD_DEVICE_TABLE 11
+#define BDB_DRIVER_FEATURES 12
+#define BDB_DRIVER_PERSISTENCE 13
+#define BDB_EXT_TABLE_PTRS 14
+#define BDB_DOT_CLOCK_OVERRIDE 15
+#define BDB_DISPLAY_SELECT 16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION 18
+#define BDB_DISPLAY_REMOVE 19
+#define BDB_OEM_CUSTOM 20
+#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS 22
+#define BDB_SDVO_PANEL_DTDS 23
+#define BDB_SDVO_LVDS_PNP_IDS 24
+#define BDB_SDVO_LVDS_POWER_SEQ 25
+#define BDB_TV_OPTIONS 26
+#define BDB_EDP 27
+#define BDB_LVDS_OPTIONS 40
+#define BDB_LVDS_LFP_DATA_PTRS 41
+#define BDB_LVDS_LFP_DATA 42
+#define BDB_LVDS_BACKLIGHT 43
+#define BDB_LVDS_POWER 44
+#define BDB_SKIP 254 /* VBIOS private block, ignore */
+
+struct bdb_general_features {
+ /* bits 1 */
+ u8 panel_fitting:2;
+ u8 flexaim:1;
+ u8 msg_enable:1;
+ u8 clear_screen:3;
+ u8 color_flip:1;
+
+ /* bits 2 */
+ u8 download_ext_vbt:1;
+ u8 enable_ssc:1;
+ u8 ssc_freq:1;
+ u8 enable_lfp_on_override:1;
+ u8 disable_ssc_ddt:1;
+ u8 rsvd8:3; /* finish byte */
+
+ /* bits 3 */
+ u8 disable_smooth_vision:1;
+ u8 single_dvi:1;
+ u8 rsvd9:6; /* finish byte */
+
+ /* bits 4 */
+ u8 legacy_monitor_detect;
+
+ /* bits 5 */
+ u8 int_crt_support:1;
+ u8 int_tv_support:1;
+ u8 int_efp_support:1;
+ u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
+ u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
+ u8 rsvd11:3; /* finish byte */
+} __attribute__((packed));
+
+/* pre-915 */
+#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
+#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
+#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
+#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+
+/* Pre 915 */
+#define DEVICE_TYPE_NONE 0x00
+#define DEVICE_TYPE_CRT 0x01
+#define DEVICE_TYPE_TV 0x09
+#define DEVICE_TYPE_EFP 0x12
+#define DEVICE_TYPE_LFP 0x22
+/* On 915+ */
+#define DEVICE_TYPE_CRT_DPMS 0x6001
+#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
+#define DEVICE_TYPE_TV_COMPOSITE 0x0209
+#define DEVICE_TYPE_TV_MACROVISION 0x0289
+#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
+#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
+#define DEVICE_TYPE_TV_SCART 0x0209
+#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
+#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
+#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
+#define DEVICE_TYPE_EFP_DVI_I 0x6053
+#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
+#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
+#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
+#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
+#define DEVICE_TYPE_LFP_PANELLINK 0x5012
+#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
+#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
+#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
+#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
+
+#define DEVICE_CFG_NONE 0x00
+#define DEVICE_CFG_12BIT_DVOB 0x01
+#define DEVICE_CFG_12BIT_DVOC 0x02
+#define DEVICE_CFG_24BIT_DVOBC 0x09
+#define DEVICE_CFG_24BIT_DVOCB 0x0a
+#define DEVICE_CFG_DUAL_DVOB 0x11
+#define DEVICE_CFG_DUAL_DVOC 0x12
+#define DEVICE_CFG_DUAL_DVOBC 0x13
+#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
+#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
+
+#define DEVICE_WIRE_NONE 0x00
+#define DEVICE_WIRE_DVOB 0x01
+#define DEVICE_WIRE_DVOC 0x02
+#define DEVICE_WIRE_DVOBC 0x03
+#define DEVICE_WIRE_DVOBB 0x05
+#define DEVICE_WIRE_DVOCC 0x06
+#define DEVICE_WIRE_DVOB_MASTER 0x0d
+#define DEVICE_WIRE_DVOC_MASTER 0x0e
+
+#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
+#define DEVICE_PORT_DVOB 0x01
+#define DEVICE_PORT_DVOC 0x02
+
+struct child_device_config {
+ u16 handle;
+ u16 device_type;
+ u8 device_id[10]; /* ascii string */
+ u16 addin_offset;
+ u8 dvo_port; /* See Device_PORT_* above */
+ u8 i2c_pin;
+ u8 slave_addr;
+ u8 ddc_pin;
+ u16 edid_ptr;
+ u8 dvo_cfg; /* See DEVICE_CFG_* above */
+ u8 dvo2_port;
+ u8 i2c2_pin;
+ u8 slave2_addr;
+ u8 ddc2_pin;
+ u8 capabilities;
+ u8 dvo_wiring;/* See DEVICE_WIRE_* above */
+ u8 dvo2_wiring;
+ u16 extended_type;
+ u8 dvo_function;
+} __attribute__((packed));
+
+struct bdb_general_definitions {
+ /* DDC GPIO */
+ u8 crt_ddc_gmbus_pin;
+
+ /* DPMS bits */
+ u8 dpms_acpi:1;
+ u8 skip_boot_crt_detect:1;
+ u8 dpms_aim:1;
+ u8 rsvd1:5; /* finish byte */
+
+ /* boot device bits */
+ u8 boot_display[2];
+ u8 child_dev_size;
+
+ /*
+ * Device info:
+ * If TV is present, it'll be at devices[0].
+ * LVDS will be next, either devices[0] or [1], if present.
+	 * On some platforms the number of devices is 6, but it can be as few
+	 * as 4 if both TV and LVDS are missing. The device count depends on
+	 * the size of the general definition block and is obtained with:
+	 * number = (block_size - sizeof(bdb_general_definitions)) /
+	 *          sizeof(child_device_config);
+ */
+ struct child_device_config devices[0];
+};
+
+struct bdb_lvds_options {
+ u8 panel_type;
+ u8 rsvd1;
+ /* LVDS capabilities, stored in a dword */
+ u8 pfit_mode:2;
+ u8 pfit_text_mode_enhanced:1;
+ u8 pfit_gfx_mode_enhanced:1;
+ u8 pfit_ratio_auto:1;
+ u8 pixel_dither:1;
+ u8 lvds_edid:1;
+ u8 rsvd2:1;
+ u8 rsvd4;
+} __attribute__((packed));
+
+struct bdb_lvds_backlight {
+ u8 type:2;
+ u8 pol:1;
+ u8 gpio:3;
+ u8 gmbus:2;
+ u16 freq;
+ u8 minbrightness;
+ u8 i2caddr;
+ u8 brightnesscmd;
+ /*FIXME: more...*/
+} __attribute__((packed));
+
+/* LFP pointer table contains entries to the struct below */
+struct bdb_lvds_lfp_data_ptr {
+ u16 fp_timing_offset; /* offsets are from start of bdb */
+ u8 fp_table_size;
+ u16 dvo_timing_offset;
+ u8 dvo_table_size;
+ u16 panel_pnp_id_offset;
+ u8 pnp_table_size;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_ptrs {
+ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+ struct bdb_lvds_lfp_data_ptr ptr[16];
+} __attribute__((packed));
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+ u16 x_res;
+ u16 y_res;
+ u32 lvds_reg;
+ u32 lvds_reg_val;
+ u32 pp_on_reg;
+ u32 pp_on_reg_val;
+ u32 pp_off_reg;
+ u32 pp_off_reg_val;
+ u32 pp_cycle_reg;
+ u32 pp_cycle_reg_val;
+ u32 pfit_reg;
+ u32 pfit_reg_val;
+ u16 terminator;
+} __attribute__((packed));
+
+struct lvds_dvo_timing {
+ u16 clock; /**< In 10khz */
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hblank_hi:4;
+ u8 hactive_hi:4;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vblank_hi:4;
+ u8 vactive_hi:4;
+ u8 hsync_off_lo;
+ u8 hsync_pulse_width;
+ u8 vsync_pulse_width:4;
+ u8 vsync_off:4;
+ u8 rsvd0:6;
+ u8 hsync_off_hi:2;
+ u8 h_image;
+ u8 v_image;
+ u8 max_hv;
+ u8 h_border;
+ u8 v_border;
+ u8 rsvd1:3;
+ u8 digital:2;
+ u8 vsync_positive:1;
+ u8 hsync_positive:1;
+ u8 rsvd2:1;
+} __attribute__((packed));
+
+struct lvds_pnp_id {
+ u16 mfg_name;
+ u16 product_code;
+ u32 serial;
+ u8 mfg_week;
+ u8 mfg_year;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_entry {
+ struct lvds_fp_timing fp_timing;
+ struct lvds_dvo_timing dvo_timing;
+ struct lvds_pnp_id pnp_id;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data {
+ struct bdb_lvds_lfp_data_entry data[16];
+} __attribute__((packed));
+
+struct aimdb_header {
+ char signature[16];
+ char oem_device[20];
+ u16 aimdb_version;
+ u16 aimdb_header_size;
+ u16 aimdb_size;
+} __attribute__((packed));
+
+struct aimdb_block {
+ u8 aimdb_id;
+ u16 aimdb_size;
+} __attribute__((packed));
+
+struct vch_panel_data {
+ u16 fp_timing_offset;
+ u8 fp_timing_size;
+ u16 dvo_timing_offset;
+ u8 dvo_timing_size;
+ u16 text_fitting_offset;
+ u8 text_fitting_size;
+ u16 graphics_fitting_offset;
+ u8 graphics_fitting_size;
+} __attribute__((packed));
+
+struct vch_bdb_22 {
+ struct aimdb_block aimdb_block;
+ struct vch_panel_data panels[16];
+} __attribute__((packed));
+
+struct bdb_sdvo_lvds_options {
+ u8 panel_backlight;
+ u8 h40_set_panel_type;
+ u8 panel_type;
+ u8 ssc_clk_freq;
+ u16 als_low_trip;
+ u16 als_high_trip;
+ u8 sclalarcoeff_tab_row_num;
+ u8 sclalarcoeff_tab_row_size;
+ u8 coefficient[8];
+ u8 panel_misc_bits_1;
+ u8 panel_misc_bits_2;
+ u8 panel_misc_bits_3;
+ u8 panel_misc_bits_4;
+} __attribute__((packed));
+
+#define BDB_DRIVER_FEATURE_NO_LVDS 0
+#define BDB_DRIVER_FEATURE_INT_LVDS 1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
+#define BDB_DRIVER_FEATURE_EDP 3
+
+struct bdb_driver_features {
+ u8 boot_dev_algorithm:1;
+ u8 block_display_switch:1;
+ u8 allow_display_switch:1;
+ u8 hotplug_dvo:1;
+ u8 dual_view_zoom:1;
+ u8 int15h_hook:1;
+ u8 sprite_in_clone:1;
+ u8 primary_lfp_id:1;
+
+ u16 boot_mode_x;
+ u16 boot_mode_y;
+ u8 boot_mode_bpp;
+ u8 boot_mode_refresh;
+
+ u16 enable_lfp_primary:1;
+ u16 selective_mode_pruning:1;
+ u16 dual_frequency:1;
+ u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
+ u16 nt_clone_support:1;
+ u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
+ u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
+ u16 cui_aspect_scaling:1;
+ u16 preserve_aspect_ratio:1;
+ u16 sdvo_device_power_down:1;
+ u16 crt_hotplug:1;
+ u16 lvds_config:2;
+ u16 tv_hotplug:1;
+ u16 hdmi_config:2;
+
+ u8 static_display:1;
+ u8 reserved2:7;
+ u16 legacy_crt_max_x;
+ u16 legacy_crt_max_y;
+ u8 legacy_crt_max_refresh;
+
+ u8 hdmi_termination;
+ u8 custom_vbt_version;
+} __attribute__((packed));
+
+#define EDP_18BPP 0
+#define EDP_24BPP 1
+#define EDP_30BPP 2
+#define EDP_RATE_1_62 0
+#define EDP_RATE_2_7 1
+#define EDP_LANE_1 0
+#define EDP_LANE_2 1
+#define EDP_LANE_4 3
+#define EDP_PREEMPHASIS_NONE 0
+#define EDP_PREEMPHASIS_3_5dB 1
+#define EDP_PREEMPHASIS_6dB 2
+#define EDP_PREEMPHASIS_9_5dB 3
+#define EDP_VSWING_0_4V 0
+#define EDP_VSWING_0_6V 1
+#define EDP_VSWING_0_8V 2
+#define EDP_VSWING_1_2V 3
+
+struct edp_power_seq {
+ u16 t1_t3;
+ u16 t8;
+ u16 t9;
+ u16 t10;
+ u16 t11_t12;
+} __attribute__ ((packed));
+
+struct edp_link_params {
+ u8 rate:4;
+ u8 lanes:4;
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __attribute__ ((packed));
+
+struct bdb_edp {
+ struct edp_power_seq power_seqs[16];
+ u32 color_depth;
+ u32 sdrrs_msa_timing_delay;
+ struct edp_link_params link_params[16];
+} __attribute__ ((packed));
+
+extern int psb_intel_init_bios(struct drm_device *dev);
+extern void psb_intel_destroy_bios(struct drm_device *dev);
+
+/*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+ * GR18 & SWF*.
+ */
+
+/* GR18 bits are set on display switch and hotkey events */
+#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
+#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
+#define GR18_HK_NONE (0x0<<3)
+#define GR18_HK_LFP_STRETCH (0x1<<3)
+#define GR18_HK_TOGGLE_DISP (0x2<<3)
+#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
+#define GR18_HK_POPUP_DISABLED (0x6<<3)
+#define GR18_HK_POPUP_ENABLED (0x7<<3)
+#define GR18_HK_PFIT (0x8<<3)
+#define GR18_HK_APM_CHANGE (0xa<<3)
+#define GR18_HK_MULTIPLE (0xc<<3)
+#define GR18_USER_INT_EN (1<<2)
+#define GR18_A0000_FLUSH_EN (1<<1)
+#define GR18_SMM_EN (1<<0)
+
+/* Set by driver, cleared by VBIOS */
+#define SWF00_YRES_SHIFT 16
+#define SWF00_XRES_SHIFT 0
+#define SWF00_RES_MASK 0xffff
+
+/* Set by VBIOS at boot time and driver at runtime */
+#define SWF01_TV2_FORMAT_SHIFT 8
+#define SWF01_TV1_FORMAT_SHIFT 0
+#define SWF01_TV_FORMAT_MASK 0xffff
+
+#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
+#define SWF10_GTT_OVERRIDE_EN (1<<28)
+#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+#define SWF10_OLD_TOGGLE 0x0
+#define SWF10_TOGGLE_LIST_1 0x1
+#define SWF10_TOGGLE_LIST_2 0x2
+#define SWF10_TOGGLE_LIST_3 0x3
+#define SWF10_TOGGLE_LIST_4 0x4
+#define SWF10_PANNING_EN (1<<23)
+#define SWF10_DRIVER_LOADED (1<<22)
+#define SWF10_EXTENDED_DESKTOP (1<<21)
+#define SWF10_EXCLUSIVE_MODE (1<<20)
+#define SWF10_OVERLAY_EN (1<<19)
+#define SWF10_PLANEB_HOLDOFF (1<<18)
+#define SWF10_PLANEA_HOLDOFF (1<<17)
+#define SWF10_VGA_HOLDOFF (1<<16)
+#define SWF10_ACTIVE_DISP_MASK 0xffff
+#define SWF10_PIPEB_LFP2 (1<<15)
+#define SWF10_PIPEB_EFP2 (1<<14)
+#define SWF10_PIPEB_TV2 (1<<13)
+#define SWF10_PIPEB_CRT2 (1<<12)
+#define SWF10_PIPEB_LFP (1<<11)
+#define SWF10_PIPEB_EFP (1<<10)
+#define SWF10_PIPEB_TV (1<<9)
+#define SWF10_PIPEB_CRT (1<<8)
+#define SWF10_PIPEA_LFP2 (1<<7)
+#define SWF10_PIPEA_EFP2 (1<<6)
+#define SWF10_PIPEA_TV2 (1<<5)
+#define SWF10_PIPEA_CRT2 (1<<4)
+#define SWF10_PIPEA_LFP (1<<3)
+#define SWF10_PIPEA_EFP (1<<2)
+#define SWF10_PIPEA_TV (1<<1)
+#define SWF10_PIPEA_CRT (1<<0)
+
+#define SWF11_MEMORY_SIZE_SHIFT 16
+#define SWF11_SV_TEST_EN (1<<15)
+#define SWF11_IS_AGP (1<<14)
+#define SWF11_DISPLAY_HOLDOFF (1<<13)
+#define SWF11_DPMS_REDUCED (1<<12)
+#define SWF11_IS_VBE_MODE (1<<11)
+#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
+#define SWF11_DPMS_MASK 0x07
+#define SWF11_DPMS_OFF (1<<2)
+#define SWF11_DPMS_SUSPEND (1<<1)
+#define SWF11_DPMS_STANDBY (1<<0)
+#define SWF11_DPMS_ON 0
+
+#define SWF14_GFX_PFIT_EN (1<<31)
+#define SWF14_TEXT_PFIT_EN (1<<30)
+#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
+#define SWF14_POPUP_EN (1<<28)
+#define SWF14_DISPLAY_HOLDOFF (1<<27)
+#define SWF14_DISP_DETECT_EN (1<<26)
+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+#define SWF14_DRIVER_STATUS (1<<24)
+#define SWF14_OS_TYPE_WIN9X (1<<23)
+#define SWF14_OS_TYPE_WINNT (1<<22)
+/* 21:19 rsvd */
+#define SWF14_PM_TYPE_MASK 0x00070000
+#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
+#define SWF14_PM_ACPI (0x3 << 16)
+#define SWF14_PM_APM_12 (0x2 << 16)
+#define SWF14_PM_APM_11 (0x1 << 16)
+#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
+ /* if GR18 indicates a display switch */
+#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
+#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
+#define SWF14_DS_PIPEB_TV2_EN (1<<13)
+#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
+#define SWF14_DS_PIPEB_LFP_EN (1<<11)
+#define SWF14_DS_PIPEB_EFP_EN (1<<10)
+#define SWF14_DS_PIPEB_TV_EN (1<<9)
+#define SWF14_DS_PIPEB_CRT_EN (1<<8)
+#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
+#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
+#define SWF14_DS_PIPEA_TV2_EN (1<<5)
+#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
+#define SWF14_DS_PIPEA_LFP_EN (1<<3)
+#define SWF14_DS_PIPEA_EFP_EN (1<<2)
+#define SWF14_DS_PIPEA_TV_EN (1<<1)
+#define SWF14_DS_PIPEA_CRT_EN (1<<0)
+ /* if GR18 indicates a panel fitting request */
+#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
+ /* if GR18 indicates an APM change request */
+#define SWF14_APM_HIBERNATE 0x4
+#define SWF14_APM_SUSPEND 0x3
+#define SWF14_APM_STANDBY 0x1
+#define SWF14_APM_RESTORE 0x0
+
+/* Add the device class for LFP, TV, HDMI */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_eDP 0x78C6
+
+/* define the DVO port for HDMI output type */
+#define DVO_B 1
+#define DVO_C 2
+#define DVO_D 3
+
+/* define the PORT for DP output type */
+#define PORT_IDPB 7
+#define PORT_IDPC 8
+#define PORT_IDPD 9
+
+#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
new file mode 100644
index 00000000000..566d330aaee
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -0,0 +1,500 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2008,2010 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <drm/drmP.h>
+#include "psb_intel_drv.h"
+#include <drm/gma_drm.h>
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+#define _wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+ while (! (COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W && !(in_atomic() || in_dbg_master())) msleep(W); \
+ } \
+ ret__; \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
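
wait_for() polls COND roughly once a millisecond until MS milliseconds elapse, evaluating to 0 on success and -ETIMEDOUT on timeout; wait_for_atomic() is the same without sleeping. A fragment mirroring the ready-bit polling later in this file (it requires dev_priv in scope, as in gmbus_xfer()):

	/* Illustrative: give the controller 50 ms to raise its ready bit. */
	if (wait_for(GMBUS_REG_READ(GMBUS2) & GMBUS_HW_RDY, 50))
		return -ETIMEDOUT;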
+
+#define GMBUS_REG_READ(reg) ioread32(dev_priv->gmbus_reg + (reg))
+#define GMBUS_REG_WRITE(reg, val) iowrite32((val), dev_priv->gmbus_reg + (reg))
+
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 20
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+ return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+struct intel_gpio {
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data algo;
+ struct drm_psb_private *dev_priv;
+ u32 reg;
+};
+
+void
+gma_intel_i2c_reset(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ GMBUS_REG_WRITE(GMBUS0, 0);
+}
+
+static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
+{
+ /* When using bit bashing for I2C, this bit needs to be set to 1 */
+ /* FIXME: We are never Pineview, right?
+
+ u32 val;
+
+ if (!IS_PINEVIEW(dev_priv->dev))
+ return;
+
+ val = REG_READ(DSPCLK_GATE_D);
+ if (enable)
+ val |= DPCUNIT_CLOCK_GATE_DISABLE;
+ else
+ val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+ REG_WRITE(DSPCLK_GATE_D, val);
+
+ return;
+ */
+}
+
+static u32 get_reserved(struct intel_gpio *gpio)
+{
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ u32 reserved = 0;
+
+ /* On most chips, these bits must be preserved in software. */
+ reserved = GMBUS_REG_READ(gpio->reg) &
+ (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ return reserved;
+}
+
+static int get_clock(void *data)
+{
+ struct intel_gpio *gpio = data;
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+ GMBUS_REG_WRITE(gpio->reg, reserved);
+ return (GMBUS_REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+ struct intel_gpio *gpio = data;
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+ GMBUS_REG_WRITE(gpio->reg, reserved);
+ return (GMBUS_REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+}
+
+static void set_clock(void *data, int state_high)
+{
+ struct intel_gpio *gpio = data;
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ u32 clock_bits;
+
+ if (state_high)
+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+ else
+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+ GPIO_CLOCK_VAL_MASK;
+
+ GMBUS_REG_WRITE(gpio->reg, reserved | clock_bits);
+ GMBUS_REG_READ(gpio->reg); /* Posting */
+}
+
+static void set_data(void *data, int state_high)
+{
+ struct intel_gpio *gpio = data;
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ u32 data_bits;
+
+ if (state_high)
+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+ else
+ data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+ GPIO_DATA_VAL_MASK;
+
+ GMBUS_REG_WRITE(gpio->reg, reserved | data_bits);
+ GMBUS_REG_READ(gpio->reg);
+}
+
+static struct i2c_adapter *
+intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
+{
+ static const int map_pin_to_reg[] = {
+ 0,
+ GPIOB,
+ GPIOA,
+ GPIOC,
+ GPIOD,
+ GPIOE,
+ 0,
+ GPIOF,
+ };
+ struct intel_gpio *gpio;
+
+ if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
+ return NULL;
+
+ gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
+ if (gpio == NULL)
+ return NULL;
+
+ gpio->reg = map_pin_to_reg[pin];
+ gpio->dev_priv = dev_priv;
+
+ snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
+ "gma500 GPIO%c", "?BACDE?F"[pin]);
+ gpio->adapter.owner = THIS_MODULE;
+ gpio->adapter.algo_data = &gpio->algo;
+ gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+ gpio->algo.setsda = set_data;
+ gpio->algo.setscl = set_clock;
+ gpio->algo.getsda = get_data;
+ gpio->algo.getscl = get_clock;
+ gpio->algo.udelay = I2C_RISEFALL_TIME;
+ gpio->algo.timeout = usecs_to_jiffies(2200);
+ gpio->algo.data = gpio;
+
+ if (i2c_bit_add_bus(&gpio->adapter))
+ goto out_free;
+
+ return &gpio->adapter;
+
+out_free:
+ kfree(gpio);
+ return NULL;
+}
+
+static int
+intel_i2c_quirk_xfer(struct drm_psb_private *dev_priv,
+ struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_gpio *gpio = container_of(adapter,
+ struct intel_gpio,
+ adapter);
+ int ret;
+
+ gma_intel_i2c_reset(dev_priv->dev);
+
+ intel_i2c_quirk_set(dev_priv, true);
+ set_data(gpio, 1);
+ set_clock(gpio, 1);
+ udelay(I2C_RISEFALL_TIME);
+
+ ret = adapter->algo->master_xfer(adapter, msgs, num);
+
+ set_data(gpio, 1);
+ set_clock(gpio, 1);
+ intel_i2c_quirk_set(dev_priv, false);
+
+ return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_psb_private *dev_priv = adapter->algo_data;
+ int i, reg_offset;
+
+ if (bus->force_bit)
+ return intel_i2c_quirk_xfer(dev_priv,
+ bus->force_bit, msgs, num);
+
+ reg_offset = 0;
+
+ GMBUS_REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
+
+ for (i = 0; i < num; i++) {
+ u16 len = msgs[i].len;
+ u8 *buf = msgs[i].buf;
+
+ if (msgs[i].flags & I2C_M_RD) {
+ GMBUS_REG_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT |
+ (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+			GMBUS_REG_READ(GMBUS2 + reg_offset);
+ do {
+ u32 val, loop = 0;
+
+ if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
+ (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+ goto timeout;
+ if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ goto clear_err;
+
+ val = GMBUS_REG_READ(GMBUS3 + reg_offset);
+ do {
+ *buf++ = val & 0xff;
+ val >>= 8;
+ } while (--len && ++loop < 4);
+ } while (len);
+ } else {
+ u32 val, loop;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
+ GMBUS_REG_WRITE(GMBUS1 + reg_offset,
+ (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
+ (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+			GMBUS_REG_READ(GMBUS2 + reg_offset);
+
+ while (len) {
+ if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
+ (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+ goto timeout;
+ if (GMBUS_REG_READ(GMBUS2 + reg_offset) &
+ GMBUS_SATOER)
+ goto clear_err;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
+				GMBUS_REG_READ(GMBUS2 + reg_offset);
+ }
+ }
+
+		if (i + 1 < num &&
+		    wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
+			     (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+ goto timeout;
+ if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ goto clear_err;
+ }
+
+ goto done;
+
+clear_err:
+ /* Toggle the Software Clear Interrupt bit. This has the effect
+ * of resetting the GMBUS controller and so clearing the
+ * BUS_ERROR raised by the slave's NAK.
+ */
+ GMBUS_REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
+ GMBUS_REG_WRITE(GMBUS1 + reg_offset, 0);
+
+done:
+ /* Mark the GMBUS interface as disabled. We will re-enable it at the
+ * start of the next xfer, till then let it sleep.
+ */
+ GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
+ return i;
+
+timeout:
+ DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
+ bus->reg0 & 0xff, bus->adapter.name);
+ GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
+
+	/* Hardware may not support GMBUS over these pins;
+	 * try GPIO bit-banging instead. */
+ bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
+ if (!bus->force_bit)
+ return -ENOMEM;
+
+ return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
+}
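
Once registered, the adapter is driven through the standard Linux i2c API; a DDC EDID fetch is the typical consumer. A hedged sketch (example_read_edid is hypothetical; 0x50 is the standard DDC address, and a single 128-byte block is assumed):

	static int example_read_edid(struct i2c_adapter *adapter, u8 *edid)
	{
		u8 offset = 0;
		struct i2c_msg msgs[] = {
			{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
			{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
		};

		/* Two messages: gmbus_xfer() issues a WAIT then a STOP cycle. */
		return i2c_transfer(adapter, msgs, 2) == 2 ? 0 : -EIO;
	}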
+
+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+
+ if (bus->force_bit)
+ bus->force_bit->algo->functionality(bus->force_bit);
+
+ return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ /* I2C_FUNC_10BIT_ADDR | */
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+}
+
+static const struct i2c_algorithm gmbus_algorithm = {
+ .master_xfer = gmbus_xfer,
+ .functionality = gmbus_func
+};
+
+/**
+ * gma_intel_setup_gmbus - instantiate all Intel i2c GMBuses
+ * @dev: DRM device
+ */
+int gma_intel_setup_gmbus(struct drm_device *dev)
+{
+ static const char *names[GMBUS_NUM_PORTS] = {
+ "disabled",
+ "ssc",
+ "vga",
+ "panel",
+ "dpc",
+ "dpb",
+ "reserved",
+ "dpd",
+ };
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret, i;
+
+ dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
+ GFP_KERNEL);
+ if (dev_priv->gmbus == NULL)
+ return -ENOMEM;
+
+ if (IS_MRST(dev))
+ dev_priv->gmbus_reg = dev_priv->aux_reg;
+ else
+ dev_priv->gmbus_reg = dev_priv->vdc_reg;
+
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+
+ bus->adapter.owner = THIS_MODULE;
+ bus->adapter.class = I2C_CLASS_DDC;
+ snprintf(bus->adapter.name,
+ sizeof(bus->adapter.name),
+ "gma500 gmbus %s",
+ names[i]);
+
+ bus->adapter.dev.parent = &dev->pdev->dev;
+ bus->adapter.algo_data = dev_priv;
+
+ bus->adapter.algo = &gmbus_algorithm;
+ ret = i2c_add_adapter(&bus->adapter);
+ if (ret)
+ goto err;
+
+ /* By default use a conservative clock rate */
+ bus->reg0 = i | GMBUS_RATE_100KHZ;
+
+ /* XXX force bit banging until GMBUS is fully debugged */
+ bus->force_bit = intel_gpio_create(dev_priv, i);
+ }
+
+ gma_intel_i2c_reset(dev_priv->dev);
+
+ return 0;
+
+err:
+	while (--i >= 0) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ i2c_del_adapter(&bus->adapter);
+ }
+ kfree(dev_priv->gmbus);
+ dev_priv->gmbus = NULL;
+ return ret;
+}
+
+void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ /* speed:
+ * 0x0 = 100 KHz
+ * 0x1 = 50 KHz
+ * 0x2 = 400 KHz
+	 * 0x3 = 1000 KHz
+ */
+ bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
+}
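
The speed value lands in bits 9:8 of reg0 and takes effect on the next transfer, per the table above. For example:

	/* Illustrative: select 400 KHz for subsequent transfers on this bus. */
	gma_intel_gmbus_set_speed(adapter, 2);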
+
+void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ if (force_bit) {
+ if (bus->force_bit == NULL) {
+ struct drm_psb_private *dev_priv = adapter->algo_data;
+ bus->force_bit = intel_gpio_create(dev_priv,
+ bus->reg0 & 0xff);
+ }
+ } else {
+ if (bus->force_bit) {
+ i2c_del_adapter(bus->force_bit);
+ kfree(bus->force_bit);
+ bus->force_bit = NULL;
+ }
+ }
+}
+
+void gma_intel_teardown_gmbus(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int i;
+
+ if (dev_priv->gmbus == NULL)
+ return;
+
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ if (bus->force_bit) {
+ i2c_del_adapter(bus->force_bit);
+ kfree(bus->force_bit);
+ }
+ i2c_del_adapter(&bus->adapter);
+ }
+
+ dev_priv->gmbus_reg = NULL; /* iounmap is done in driver_unload */
+ kfree(dev_priv->gmbus);
+ dev_priv->gmbus = NULL;
+}
diff --git a/drivers/gpu/drm/gma500/intel_i2c.c b/drivers/gpu/drm/gma500/intel_i2c.c
new file mode 100644
index 00000000000..98a28c20955
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_i2c.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+/*
+ * Intel GPIO access functions
+ */
+
+#define I2C_RISEFALL_TIME 20
+
+static int get_clock(void *data)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ u32 val;
+
+ val = REG_READ(chan->reg);
+ return (val & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ u32 val;
+
+ val = REG_READ(chan->reg);
+ return (val & GPIO_DATA_VAL_IN) != 0;
+}
+
+static void set_clock(void *data, int state_high)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ u32 reserved = 0, clock_bits;
+
+ /* On most chips, these bits must be preserved in software. */
+ reserved =
+ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ if (state_high)
+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+ else
+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+ GPIO_CLOCK_VAL_MASK;
+ REG_WRITE(chan->reg, reserved | clock_bits);
+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+}
+
+static void set_data(void *data, int state_high)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ u32 reserved = 0, data_bits;
+
+ /* On most chips, these bits must be preserved in software. */
+ reserved =
+ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ if (state_high)
+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+ else
+ data_bits =
+ GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+ GPIO_DATA_VAL_MASK;
+
+ REG_WRITE(chan->reg, reserved | data_bits);
+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+}
+
+/**
+ * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * @dev: DRM device
+ * @reg: GPIO reg to use
+ * @name: name for this bus
+ *
+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
+ * in output probing and control (e.g. DDC or SDVO control functions).
+ *
+ * Possible values for @reg include:
+ * %GPIOA
+ * %GPIOB
+ * %GPIOC
+ * %GPIOD
+ * %GPIOE
+ * %GPIOF
+ * %GPIOG
+ * %GPIOH
+ * See the PRM for details on how these different busses are used.
+ */
+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
+ const u32 reg, const char *name)
+{
+ struct psb_intel_i2c_chan *chan;
+
+ chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
+ if (!chan)
+ goto out_free;
+
+ chan->drm_dev = dev;
+ chan->reg = reg;
+ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
+ chan->adapter.owner = THIS_MODULE;
+ chan->adapter.algo_data = &chan->algo;
+ chan->adapter.dev.parent = &dev->pdev->dev;
+ chan->algo.setsda = set_data;
+ chan->algo.setscl = set_clock;
+ chan->algo.getsda = get_data;
+ chan->algo.getscl = get_clock;
+ chan->algo.udelay = 20;
+ chan->algo.timeout = usecs_to_jiffies(2200);
+ chan->algo.data = chan;
+
+ i2c_set_adapdata(&chan->adapter, chan);
+
+ if (i2c_bit_add_bus(&chan->adapter))
+ goto out_free;
+
+ /* JJJ: raise SCL and SDA? */
+ set_data(chan, 1);
+ set_clock(chan, 1);
+ udelay(20);
+
+ return chan;
+
+out_free:
+ kfree(chan);
+ return NULL;
+}
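+
+/*
+ * A usage sketch under this API (names are illustrative): an output
+ * driver probing a DDC-connected monitor might do
+ *
+ *	struct psb_intel_i2c_chan *ddc;
+ *
+ *	ddc = psb_intel_i2c_create(dev, GPIOA, "CRTDDC_A");
+ *	if (ddc) {
+ *		struct edid *edid = drm_get_edid(connector, &ddc->adapter);
+ *		...
+ *		psb_intel_i2c_destroy(ddc);
+ *	}
+ */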
+
+/**
+ * psb_intel_i2c_destroy - unregister and free i2c bus resources
+ * @chan: channel to free
+ *
+ * Unregister the adapter from the i2c layer, then free the structure.
+ */
+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
+{
+ if (!chan)
+ return;
+
+ i2c_del_adapter(&chan->adapter);
+ kfree(chan);
+}
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
new file mode 100644
index 00000000000..265ad0de44a
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_device.c
@@ -0,0 +1,551 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include "psb_drv.h"
+#include "mid_bios.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_output.h"
+#include "tc35876x-dsi-lvds.h"
+
+#include <asm/intel_scu_ipc.h>
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
+#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+#define BRIGHTNESS_MIN_LEVEL 1
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK 0xFF
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+#define BLC_ADJUSTMENT_MAX 100
+
+#define MDFLD_BLC_PWM_PRECISION_FACTOR 10
+#define MDFLD_BLC_MAX_PWM_REG_FREQ 0xFFFE
+#define MDFLD_BLC_MIN_PWM_REG_FREQ 0x2
+
+#define MDFLD_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+#define MDFLD_BACKLIGHT_PWM_CTL_SHIFT (16)
+
+static struct backlight_device *mdfld_backlight_device;
+
+int mdfld_set_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev =
+ (struct drm_device *)bl_get_data(mdfld_backlight_device);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int level = bd->props.brightness;
+
+ DRM_DEBUG_DRIVER("backlight level set to %d\n", level);
+
+	/* Clamp to the minimum supported brightness level */
+ if (level < BRIGHTNESS_MIN_LEVEL)
+ level = BRIGHTNESS_MIN_LEVEL;
+
+ if (gma_power_begin(dev, false)) {
+ u32 adjusted_level = 0;
+
+ /*
+ * Adjust the backlight level with the percent in
+ * dev_priv->blc_adj2
+ */
+ adjusted_level = level * dev_priv->blc_adj2;
+ adjusted_level = adjusted_level / BLC_ADJUSTMENT_MAX;
+ dev_priv->brightness_adjusted = adjusted_level;
+
+		if (mdfld_get_panel_type(dev, 0) == TC35876X) {
+			if (dev_priv->dpi_panel_on[0] ||
+					dev_priv->dpi_panel_on[2])
+				tc35876x_brightness_control(dev,
+					dev_priv->brightness_adjusted);
+		} else {
+			if (dev_priv->dpi_panel_on[0])
+				mdfld_dsi_brightness_control(dev, 0,
+					dev_priv->brightness_adjusted);
+			if (dev_priv->dpi_panel_on[2])
+				mdfld_dsi_brightness_control(dev, 2,
+					dev_priv->brightness_adjusted);
+		}
+ gma_power_end(dev);
+ }
+
+ /* cache the brightness for later use */
+ dev_priv->brightness = level;
+ return 0;
+}
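+
+/*
+ * Worked example (illustrative numbers): with blc_adj2 = 50 and a
+ * requested level of 80, adjusted_level = 80 * 50 / BLC_ADJUSTMENT_MAX
+ * = 40, i.e. the user-visible scale is halved before it reaches the
+ * panel.
+ */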
+
+static int mdfld_get_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev =
+ (struct drm_device *)bl_get_data(mdfld_backlight_device);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+	DRM_DEBUG_DRIVER("brightness = 0x%x\n", dev_priv->brightness);
+
+ /* return locally cached var instead of HW read (due to DPST etc.) */
+ return dev_priv->brightness;
+}
+
+static const struct backlight_ops mdfld_ops = {
+ .get_brightness = mdfld_get_brightness,
+ .update_status = mdfld_set_brightness,
+};
+
+static int device_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)
+ dev->dev_private;
+
+ dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
+ dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
+
+ return 0;
+}
+
+static int mdfld_backlight_init(struct drm_device *dev)
+{
+ struct backlight_properties props;
+ int ret = 0;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = BRIGHTNESS_MAX_LEVEL;
+ props.type = BACKLIGHT_PLATFORM;
+ mdfld_backlight_device = backlight_device_register("mdfld-bl",
+ NULL, (void *)dev, &mdfld_ops, &props);
+
+ if (IS_ERR(mdfld_backlight_device))
+ return PTR_ERR(mdfld_backlight_device);
+
+ ret = device_backlight_init(dev);
+ if (ret)
+ return ret;
+
+ mdfld_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL;
+ mdfld_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
+ backlight_update_status(mdfld_backlight_device);
+ return 0;
+}
+#endif
+
+struct backlight_device *mdfld_get_backlight_device(void)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ return mdfld_backlight_device;
+#else
+ return NULL;
+#endif
+}
+
+/*
+ * mdfld_save_display_registers
+ *
+ * Description: We are going to suspend so save current display
+ * register state.
+ *
+ * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
+ */
+static int mdfld_save_display_registers(struct drm_device *dev, int pipenum)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct medfield_state *regs = &dev_priv->regs.mdfld;
+ struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
+ const struct psb_offset *map = &dev_priv->regmap[pipenum];
+ int i;
+ u32 *mipi_val;
+
+ /* register */
+ u32 mipi_reg = MIPI;
+
+	switch (pipenum) {
+	case 0:
+	case 1:
+		mipi_val = &regs->saveMIPI;
+		break;
+ case 2:
+ /* register */
+ mipi_reg = MIPI_C;
+ /* pointer to values */
+ mipi_val = &regs->saveMIPI_C;
+ break;
+ default:
+ DRM_ERROR("%s, invalid pipe number.\n", __func__);
+ return -EINVAL;
+ }
+
+	/* Pipe & plane info */
+ pipe->dpll = PSB_RVDC32(map->dpll);
+ pipe->fp0 = PSB_RVDC32(map->fp0);
+ pipe->conf = PSB_RVDC32(map->conf);
+ pipe->htotal = PSB_RVDC32(map->htotal);
+ pipe->hblank = PSB_RVDC32(map->hblank);
+ pipe->hsync = PSB_RVDC32(map->hsync);
+ pipe->vtotal = PSB_RVDC32(map->vtotal);
+ pipe->vblank = PSB_RVDC32(map->vblank);
+ pipe->vsync = PSB_RVDC32(map->vsync);
+ pipe->src = PSB_RVDC32(map->src);
+ pipe->stride = PSB_RVDC32(map->stride);
+ pipe->linoff = PSB_RVDC32(map->linoff);
+ pipe->tileoff = PSB_RVDC32(map->tileoff);
+ pipe->size = PSB_RVDC32(map->size);
+ pipe->pos = PSB_RVDC32(map->pos);
+ pipe->surf = PSB_RVDC32(map->surf);
+ pipe->cntr = PSB_RVDC32(map->cntr);
+ pipe->status = PSB_RVDC32(map->status);
+
+	/* save palette (gamma) */
+ for (i = 0; i < 256; i++)
+ pipe->palette[i] = PSB_RVDC32(map->palette + (i << 2));
+
+ if (pipenum == 1) {
+ regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
+ regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
+
+ regs->saveHDMIPHYMISCCTL = PSB_RVDC32(HDMIPHYMISCCTL);
+ regs->saveHDMIB_CONTROL = PSB_RVDC32(HDMIB_CONTROL);
+ return 0;
+ }
+
+ *mipi_val = PSB_RVDC32(mipi_reg);
+ return 0;
+}
+
+/*
+ * mdfld_restore_display_registers
+ *
+ * Description: We are going to resume so restore display register state.
+ *
+ * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
+ */
+static int mdfld_restore_display_registers(struct drm_device *dev, int pipenum)
+{
+ /* To get panel out of ULPS mode. */
+ u32 temp = 0;
+ u32 device_ready_reg = DEVICE_READY_REG;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dsi_config *dsi_config = NULL;
+ struct medfield_state *regs = &dev_priv->regs.mdfld;
+ struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
+ const struct psb_offset *map = &dev_priv->regmap[pipenum];
+ u32 i;
+ u32 dpll;
+ u32 timeout = 0;
+
+ /* register */
+ u32 mipi_reg = MIPI;
+
+ /* values */
+ u32 dpll_val = pipe->dpll;
+ u32 mipi_val = regs->saveMIPI;
+
+ switch (pipenum) {
+ case 0:
+ dpll_val &= ~DPLL_VCO_ENABLE;
+ dsi_config = dev_priv->dsi_configs[0];
+ break;
+ case 1:
+ dpll_val &= ~DPLL_VCO_ENABLE;
+ break;
+ case 2:
+ mipi_reg = MIPI_C;
+ mipi_val = regs->saveMIPI_C;
+ dsi_config = dev_priv->dsi_configs[1];
+ break;
+ default:
+ DRM_ERROR("%s, invalid pipe number.\n", __func__);
+ return -EINVAL;
+ }
+
+	/* Make sure the VGA plane is off; it initializes to on after reset! */
+ PSB_WVDC32(0x80000000, VGACNTRL);
+
+ if (pipenum == 1) {
+ PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, map->dpll);
+ PSB_RVDC32(map->dpll);
+
+ PSB_WVDC32(pipe->fp0, map->fp0);
+ } else {
+
+ dpll = PSB_RVDC32(map->dpll);
+
+ if (!(dpll & DPLL_VCO_ENABLE)) {
+
+			/* When ungating the DPLL power, wait 0.5us before
+			   enabling the VCO */
+ if (dpll & MDFLD_PWR_GATE_EN) {
+ dpll &= ~MDFLD_PWR_GATE_EN;
+ PSB_WVDC32(dpll, map->dpll);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+ }
+
+ PSB_WVDC32(pipe->fp0, map->fp0);
+ PSB_WVDC32(dpll_val, map->dpll);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+
+ dpll_val |= DPLL_VCO_ENABLE;
+ PSB_WVDC32(dpll_val, map->dpll);
+ PSB_RVDC32(map->dpll);
+
+ /* wait for DSI PLL to lock */
+ while (timeout < 20000 &&
+ !(PSB_RVDC32(map->conf) & PIPECONF_DSIPLL_LOCK)) {
+ udelay(150);
+ timeout++;
+ }
+
+ if (timeout == 20000) {
+ DRM_ERROR("%s, can't lock DSIPLL.\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+ }
+ /* Restore mode */
+ PSB_WVDC32(pipe->htotal, map->htotal);
+ PSB_WVDC32(pipe->hblank, map->hblank);
+ PSB_WVDC32(pipe->hsync, map->hsync);
+ PSB_WVDC32(pipe->vtotal, map->vtotal);
+ PSB_WVDC32(pipe->vblank, map->vblank);
+ PSB_WVDC32(pipe->vsync, map->vsync);
+ PSB_WVDC32(pipe->src, map->src);
+ PSB_WVDC32(pipe->status, map->status);
+
+ /*set up the plane*/
+ PSB_WVDC32(pipe->stride, map->stride);
+ PSB_WVDC32(pipe->linoff, map->linoff);
+ PSB_WVDC32(pipe->tileoff, map->tileoff);
+ PSB_WVDC32(pipe->size, map->size);
+ PSB_WVDC32(pipe->pos, map->pos);
+ PSB_WVDC32(pipe->surf, map->surf);
+
+ if (pipenum == 1) {
+ /* restore palette (gamma) */
+ /*DRM_UDELAY(50000); */
+ for (i = 0; i < 256; i++)
+ PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
+
+ PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL);
+ PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
+
+ /*TODO: resume HDMI port */
+
+ /*TODO: resume pipe*/
+
+ /*enable the plane*/
+ PSB_WVDC32(pipe->cntr & ~DISPLAY_PLANE_ENABLE, map->cntr);
+
+ return 0;
+ }
+
+ /*set up pipe related registers*/
+ PSB_WVDC32(mipi_val, mipi_reg);
+
+ /*setup MIPI adapter + MIPI IP registers*/
+ if (dsi_config)
+ mdfld_dsi_controller_init(dsi_config, pipenum);
+
+ if (in_atomic() || in_interrupt())
+ mdelay(20);
+ else
+ msleep(20);
+
+ /*enable the plane*/
+ PSB_WVDC32(pipe->cntr, map->cntr);
+
+ if (in_atomic() || in_interrupt())
+ mdelay(20);
+ else
+ msleep(20);
+
+ /* LP Hold Release */
+ temp = REG_READ(mipi_reg);
+ temp |= LP_OUTPUT_HOLD_RELEASE;
+ REG_WRITE(mipi_reg, temp);
+ mdelay(1);
+
+	/* Set DSI host to exit from Ultra Low Power State */
+ temp = REG_READ(device_ready_reg);
+ temp &= ~ULPS_MASK;
+ temp |= 0x3;
+ temp |= EXIT_ULPS_DEV_READY;
+ REG_WRITE(device_ready_reg, temp);
+ mdelay(1);
+
+ temp = REG_READ(device_ready_reg);
+ temp &= ~ULPS_MASK;
+ temp |= EXITING_ULPS;
+ REG_WRITE(device_ready_reg, temp);
+ mdelay(1);
+
+ /*enable the pipe*/
+ PSB_WVDC32(pipe->conf, map->conf);
+
+ /* restore palette (gamma) */
+ /*DRM_UDELAY(50000); */
+ for (i = 0; i < 256; i++)
+ PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
+
+ return 0;
+}
+
+static int mdfld_save_registers(struct drm_device *dev)
+{
+ /* mdfld_save_cursor_overlay_registers(dev); */
+ mdfld_save_display_registers(dev, 0);
+ mdfld_save_display_registers(dev, 2);
+ mdfld_disable_crtc(dev, 0);
+ mdfld_disable_crtc(dev, 2);
+
+ return 0;
+}
+
+static int mdfld_restore_registers(struct drm_device *dev)
+{
+ mdfld_restore_display_registers(dev, 2);
+ mdfld_restore_display_registers(dev, 0);
+ /* mdfld_restore_cursor_overlay_registers(dev); */
+
+ return 0;
+}
+
+static int mdfld_power_down(struct drm_device *dev)
+{
+ /* FIXME */
+ return 0;
+}
+
+static int mdfld_power_up(struct drm_device *dev)
+{
+ /* FIXME */
+ return 0;
+}
+
+/* Medfield */
+static const struct psb_offset mdfld_regmap[3] = {
+ {
+ .fp0 = MRST_FPA0,
+ .fp1 = MRST_FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = MRST_DPLL_A,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .surf = DSPASURF,
+ .addr = MRST_DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = MDFLD_DPLL_DIV0,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = MDFLD_DPLL_B,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .surf = DSPBSURF,
+ .addr = MRST_DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ },
+ {
+ .fp0 = MRST_FPA0, /* This is what the old code did ?? */
+ .cntr = DSPCCNTR,
+ .conf = PIPECCONF,
+ .src = PIPECSRC,
+ /* No DPLL_C */
+ .dpll = MRST_DPLL_A,
+ .htotal = HTOTAL_C,
+ .hblank = HBLANK_C,
+ .hsync = HSYNC_C,
+ .vtotal = VTOTAL_C,
+ .vblank = VBLANK_C,
+ .vsync = VSYNC_C,
+ .stride = DSPCSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPCPOS,
+ .surf = DSPCSURF,
+ .addr = MDFLD_DSPCBASE,
+ .status = PIPECSTAT,
+ .linoff = DSPCLINOFF,
+ .tileoff = DSPCTILEOFF,
+ .palette = PALETTE_C,
+ },
+};
+
+static int mdfld_chip_setup(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->regmap = mdfld_regmap;
+ return mid_chip_setup(dev);
+}
+
+const struct psb_ops mdfld_chip_ops = {
+ .name = "mdfld",
+ .accel_2d = 0,
+ .pipes = 3,
+ .crtcs = 3,
+ .lvds_mask = (1 << 1),
+ .hdmi_mask = (1 << 1),
+ .cursor_needs_phys = 0,
+ .sgx_offset = MRST_SGX_OFFSET,
+
+ .chip_setup = mdfld_chip_setup,
+ .crtc_helper = &mdfld_helper_funcs,
+ .crtc_funcs = &psb_intel_crtc_funcs,
+
+ .output_init = mdfld_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = mdfld_backlight_init,
+#endif
+
+ .save_regs = mdfld_save_registers,
+ .restore_regs = mdfld_restore_registers,
+ .power_down = mdfld_power_down,
+ .power_up = mdfld_power_up,
+};
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
new file mode 100644
index 00000000000..d4813e03f5e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -0,0 +1,1016 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "psb_drv.h"
+#include "tc35876x-dsi-lvds.h"
+
+static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
+ int pipe);
+
+static void mdfld_wait_for_HS_DATA_FIFO(struct drm_device *dev, u32 pipe)
+{
+ u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
+ int timeout = 0;
+
+ udelay(500);
+
+ /* This will time out after approximately 2+ seconds */
+ while ((timeout < 20000) &&
+ (REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_DATA_FULL)) {
+ udelay(100);
+ timeout++;
+ }
+
+ if (timeout == 20000)
+ DRM_INFO("MIPI: HS Data FIFO was never cleared!\n");
+}
+
+static void mdfld_wait_for_HS_CTRL_FIFO(struct drm_device *dev, u32 pipe)
+{
+ u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
+ int timeout = 0;
+
+ udelay(500);
+
+ /* This will time out after approximately 2+ seconds */
+ while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg)
+ & DSI_FIFO_GEN_HS_CTRL_FULL)) {
+ udelay(100);
+ timeout++;
+ }
+ if (timeout == 20000)
+ DRM_INFO("MIPI: HS CMD FIFO was never cleared!\n");
+}
+
+static void mdfld_wait_for_DPI_CTRL_FIFO(struct drm_device *dev, u32 pipe)
+{
+ u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
+ int timeout = 0;
+
+ udelay(500);
+
+ /* This will time out after approximately 2+ seconds */
+ while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) &
+ DPI_FIFO_EMPTY) != DPI_FIFO_EMPTY)) {
+ udelay(100);
+ timeout++;
+ }
+
+ if (timeout == 20000)
+ DRM_ERROR("MIPI: DPI FIFO was never cleared\n");
+}
+
+static void mdfld_wait_for_SPL_PKG_SENT(struct drm_device *dev, u32 pipe)
+{
+ u32 intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
+ int timeout = 0;
+
+ udelay(500);
+
+ /* This will time out after approximately 2+ seconds */
+ while ((timeout < 20000) && (!(REG_READ(intr_stat_reg)
+ & DSI_INTR_STATE_SPL_PKG_SENT))) {
+ udelay(100);
+ timeout++;
+ }
+
+ if (timeout == 20000)
+ DRM_ERROR("MIPI: SPL_PKT_SENT_INTERRUPT was not sent successfully!\n");
+}
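+
+/*
+ * The four helpers above share the same poll-with-timeout shape. A
+ * generic sketch of that pattern (hypothetical helper, not part of this
+ * driver) would be:
+ *
+ *	static bool mdfld_poll_reg(struct drm_device *dev, u32 reg,
+ *				   u32 mask, u32 want)
+ *	{
+ *		int timeout = 0;
+ *
+ *		udelay(500);
+ *		while (timeout < 20000 &&
+ *		       (REG_READ(reg) & mask) != want) {
+ *			udelay(100);
+ *			timeout++;
+ *		}
+ *		return timeout < 20000;
+ *	}
+ */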
+
+/* For TC35876X */
+
+static void dsi_set_device_ready_state(struct drm_device *dev, int state,
+ int pipe)
+{
+ REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), !!state, 0, 0);
+}
+
+static void dsi_set_pipe_plane_enable_state(struct drm_device *dev,
+ int state, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pipeconf_reg = PIPEACONF;
+ u32 dspcntr_reg = DSPACNTR;
+
+ u32 dspcntr = dev_priv->dspcntr[pipe];
+ u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
+
+ if (pipe) {
+ pipeconf_reg = PIPECCONF;
+ dspcntr_reg = DSPCCNTR;
+	} else {
+		mipi &= (~0x03);
+	}
+
+ if (state) {
+ /*Set up pipe */
+ REG_WRITE(pipeconf_reg, BIT(31));
+
+ if (REG_BIT_WAIT(pipeconf_reg, 1, 30))
+ dev_err(&dev->pdev->dev, "%s: Pipe enable timeout\n",
+ __func__);
+
+ /*Set up display plane */
+ REG_WRITE(dspcntr_reg, dspcntr);
+ } else {
+ u32 dspbase_reg = pipe ? MDFLD_DSPCBASE : MRST_DSPABASE;
+
+ /* Put DSI lanes to ULPS to disable pipe */
+ REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 2, 2, 1);
+ REG_READ(MIPI_DEVICE_READY_REG(pipe)); /* posted write? */
+
+ /* LP Hold */
+ REG_FLD_MOD(MIPI_PORT_CONTROL(pipe), 0, 16, 16);
+ REG_READ(MIPI_PORT_CONTROL(pipe)); /* posted write? */
+
+ /* Disable display plane */
+ REG_FLD_MOD(dspcntr_reg, 0, 31, 31);
+
+ /* Flush the plane changes ??? posted write? */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_READ(dspbase_reg);
+
+ /* Disable PIPE */
+ REG_FLD_MOD(pipeconf_reg, 0, 31, 31);
+
+ if (REG_BIT_WAIT(pipeconf_reg, 0, 30))
+ dev_err(&dev->pdev->dev, "%s: Pipe disable timeout\n",
+ __func__);
+
+ if (REG_BIT_WAIT(MIPI_GEN_FIFO_STAT_REG(pipe), 1, 28))
+ dev_err(&dev->pdev->dev, "%s: FIFO not empty\n",
+ __func__);
+ }
+}
+
+static void mdfld_dsi_configure_down(struct mdfld_dsi_encoder *dsi_encoder,
+ int pipe)
+{
+ struct mdfld_dsi_dpi_output *dpi_output =
+ MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_encoder_get_config(dsi_encoder);
+ struct drm_device *dev = dsi_config->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->dpi_panel_on[pipe]) {
+ dev_err(dev->dev, "DPI panel is already off\n");
+ return;
+ }
+ tc35876x_toshiba_bridge_panel_off(dev);
+ tc35876x_set_bridge_reset_state(dev, 1);
+ dsi_set_pipe_plane_enable_state(dev, 0, pipe);
+ mdfld_dsi_dpi_shut_down(dpi_output, pipe);
+ dsi_set_device_ready_state(dev, 0, pipe);
+}
+
+static void mdfld_dsi_configure_up(struct mdfld_dsi_encoder *dsi_encoder,
+ int pipe)
+{
+ struct mdfld_dsi_dpi_output *dpi_output =
+ MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_encoder_get_config(dsi_encoder);
+ struct drm_device *dev = dsi_config->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->dpi_panel_on[pipe]) {
+ dev_err(dev->dev, "DPI panel is already on\n");
+ return;
+ }
+
+ /* For resume path sequence */
+ mdfld_dsi_dpi_shut_down(dpi_output, pipe);
+ dsi_set_device_ready_state(dev, 0, pipe);
+
+ dsi_set_device_ready_state(dev, 1, pipe);
+ tc35876x_set_bridge_reset_state(dev, 0);
+ tc35876x_configure_lvds_bridge(dev);
+ mdfld_dsi_dpi_turn_on(dpi_output, pipe); /* Send turn on command */
+ dsi_set_pipe_plane_enable_state(dev, 1, pipe);
+}
+/* End for TC35876X */
+
+/* ************************************************************************* *\
+ * FUNCTION: mdfld_dsi_tpo_ic_init
+ *
+ * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
+ *              restore_display_registers. Since this function does not
+ * acquire the mutex, it is important that the calling function
+ * does!
+\* ************************************************************************* */
+static void mdfld_dsi_tpo_ic_init(struct mdfld_dsi_config *dsi_config, u32 pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+ u32 dcsChannelNumber = dsi_config->channel_num;
+ u32 gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe);
+ u32 gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe);
+ u32 gen_ctrl_val = GEN_LONG_WRITE;
+
+ DRM_INFO("Enter mrst init TPO MIPI display.\n");
+
+ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
+
+ /* Flip page order */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00008036);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
+
+ /* 0xF0 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x005a5af0);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+
+ /* Write protection key */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x005a5af1);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+
+ /* 0xFC */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x005a5afc);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+
+ /* 0xB7 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x770000b7);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000044);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x05 << WORD_COUNTS_POS));
+
+ /* 0xB6 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x000a0ab6);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+
+ /* 0xF2 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x081010f2);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x4a070708);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x000000c5);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
+
+ /* 0xF8 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x024003f8);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x01030a04);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x0e020220);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000004);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x0d << WORD_COUNTS_POS));
+
+ /* 0xE2 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x398fc3e2);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x0000916f);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x06 << WORD_COUNTS_POS));
+
+ /* 0xB0 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x000000b0);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
+
+ /* 0xF4 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x240242f4);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x78ee2002);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x2a071050);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x507fee10);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x10300710);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x14 << WORD_COUNTS_POS));
+
+ /* 0xBA */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x19fe07ba);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x101c0a31);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000010);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
+
+ /* 0xBB */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x28ff07bb);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x24280a31);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000034);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
+
+ /* 0xFB */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x535d05fb);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1b1a2130);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x221e180e);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x131d2120);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x535d0508);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1c1a2131);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x231f160d);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x111b2220);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x535c2008);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1f1d2433);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x2c251a10);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x2c34372d);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000023);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
+
+ /* 0xFA */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x525c0bfa);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1c1c232f);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x2623190e);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x18212625);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x545d0d0e);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1e1d2333);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x26231a10);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1a222725);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x545d280f);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x21202635);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x31292013);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x31393d33);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000029);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
+
+ /* Set DM */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x000100f7);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+}
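+
+/*
+ * Note on the encoding above: each long write pushes its payload into
+ * the HS data FIFO 32 bits at a time, low byte first, then kicks the
+ * control register with the byte count in WORD_COUNTS_POS. For instance
+ * the write-protection key sequence sends the 3 bytes 0xf1 0x5a 0x5a
+ * packed as 0x005a5af1, hence the 0x03 word count.
+ */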
+
+static u16 mdfld_dsi_dpi_to_byte_clock_count(int pixel_clock_count,
+ int num_lane, int bpp)
+{
+ return (u16)((pixel_clock_count * bpp) / (num_lane * 8));
+}
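+
+/*
+ * Worked example (illustrative numbers): an hsync width of 10 pixel
+ * clocks at 24 bpp over 2 lanes maps to 10 * 24 / (2 * 8) = 15 byte
+ * clocks on each lane.
+ */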
+
+/*
+ * Calculate the DPI timing based on the given drm mode @mode.
+ * Returns 0 on success.
+ * FIXME: this uses the proposed mode values for the calculation; it may
+ * need to use the crtc mode values later.
+ */
+int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
+ struct mdfld_dsi_dpi_timing *dpi_timing,
+ int num_lane, int bpp)
+{
+ int pclk_hsync, pclk_hfp, pclk_hbp, pclk_hactive;
+ int pclk_vsync, pclk_vfp, pclk_vbp;
+
+ pclk_hactive = mode->hdisplay;
+ pclk_hfp = mode->hsync_start - mode->hdisplay;
+ pclk_hsync = mode->hsync_end - mode->hsync_start;
+ pclk_hbp = mode->htotal - mode->hsync_end;
+
+ pclk_vfp = mode->vsync_start - mode->vdisplay;
+ pclk_vsync = mode->vsync_end - mode->vsync_start;
+ pclk_vbp = mode->vtotal - mode->vsync_end;
+
+	/*
+	 * Byte clock counts are calculated by the following formula:
+	 * bclock_count = pclk_count * bpp / num_lane / 8
+	 */
+ dpi_timing->hsync_count = mdfld_dsi_dpi_to_byte_clock_count(
+ pclk_hsync, num_lane, bpp);
+ dpi_timing->hbp_count = mdfld_dsi_dpi_to_byte_clock_count(
+ pclk_hbp, num_lane, bpp);
+ dpi_timing->hfp_count = mdfld_dsi_dpi_to_byte_clock_count(
+ pclk_hfp, num_lane, bpp);
+ dpi_timing->hactive_count = mdfld_dsi_dpi_to_byte_clock_count(
+ pclk_hactive, num_lane, bpp);
+ dpi_timing->vsync_count = mdfld_dsi_dpi_to_byte_clock_count(
+ pclk_vsync, num_lane, bpp);
+ dpi_timing->vbp_count = mdfld_dsi_dpi_to_byte_clock_count(
+ pclk_vbp, num_lane, bpp);
+ dpi_timing->vfp_count = mdfld_dsi_dpi_to_byte_clock_count(
+ pclk_vfp, num_lane, bpp);
+
+ return 0;
+}
+
+void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
+ int pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+ int lane_count = dsi_config->lane_count;
+ struct mdfld_dsi_dpi_timing dpi_timing;
+ struct drm_display_mode *mode = dsi_config->mode;
+ u32 val;
+
+ /*un-ready device*/
+ REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 0, 0, 0);
+
+ /*init dsi adapter before kicking off*/
+ REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);
+
+ /*enable all interrupts*/
+ REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);
+
+ /*set up func_prg*/
+ val = lane_count;
+ val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;
+
+ switch (dsi_config->bpp) {
+ case 16:
+ val |= DSI_DPI_COLOR_FORMAT_RGB565;
+ break;
+ case 18:
+ val |= DSI_DPI_COLOR_FORMAT_RGB666;
+ break;
+ case 24:
+ val |= DSI_DPI_COLOR_FORMAT_RGB888;
+ break;
+ default:
+ DRM_ERROR("unsupported color format, bpp = %d\n",
+ dsi_config->bpp);
+ }
+ REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), val);
+
+ REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe),
+ (mode->vtotal * mode->htotal * dsi_config->bpp /
+ (8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
+ REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe),
+ 0xffff & DSI_LP_RX_TIMEOUT_MASK);
+
+ /*max value: 20 clock cycles of txclkesc*/
+ REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe),
+ 0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);
+
+ /*min 21 txclkesc, max: ffffh*/
+ REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe),
+ 0xffff & DSI_RESET_TIMER_MASK);
+
+ REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
+ mode->vdisplay << 16 | mode->hdisplay);
+
+ /*set DPI timing registers*/
+ mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
+ dsi_config->lane_count, dsi_config->bpp);
+
+ REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
+ dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
+ dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
+ dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
+ dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
+ dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
+ dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
+ dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
+
+ REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x46);
+
+ /*min: 7d0 max: 4e20*/
+ REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0x000007d0);
+
+ /*set up video mode*/
+ val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
+ REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), val);
+
+ REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);
+
+ REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);
+
+ /*TODO: figure out how to setup these registers*/
+ if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+ REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
+ else
+ REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150c3408);
+
+ REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);
+
+ if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+ tc35876x_set_bridge_reset_state(dev, 0); /*Pull High Reset */
+
+ /*set device ready*/
+ REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 1, 0, 0);
+}
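+
+/*
+ * Sanity check on the HS TX timeout programmed above (illustrative
+ * numbers): for a panel with htotal = 1000, vtotal = 500, 24 bpp and
+ * 2 lanes, the value is 1000 * 500 * 24 / (8 * 2) = 750000 byte clocks,
+ * i.e. roughly one full frame.
+ */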
+
+void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, int pipe)
+{
+ struct drm_device *dev = output->dev;
+
+ /* clear special packet sent bit */
+ if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
+ REG_WRITE(MIPI_INTR_STAT_REG(pipe),
+ DSI_INTR_STATE_SPL_PKG_SENT);
+
+ /*send turn on package*/
+ REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_TURN_ON);
+
+ /*wait for SPL_PKG_SENT interrupt*/
+ mdfld_wait_for_SPL_PKG_SENT(dev, pipe);
+
+ if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
+ REG_WRITE(MIPI_INTR_STAT_REG(pipe),
+ DSI_INTR_STATE_SPL_PKG_SENT);
+
+ output->panel_on = 1;
+
+ /* FIXME the following is disabled to WA the X slow start issue
+ for TMD panel
+ if (pipe == 2)
+ dev_priv->dpi_panel_on2 = true;
+ else if (pipe == 0)
+ dev_priv->dpi_panel_on = true; */
+}
+
+static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
+ int pipe)
+{
+ struct drm_device *dev = output->dev;
+
+	/* If the panel is off, or mode setting didn't happen, ignore this */
+ if ((!output->panel_on) || output->first_boot) {
+ output->first_boot = 0;
+ return;
+ }
+
+ /* Wait for dpi fifo to empty */
+ mdfld_wait_for_DPI_CTRL_FIFO(dev, pipe);
+
+ /* Clear the special packet interrupt bit if set */
+ if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
+ REG_WRITE(MIPI_INTR_STAT_REG(pipe),
+ DSI_INTR_STATE_SPL_PKG_SENT);
+
+ if (REG_READ(MIPI_DPI_CONTROL_REG(pipe)) == DSI_DPI_CTRL_HS_SHUTDOWN)
+ goto shutdown_out;
+
+ REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_SHUTDOWN);
+
+shutdown_out:
+ output->panel_on = 0;
+ output->first_boot = 0;
+
+ /* FIXME the following is disabled to WA the X slow start issue
+ for TMD panel
+ if (pipe == 2)
+ dev_priv->dpi_panel_on2 = false;
+ else if (pipe == 0)
+ dev_priv->dpi_panel_on = false; */
+}
+
+static void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on)
+{
+ struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
+ struct mdfld_dsi_dpi_output *dpi_output =
+ MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_encoder_get_config(dsi_encoder);
+ int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+ struct drm_device *dev = dsi_config->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /*start up display island if it was shutdown*/
+ if (!gma_power_begin(dev, true))
+ return;
+
+ if (on) {
+ if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
+ mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+ else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+ mdfld_dsi_configure_up(dsi_encoder, pipe);
+ else {
+ /*enable mipi port*/
+ REG_WRITE(MIPI_PORT_CONTROL(pipe),
+ REG_READ(MIPI_PORT_CONTROL(pipe)) | BIT(31));
+ REG_READ(MIPI_PORT_CONTROL(pipe));
+
+ mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+ mdfld_dsi_tpo_ic_init(dsi_config, pipe);
+ }
+ dev_priv->dpi_panel_on[pipe] = true;
+ } else {
+ if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
+ mdfld_dsi_dpi_shut_down(dpi_output, pipe);
+ else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+ mdfld_dsi_configure_down(dsi_encoder, pipe);
+ else {
+ mdfld_dsi_dpi_shut_down(dpi_output, pipe);
+
+ /*disable mipi port*/
+ REG_WRITE(MIPI_PORT_CONTROL(pipe),
+ REG_READ(MIPI_PORT_CONTROL(pipe)) & ~BIT(31));
+ REG_READ(MIPI_PORT_CONTROL(pipe));
+ }
+ dev_priv->dpi_panel_on[pipe] = false;
+ }
+ gma_power_end(dev);
+}
+
+void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
+{
+ mdfld_dsi_dpi_set_power(encoder, mode == DRM_MODE_DPMS_ON);
+}
+
+bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_encoder_get_config(dsi_encoder);
+ struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
+
+ if (fixed_mode) {
+ adjusted_mode->hdisplay = fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = fixed_mode->hsync_end;
+ adjusted_mode->htotal = fixed_mode->htotal;
+ adjusted_mode->vdisplay = fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = fixed_mode->vsync_end;
+ adjusted_mode->vtotal = fixed_mode->vtotal;
+ adjusted_mode->clock = fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ }
+ return true;
+}
+
+void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder)
+{
+ mdfld_dsi_dpi_set_power(encoder, false);
+}
+
+void mdfld_dsi_dpi_commit(struct drm_encoder *encoder)
+{
+ mdfld_dsi_dpi_set_power(encoder, true);
+}
+
+/* For TC35876X */
+/*
+ * This functionality was implemented in firmware in iCDK, but removed in
+ * DV0 and later, so it needs to be done here instead.
+ */
+static void mipi_set_properties(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+
+ REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);
+ REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);
+ REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe), 0xffffff);
+ REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe), 0xffffff);
+ REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe), 0x14);
+ REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe), 0xff);
+ REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x25);
+ REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0xf0);
+ REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);
+ REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);
+ REG_WRITE(MIPI_DBI_BW_CTRL_REG(pipe), 0x00000820);
+ REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);
+}
+
+static void mdfld_mipi_set_video_timing(struct mdfld_dsi_config *dsi_config,
+ int pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+ struct mdfld_dsi_dpi_timing dpi_timing;
+ struct drm_display_mode *mode = dsi_config->mode;
+
+ mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
+ dsi_config->lane_count,
+ dsi_config->bpp);
+
+ REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
+ mode->vdisplay << 16 | mode->hdisplay);
+ REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
+ dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
+ dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
+ dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
+ dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
+ dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
+ dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
+ dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
+}
+
+static void mdfld_mipi_config(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+ int lane_count = dsi_config->lane_count;
+
+ if (pipe) {
+ REG_WRITE(MIPI_PORT_CONTROL(0), 0x00000002);
+ REG_WRITE(MIPI_PORT_CONTROL(2), 0x80000000);
+ } else {
+ REG_WRITE(MIPI_PORT_CONTROL(0), 0x80010000);
+ REG_WRITE(MIPI_PORT_CONTROL(2), 0x00);
+ }
+
+ REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150A600F);
+ REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), 0x0000000F);
+
+ /* lane_count = 3 */
+ REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), 0x00000200 | lane_count);
+
+ mdfld_mipi_set_video_timing(dsi_config, pipe);
+}
+
+static void mdfld_set_pipe_timing(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+ struct drm_display_mode *mode = dsi_config->mode;
+
+ REG_WRITE(HTOTAL_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(HBLANK_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(HSYNC_A,
+ ((mode->hsync_end - 1) << 16) | (mode->hsync_start - 1));
+
+ REG_WRITE(VTOTAL_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
+ REG_WRITE(VBLANK_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
+ REG_WRITE(VSYNC_A,
+ ((mode->vsync_end - 1) << 16) | (mode->vsync_start - 1));
+
+ REG_WRITE(PIPEASRC,
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+}
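+
+/*
+ * The (total - 1) << 16 | (active - 1) packing above is the standard
+ * pipe timing encoding. As an illustration, a hypothetical 1024x600
+ * mode with htotal = 1072 would program HTOTAL_A with
+ * ((1072 - 1) << 16) | (1024 - 1).
+ */
+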
+/* End for TC35876X */
+
+void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
+ struct mdfld_dsi_dpi_output *dpi_output =
+ MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_encoder_get_config(dsi_encoder);
+ struct drm_device *dev = dsi_config->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+
+ u32 pipeconf_reg = PIPEACONF;
+ u32 dspcntr_reg = DSPACNTR;
+
+ u32 pipeconf = dev_priv->pipeconf[pipe];
+ u32 dspcntr = dev_priv->dspcntr[pipe];
+ u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
+
+ if (pipe) {
+ pipeconf_reg = PIPECCONF;
+ dspcntr_reg = DSPCCNTR;
+ } else {
+ if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+ mipi &= (~0x03); /* Use all four lanes */
+ else
+ mipi |= 2;
+ }
+
+ /*start up display island if it was shutdown*/
+ if (!gma_power_begin(dev, true))
+ return;
+
+ if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
+ /*
+		 * The following logic is required to reset and configure
+		 * the bridge. This also starts the DSI clock at 200MHz.
+ */
+ tc35876x_set_bridge_reset_state(dev, 0); /*Pull High Reset */
+ tc35876x_toshiba_bridge_panel_on(dev);
+ udelay(100);
+ /* Now start the DSI clock */
+ REG_WRITE(MRST_DPLL_A, 0x00);
+ REG_WRITE(MRST_FPA0, 0xC1);
+ REG_WRITE(MRST_DPLL_A, 0x00800000);
+ udelay(500);
+ REG_WRITE(MRST_DPLL_A, 0x80800000);
+
+ if (REG_BIT_WAIT(pipeconf_reg, 1, 29))
+ dev_err(&dev->pdev->dev, "%s: DSI PLL lock timeout\n",
+ __func__);
+
+ REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
+
+ mipi_set_properties(dsi_config, pipe);
+ mdfld_mipi_config(dsi_config, pipe);
+ mdfld_set_pipe_timing(dsi_config, pipe);
+
+ REG_WRITE(DSPABASE, 0x00);
+ REG_WRITE(DSPASIZE,
+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+
+ REG_WRITE(DSPACNTR, 0x98000000);
+ REG_WRITE(DSPASURF, 0x00);
+
+ REG_WRITE(VGACNTRL, 0x80000000);
+ REG_WRITE(DEVICE_READY_REG, 0x00000001);
+
+ REG_WRITE(MIPI_PORT_CONTROL(pipe), 0x80810000);
+ } else {
+ /*set up mipi port FIXME: do at init time */
+ REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi);
+ }
+ REG_READ(MIPI_PORT_CONTROL(pipe));
+
+ if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
+ /* NOP */
+ } else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
+ /* set up DSI controller DPI interface */
+ mdfld_dsi_dpi_controller_init(dsi_config, pipe);
+
+ /* Configure MIPI Bridge and Panel */
+ tc35876x_configure_lvds_bridge(dev);
+ dev_priv->dpi_panel_on[pipe] = true;
+ } else {
+ /*turn on DPI interface*/
+ mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+ }
+
+ /*set up pipe*/
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+
+ /*set up display plane*/
+ REG_WRITE(dspcntr_reg, dspcntr);
+ REG_READ(dspcntr_reg);
+
+ msleep(20); /* FIXME: this should wait for vblank */
+
+ if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
+ /* NOP */
+ } else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
+ mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+ } else {
+ /* init driver ic */
+ mdfld_dsi_tpo_ic_init(dsi_config, pipe);
+ /*init backlight*/
+ mdfld_dsi_brightness_init(dsi_config, pipe);
+ }
+
+ gma_power_end(dev);
+}
+
+/*
+ * Init DSI DPI encoder.
+ * Allocate an mdfld_dsi_encoder and attach it to the given @dsi_connector.
+ * Returns a pointer to the newly allocated DPI encoder, or NULL on error.
+ */
+struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
+ struct mdfld_dsi_connector *dsi_connector,
+ const struct panel_funcs *p_funcs)
+{
+ struct mdfld_dsi_dpi_output *dpi_output = NULL;
+ struct mdfld_dsi_config *dsi_config;
+ struct drm_connector *connector = NULL;
+ struct drm_encoder *encoder = NULL;
+ int pipe;
+ u32 data;
+ int ret;
+
+ pipe = dsi_connector->pipe;
+
+ if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
+ dsi_config = mdfld_dsi_get_config(dsi_connector);
+
+ /* panel hard-reset */
+ if (p_funcs->reset) {
+ ret = p_funcs->reset(pipe);
+ if (ret) {
+ DRM_ERROR("Panel %d hard-reset failed\n", pipe);
+ return NULL;
+ }
+ }
+
+ /* panel drvIC init */
+ if (p_funcs->drv_ic_init)
+ p_funcs->drv_ic_init(dsi_config, pipe);
+
+ /* panel power mode detect */
+ ret = mdfld_dsi_get_power_mode(dsi_config, &data, false);
+ if (ret) {
+ DRM_ERROR("Panel %d get power mode failed\n", pipe);
+ dsi_connector->status = connector_status_disconnected;
+ } else {
+ DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
+ dsi_connector->status = connector_status_connected;
+ }
+ }
+
+ dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL);
+ if (!dpi_output) {
+ DRM_ERROR("No memory\n");
+ return NULL;
+ }
+
+	dpi_output->panel_on = 0;
+
+ dpi_output->dev = dev;
+ if (mdfld_get_panel_type(dev, pipe) != TC35876X)
+ dpi_output->p_funcs = p_funcs;
+ dpi_output->first_boot = 1;
+
+ /*get fixed mode*/
+ dsi_config = mdfld_dsi_get_config(dsi_connector);
+
+ /*create drm encoder object*/
+ connector = &dsi_connector->base.base;
+ encoder = &dpi_output->base.base.base;
+ drm_encoder_init(dev,
+ encoder,
+ p_funcs->encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+ drm_encoder_helper_add(encoder,
+ p_funcs->encoder_helper_funcs);
+
+ /*attach to given connector*/
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ /*set possible crtcs and clones*/
+ if (dsi_connector->pipe) {
+ encoder->possible_crtcs = (1 << 2);
+ encoder->possible_clones = (1 << 1);
+ } else {
+ encoder->possible_crtcs = (1 << 0);
+ encoder->possible_clones = (1 << 0);
+ }
+
+ dsi_connector->base.encoder = &dpi_output->base.base;
+
+ return &dpi_output->base;
+}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
new file mode 100644
index 00000000000..2b40663e169
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#ifndef __MDFLD_DSI_DPI_H__
+#define __MDFLD_DSI_DPI_H__
+
+#include "mdfld_dsi_output.h"
+#include "mdfld_output.h"
+
+struct mdfld_dsi_dpi_timing {
+ u16 hsync_count;
+ u16 hbp_count;
+ u16 hfp_count;
+ u16 hactive_count;
+ u16 vsync_count;
+ u16 vbp_count;
+ u16 vfp_count;
+};
+
+struct mdfld_dsi_dpi_output {
+ struct mdfld_dsi_encoder base;
+ struct drm_device *dev;
+
+ int panel_on;
+ int first_boot;
+
+ const struct panel_funcs *p_funcs;
+};
+
+#define MDFLD_DSI_DPI_OUTPUT(dsi_encoder)\
+ container_of(dsi_encoder, struct mdfld_dsi_dpi_output, base)
+
+/* Export functions */
+extern int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
+ struct mdfld_dsi_dpi_timing *dpi_timing,
+ int num_lane, int bpp);
+extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
+ struct mdfld_dsi_connector *dsi_connector,
+ const struct panel_funcs *p_funcs);
+
+/* MDFLD DPI helper functions */
+extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode);
+extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder);
+extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder);
+extern void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output,
+ int pipe);
+extern void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
+ int pipe);
+#endif /*__MDFLD_DSI_DPI_H__*/
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
new file mode 100644
index 00000000000..6e91b20ce2e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -0,0 +1,614 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include <linux/module.h>
+
+#include "mdfld_dsi_output.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "tc35876x-dsi-lvds.h"
+#include <linux/pm_runtime.h>
+#include <asm/intel_scu_ipc.h>
+
+/* Get the LABC setting from the command line. */
+static int LABC_control = 1;
+
+#ifdef MODULE
+module_param(LABC_control, int, 0644);
+#else
+
+static int __init parse_LABC_control(char *arg)
+{
+	/*
+	 * LABC control can be passed in as a cmdline parameter:
+	 * add LABC=1 to enable the feature, LABC=0 to disable it.
+	 */
+ if (!arg)
+ return -EINVAL;
+
+ if (!strcasecmp(arg, "0"))
+ LABC_control = 0;
+ else if (!strcasecmp(arg, "1"))
+ LABC_control = 1;
+
+ return 0;
+}
+early_param("LABC", parse_LABC_control);
+#endif
+
+/**
+ * Check and see if the generic control or data buffer is empty and ready.
+ */
+void mdfld_dsi_gen_fifo_ready(struct drm_device *dev, u32 gen_fifo_stat_reg,
+ u32 fifo_stat)
+{
+ u32 GEN_BF_time_out_count;
+
+	/* Check MIPI Adapter command registers */
+ for (GEN_BF_time_out_count = 0;
+ GEN_BF_time_out_count < GEN_FB_TIME_OUT;
+ GEN_BF_time_out_count++) {
+ if ((REG_READ(gen_fifo_stat_reg) & fifo_stat) == fifo_stat)
+ break;
+ udelay(100);
+ }
+
+ if (GEN_BF_time_out_count == GEN_FB_TIME_OUT)
+ DRM_ERROR("mdfld_dsi_gen_fifo_ready, Timeout. gen_fifo_stat_reg = 0x%x.\n",
+ gen_fifo_stat_reg);
+}
+
+/**
+ * Manage the DSI MIPI keyboard and display brightness.
+ * FIXME: this is exported to OSPM code; should work out a specific
+ * display interface to OSPM.
+ */
+
+void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+ struct mdfld_dsi_pkg_sender *sender =
+ mdfld_dsi_get_pkg_sender(dsi_config);
+ struct drm_device *dev;
+ struct drm_psb_private *dev_priv;
+ u32 gen_ctrl_val;
+
+ if (!sender) {
+ DRM_ERROR("No sender found\n");
+ return;
+ }
+
+ dev = sender->dev;
+ dev_priv = dev->dev_private;
+
+	/* Set default display backlight value to 85% (0xd8) */
+ mdfld_dsi_send_mcs_short(sender, write_display_brightness, 0xd8, 1,
+ true);
+
+	/* Set minimum brightness setting of CABC function to 20% (0x33) */
+ mdfld_dsi_send_mcs_short(sender, write_cabc_min_bright, 0x33, 1, true);
+
+	/* Enable backlight and/or LABC */
+	gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | DISPLAY_DIMMING_ON |
+								BACKLIGHT_ON;
+	if (LABC_control == 1)
+		gen_ctrl_val |= DISPLAY_BRIGHTNESS_AUTO | GAMMA_AUTO |
+				AMBIENT_LIGHT_SENSE_ON;
+
+ dev_priv->mipi_ctrl_display = gen_ctrl_val;
+
+ mdfld_dsi_send_mcs_short(sender, write_ctrl_display, (u8)gen_ctrl_val,
+ 1, true);
+
+ mdfld_dsi_send_mcs_short(sender, write_ctrl_cabc, UI_IMAGE, 1, true);
+}
+
+void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe, int level)
+{
+ struct mdfld_dsi_pkg_sender *sender;
+ struct drm_psb_private *dev_priv;
+ struct mdfld_dsi_config *dsi_config;
+ u32 gen_ctrl_val = 0;
+ int p_type = TMD_VID;
+
+ if (!dev || (pipe != 0 && pipe != 2)) {
+ DRM_ERROR("Invalid parameter\n");
+ return;
+ }
+
+ p_type = mdfld_get_panel_type(dev, 0);
+
+ dev_priv = dev->dev_private;
+
+ if (pipe)
+ dsi_config = dev_priv->dsi_configs[1];
+ else
+ dsi_config = dev_priv->dsi_configs[0];
+
+ sender = mdfld_dsi_get_pkg_sender(dsi_config);
+
+ if (!sender) {
+ DRM_ERROR("No sender found\n");
+ return;
+ }
+
+ gen_ctrl_val = (level * 0xff / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL) & 0xff;
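+	/* e.g. level == 50 maps to 50 * 255 / 100 == 127 (0x7f), given
+	 * MDFLD_DSI_BRIGHTNESS_MAX_LEVEL == 100 */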
+
+ dev_dbg(sender->dev->dev, "pipe = %d, gen_ctrl_val = %d.\n",
+ pipe, gen_ctrl_val);
+
+ if (p_type == TMD_VID) {
+ /* Set display backlight value */
+ mdfld_dsi_send_mcs_short(sender, tmd_write_display_brightness,
+ (u8)gen_ctrl_val, 1, true);
+ } else {
+ /* Set display backlight value */
+ mdfld_dsi_send_mcs_short(sender, write_display_brightness,
+ (u8)gen_ctrl_val, 1, true);
+
+ /* Enable backlight control */
+ if (level == 0)
+ gen_ctrl_val = 0;
+ else
+ gen_ctrl_val = dev_priv->mipi_ctrl_display;
+
+ mdfld_dsi_send_mcs_short(sender, write_ctrl_display,
+ (u8)gen_ctrl_val, 1, true);
+ }
+}
+
+static int mdfld_dsi_get_panel_status(struct mdfld_dsi_config *dsi_config,
+ u8 dcs, u32 *data, bool hs)
+{
+ struct mdfld_dsi_pkg_sender *sender
+ = mdfld_dsi_get_pkg_sender(dsi_config);
+
+ if (!sender || !data) {
+ DRM_ERROR("Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ return mdfld_dsi_read_mcs(sender, dcs, data, 1, hs);
+}
+
+int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config, u32 *mode,
+ bool hs)
+{
+ if (!dsi_config || !mode) {
+ DRM_ERROR("Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ return mdfld_dsi_get_panel_status(dsi_config, 0x0a, mode, hs);
+}
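+
+/*
+ * Illustrative usage (a sketch, not called anywhere in this patch):
+ * reading DCS 0x0a in LP mode and testing DSI_POWER_MODE_DISPLAY_ON
+ * tells whether the panel reports "display on".
+ *
+ *	u32 pm;
+ *
+ *	if (!mdfld_dsi_get_power_mode(dsi_config, &pm, false) &&
+ *	    (pm & DSI_POWER_MODE_DISPLAY_ON))
+ *		... panel is up ...
+ */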
+
+/*
+ * NOTE: this function was used by OSPM.
+ * TODO: will be removed later, should work out display interfaces for OSPM
+ */
+void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+ if (!dsi_config || ((pipe != 0) && (pipe != 2))) {
+ DRM_ERROR("Invalid parameters\n");
+ return;
+ }
+
+ mdfld_dsi_dpi_controller_init(dsi_config, pipe);
+}
+
+static void mdfld_dsi_connector_save(struct drm_connector *connector)
+{
+}
+
+static void mdfld_dsi_connector_restore(struct drm_connector *connector)
+{
+}
+
+/* FIXME: start using the force parameter */
+static enum drm_connector_status
+mdfld_dsi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct mdfld_dsi_connector *dsi_connector
+ = mdfld_dsi_connector(connector);
+
+ dsi_connector->status = connector_status_connected;
+
+ return dsi_connector->status;
+}
+
+static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct drm_encoder *encoder = connector->encoder;
+
+ if (!strcmp(property->name, "scaling mode") && encoder) {
+ struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
+		bool center_change;
+ uint64_t val;
+
+ if (!gma_crtc)
+ goto set_prop_error;
+
+ switch (value) {
+ case DRM_MODE_SCALE_FULLSCREEN:
+ break;
+ case DRM_MODE_SCALE_NO_SCALE:
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ goto set_prop_error;
+ }
+
+ if (drm_object_property_get_value(&connector->base, property, &val))
+ goto set_prop_error;
+
+ if (val == value)
+ goto set_prop_done;
+
+ if (drm_object_property_set_value(&connector->base,
+ property, value))
+ goto set_prop_error;
+
+		center_change = (val == DRM_MODE_SCALE_NO_SCALE) ||
+			(value == DRM_MODE_SCALE_NO_SCALE);
+
+ if (gma_crtc->saved_mode.hdisplay != 0 &&
+ gma_crtc->saved_mode.vdisplay != 0) {
+			if (center_change) {
+ if (!drm_crtc_helper_set_mode(encoder->crtc,
+ &gma_crtc->saved_mode,
+ encoder->crtc->x,
+ encoder->crtc->y,
+ encoder->crtc->primary->fb))
+ goto set_prop_error;
+ } else {
+ struct drm_encoder_helper_funcs *funcs =
+ encoder->helper_private;
+ funcs->mode_set(encoder,
+ &gma_crtc->saved_mode,
+ &gma_crtc->saved_adjusted_mode);
+ }
+ }
+ } else if (!strcmp(property->name, "backlight") && encoder) {
+ if (drm_object_property_set_value(&connector->base, property,
+ value))
+ goto set_prop_error;
+ else
+ gma_backlight_set(encoder->dev, value);
+ }
+set_prop_done:
+ return 0;
+set_prop_error:
+ return -1;
+}
+
+static void mdfld_dsi_connector_destroy(struct drm_connector *connector)
+{
+ struct mdfld_dsi_connector *dsi_connector =
+ mdfld_dsi_connector(connector);
+ struct mdfld_dsi_pkg_sender *sender;
+
+ if (!dsi_connector)
+ return;
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ sender = dsi_connector->pkg_sender;
+ mdfld_dsi_pkg_sender_destroy(sender);
+ kfree(dsi_connector);
+}
+
+static int mdfld_dsi_connector_get_modes(struct drm_connector *connector)
+{
+ struct mdfld_dsi_connector *dsi_connector =
+ mdfld_dsi_connector(connector);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_get_config(dsi_connector);
+ struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
+ struct drm_display_mode *dup_mode = NULL;
+ struct drm_device *dev = connector->dev;
+
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+
+ if (fixed_mode) {
+ dev_dbg(dev->dev, "fixed_mode %dx%d\n",
+ fixed_mode->hdisplay, fixed_mode->vdisplay);
+ dup_mode = drm_mode_duplicate(dev, fixed_mode);
+ drm_mode_probed_add(connector, dup_mode);
+ return 1;
+ }
+ DRM_ERROR("Didn't get any modes!\n");
+ return 0;
+}
+
+static int mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct mdfld_dsi_connector *dsi_connector =
+ mdfld_dsi_connector(connector);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_get_config(dsi_connector);
+ struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+	/*
+	 * FIXME: the current DC has no fitting unit, so reject any mode
+	 * setting request that differs from the fixed mode.
+	 * Will figure out a way to do up-scaling (panel fitting) later.
+	 */
+ if (fixed_mode) {
+ if (mode->hdisplay != fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay != fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ return MODE_OK;
+}
+
+static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode)
+{
+ if (mode == connector->dpms)
+ return;
+
+	/* First, execute DPMS */
+	drm_helper_connector_dpms(connector, mode);
+}
+
+static struct drm_encoder *mdfld_dsi_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct mdfld_dsi_connector *dsi_connector =
+ mdfld_dsi_connector(connector);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_get_config(dsi_connector);
+ return &dsi_config->encoder->base.base;
+}
+
+/*DSI connector funcs*/
+static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
+	.dpms = mdfld_dsi_connector_dpms,
+ .save = mdfld_dsi_connector_save,
+ .restore = mdfld_dsi_connector_restore,
+ .detect = mdfld_dsi_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = mdfld_dsi_connector_set_property,
+ .destroy = mdfld_dsi_connector_destroy,
+};
+
+/*DSI connector helper funcs*/
+static const struct drm_connector_helper_funcs
+ mdfld_dsi_connector_helper_funcs = {
+ .get_modes = mdfld_dsi_connector_get_modes,
+ .mode_valid = mdfld_dsi_connector_mode_valid,
+ .best_encoder = mdfld_dsi_connector_best_encoder,
+};
+
+static int mdfld_dsi_get_default_config(struct drm_device *dev,
+ struct mdfld_dsi_config *config, int pipe)
+{
+ if (!dev || !config) {
+		DRM_ERROR("Invalid parameters\n");
+ return -EINVAL;
+ }
+
+ config->bpp = 24;
+ if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+ config->lane_count = 4;
+ else
+ config->lane_count = 2;
+ config->channel_num = 0;
+
+ if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
+ config->video_mode = MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE;
+ else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+ config->video_mode =
+ MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS;
+ else
+ config->video_mode = MDFLD_DSI_VIDEO_BURST_MODE;
+
+ return 0;
+}
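+
+/*
+ * For example, a TC35876X bridge panel ends up with bpp == 24,
+ * lane_count == 4 and the non-burst sync-events video mode, while a
+ * TMD_VID panel gets two lanes and the sync-pulse mode.
+ */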
+
+int mdfld_dsi_panel_reset(int pipe)
+{
+ unsigned gpio;
+ int ret = 0;
+
+ switch (pipe) {
+ case 0:
+ gpio = 128;
+ break;
+ case 2:
+ gpio = 34;
+ break;
+ default:
+ DRM_ERROR("Invalid output\n");
+ return -EINVAL;
+ }
+
+ ret = gpio_request(gpio, "gfx");
+ if (ret) {
+		DRM_ERROR("gpio_request failed\n");
+ return ret;
+ }
+
+ ret = gpio_direction_output(gpio, 1);
+ if (ret) {
+ DRM_ERROR("gpio_direction_output failed\n");
+ goto gpio_error;
+ }
+
+	gpio_get_value(gpio);
+
+gpio_error:
+ if (gpio_is_valid(gpio))
+ gpio_free(gpio);
+
+ return ret;
+}
+
+/*
+ * MIPI output init
+ * @dev: drm device
+ * @pipe: pipe number, 0 or 2
+ * @p_vid_funcs: panel callbacks for this output
+ *
+ * Do the initialization of a MIPI output, including creating the DRM mode
+ * objects and initializing the DSI output on @pipe.
+ */
+void mdfld_dsi_output_init(struct drm_device *dev,
+ int pipe,
+ const struct panel_funcs *p_vid_funcs)
+{
+ struct mdfld_dsi_config *dsi_config;
+ struct mdfld_dsi_connector *dsi_connector;
+ struct drm_connector *connector;
+ struct mdfld_dsi_encoder *encoder;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct panel_info dsi_panel_info;
+ u32 width_mm, height_mm;
+
+ dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
+
+ if (pipe != 0 && pipe != 2) {
+ DRM_ERROR("Invalid parameter\n");
+ return;
+ }
+
+	/* Create a new connector */
+ dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
+ if (!dsi_connector) {
+		DRM_ERROR("No memory\n");
+ return;
+ }
+
+ dsi_connector->pipe = pipe;
+
+ dsi_config = kzalloc(sizeof(struct mdfld_dsi_config),
+ GFP_KERNEL);
+ if (!dsi_config) {
+ DRM_ERROR("cannot allocate memory for DSI config\n");
+ goto dsi_init_err0;
+ }
+ mdfld_dsi_get_default_config(dev, dsi_config, pipe);
+
+ dsi_connector->private = dsi_config;
+
+ dsi_config->changed = 1;
+ dsi_config->dev = dev;
+
+ dsi_config->fixed_mode = p_vid_funcs->get_config_mode(dev);
+ if (p_vid_funcs->get_panel_info(dev, pipe, &dsi_panel_info))
+ goto dsi_init_err0;
+
+ width_mm = dsi_panel_info.width_mm;
+ height_mm = dsi_panel_info.height_mm;
+
+ dsi_config->mode = dsi_config->fixed_mode;
+ dsi_config->connector = dsi_connector;
+
+ if (!dsi_config->fixed_mode) {
+		DRM_ERROR("No panel fixed mode was found\n");
+ goto dsi_init_err0;
+ }
+
+ if (pipe && dev_priv->dsi_configs[0]) {
+ dsi_config->dvr_ic_inited = 0;
+ dev_priv->dsi_configs[1] = dsi_config;
+ } else if (pipe == 0) {
+ dsi_config->dvr_ic_inited = 1;
+ dev_priv->dsi_configs[0] = dsi_config;
+ } else {
+ DRM_ERROR("Trying to init MIPI1 before MIPI0\n");
+ goto dsi_init_err0;
+ }
+
+ connector = &dsi_connector->base.base;
+ drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
+
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->display_info.width_mm = width_mm;
+ connector->display_info.height_mm = height_mm;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /*attach properties*/
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ drm_object_attach_property(&connector->base,
+ dev_priv->backlight_property,
+ MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
+
+ /*init DSI package sender on this output*/
+ if (mdfld_dsi_pkg_sender_init(dsi_connector, pipe)) {
+ DRM_ERROR("Package Sender initialization failed on pipe %d\n",
+ pipe);
+ goto dsi_init_err0;
+ }
+
+ encoder = mdfld_dsi_dpi_init(dev, dsi_connector, p_vid_funcs);
+ if (!encoder) {
+ DRM_ERROR("Create DPI encoder failed\n");
+ goto dsi_init_err1;
+ }
+ encoder->private = dsi_config;
+ dsi_config->encoder = encoder;
+ encoder->base.type = (pipe == 0) ? INTEL_OUTPUT_MIPI :
+ INTEL_OUTPUT_MIPI2;
+ drm_sysfs_connector_add(connector);
+ return;
+
+ /*TODO: add code to destroy outputs on error*/
+dsi_init_err1:
+ /*destroy sender*/
+ mdfld_dsi_pkg_sender_destroy(dsi_connector->pkg_sender);
+
+ drm_connector_cleanup(connector);
+
+ kfree(dsi_config->fixed_mode);
+ kfree(dsi_config);
+dsi_init_err0:
+ kfree(dsi_connector);
+}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
new file mode 100644
index 00000000000..5b646c1f0c3
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
@@ -0,0 +1,377 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li <yaodong.li@intel.com>
+ */
+
+#ifndef __MDFLD_DSI_OUTPUT_H__
+#define __MDFLD_DSI_OUTPUT_H__
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "mdfld_output.h"
+
+#include <asm/intel-mid.h>
+
+#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
+#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
+#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
+#define FLD_MOD(orig, val, start, end) \
+ (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
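+
+/*
+ * Worked example: FLD_MASK(5, 0) == 0x3f and FLD_VAL(0x29, 5, 0) == 0x29,
+ * so FLD_MOD(0xffffffff, 0x29, 5, 0) == 0xffffffe9: bits 5..0 are
+ * replaced while all other bits are preserved.
+ */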
+
+#define REG_FLD_MOD(reg, val, start, end) \
+ REG_WRITE(reg, FLD_MOD(REG_READ(reg), val, start, end))
+
+static inline int REGISTER_FLD_WAIT(struct drm_device *dev, u32 reg,
+ u32 val, int start, int end)
+{
+ int t = 100000;
+
+ while (FLD_GET(REG_READ(reg), start, end) != val) {
+ if (--t == 0)
+ return 1;
+ }
+
+ return 0;
+}
+
+#define REG_FLD_WAIT(reg, val, start, end) \
+ REGISTER_FLD_WAIT(dev, reg, val, start, end)
+
+#define REG_BIT_WAIT(reg, val, bitnum) \
+ REGISTER_FLD_WAIT(dev, reg, val, bitnum, bitnum)
+
+#define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100
+
+#ifdef DEBUG
+#define CHECK_PIPE(pipe) ({ \
+ const typeof(pipe) __pipe = (pipe); \
+ BUG_ON(__pipe != 0 && __pipe != 2); \
+ __pipe; })
+#else
+#define CHECK_PIPE(pipe) (pipe)
+#endif
+
+/*
+ * The actual MIPI A -> MIPI C register offset is 0x800; scaling the pipe
+ * number by 0x400 works because only pipes 0 and 2 are valid here.
+ */
+#define REG_OFFSET(pipe) (CHECK_PIPE(pipe) * 0x400)
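+/* e.g. REG_OFFSET(0) == 0x000 (MIPI A) and REG_OFFSET(2) == 0x800 (MIPI C) */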
+
+/* mdfld DSI controller registers */
+#define MIPI_DEVICE_READY_REG(pipe) (0xb000 + REG_OFFSET(pipe))
+#define MIPI_INTR_STAT_REG(pipe) (0xb004 + REG_OFFSET(pipe))
+#define MIPI_INTR_EN_REG(pipe) (0xb008 + REG_OFFSET(pipe))
+#define MIPI_DSI_FUNC_PRG_REG(pipe) (0xb00c + REG_OFFSET(pipe))
+#define MIPI_HS_TX_TIMEOUT_REG(pipe) (0xb010 + REG_OFFSET(pipe))
+#define MIPI_LP_RX_TIMEOUT_REG(pipe) (0xb014 + REG_OFFSET(pipe))
+#define MIPI_TURN_AROUND_TIMEOUT_REG(pipe) (0xb018 + REG_OFFSET(pipe))
+#define MIPI_DEVICE_RESET_TIMER_REG(pipe) (0xb01c + REG_OFFSET(pipe))
+#define MIPI_DPI_RESOLUTION_REG(pipe) (0xb020 + REG_OFFSET(pipe))
+#define MIPI_DBI_FIFO_THROTTLE_REG(pipe) (0xb024 + REG_OFFSET(pipe))
+#define MIPI_HSYNC_COUNT_REG(pipe) (0xb028 + REG_OFFSET(pipe))
+#define MIPI_HBP_COUNT_REG(pipe) (0xb02c + REG_OFFSET(pipe))
+#define MIPI_HFP_COUNT_REG(pipe) (0xb030 + REG_OFFSET(pipe))
+#define MIPI_HACTIVE_COUNT_REG(pipe) (0xb034 + REG_OFFSET(pipe))
+#define MIPI_VSYNC_COUNT_REG(pipe) (0xb038 + REG_OFFSET(pipe))
+#define MIPI_VBP_COUNT_REG(pipe) (0xb03c + REG_OFFSET(pipe))
+#define MIPI_VFP_COUNT_REG(pipe) (0xb040 + REG_OFFSET(pipe))
+#define MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe) (0xb044 + REG_OFFSET(pipe))
+#define MIPI_DPI_CONTROL_REG(pipe) (0xb048 + REG_OFFSET(pipe))
+#define MIPI_DPI_DATA_REG(pipe) (0xb04c + REG_OFFSET(pipe))
+#define MIPI_INIT_COUNT_REG(pipe) (0xb050 + REG_OFFSET(pipe))
+#define MIPI_MAX_RETURN_PACK_SIZE_REG(pipe) (0xb054 + REG_OFFSET(pipe))
+#define MIPI_VIDEO_MODE_FORMAT_REG(pipe) (0xb058 + REG_OFFSET(pipe))
+#define MIPI_EOT_DISABLE_REG(pipe) (0xb05c + REG_OFFSET(pipe))
+#define MIPI_LP_BYTECLK_REG(pipe) (0xb060 + REG_OFFSET(pipe))
+#define MIPI_LP_GEN_DATA_REG(pipe) (0xb064 + REG_OFFSET(pipe))
+#define MIPI_HS_GEN_DATA_REG(pipe) (0xb068 + REG_OFFSET(pipe))
+#define MIPI_LP_GEN_CTRL_REG(pipe) (0xb06c + REG_OFFSET(pipe))
+#define MIPI_HS_GEN_CTRL_REG(pipe) (0xb070 + REG_OFFSET(pipe))
+#define MIPI_GEN_FIFO_STAT_REG(pipe) (0xb074 + REG_OFFSET(pipe))
+#define MIPI_HS_LS_DBI_ENABLE_REG(pipe) (0xb078 + REG_OFFSET(pipe))
+#define MIPI_DPHY_PARAM_REG(pipe) (0xb080 + REG_OFFSET(pipe))
+#define MIPI_DBI_BW_CTRL_REG(pipe) (0xb084 + REG_OFFSET(pipe))
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe) (0xb088 + REG_OFFSET(pipe))
+
+#define MIPI_CTRL_REG(pipe) (0xb104 + REG_OFFSET(pipe))
+#define MIPI_DATA_ADD_REG(pipe) (0xb108 + REG_OFFSET(pipe))
+#define MIPI_DATA_LEN_REG(pipe) (0xb10c + REG_OFFSET(pipe))
+#define MIPI_CMD_ADD_REG(pipe) (0xb110 + REG_OFFSET(pipe))
+#define MIPI_CMD_LEN_REG(pipe) (0xb114 + REG_OFFSET(pipe))
+
+/* non-uniform reg offset */
+#define MIPI_PORT_CONTROL(pipe) (CHECK_PIPE(pipe) ? MIPI_C : MIPI)
+
+#define DSI_DEVICE_READY (0x1)
+#define DSI_POWER_STATE_ULPS_ENTER (0x2 << 1)
+#define DSI_POWER_STATE_ULPS_EXIT (0x1 << 1)
+#define DSI_POWER_STATE_ULPS_OFFSET (0x1)
+
+
+#define DSI_ONE_DATA_LANE (0x1)
+#define DSI_TWO_DATA_LANE (0x2)
+#define DSI_THREE_DATA_LANE			(0x3)
+#define DSI_FOUR_DATA_LANE (0x4)
+#define DSI_DPI_VIRT_CHANNEL_OFFSET (0x3)
+#define DSI_DBI_VIRT_CHANNEL_OFFSET (0x5)
+#define DSI_DPI_COLOR_FORMAT_RGB565 (0x01 << 7)
+#define DSI_DPI_COLOR_FORMAT_RGB666 (0x02 << 7)
+#define DSI_DPI_COLOR_FORMAT_RGB666_UNPACK (0x03 << 7)
+#define DSI_DPI_COLOR_FORMAT_RGB888 (0x04 << 7)
+#define DSI_DBI_COLOR_FORMAT_OPTION2 (0x05 << 13)
+
+#define DSI_INTR_STATE_RXSOTERROR BIT(0)
+
+#define DSI_INTR_STATE_SPL_PKG_SENT BIT(30)
+#define DSI_INTR_STATE_TE BIT(31)
+
+#define DSI_HS_TX_TIMEOUT_MASK (0xffffff)
+
+#define DSI_LP_RX_TIMEOUT_MASK (0xffffff)
+
+#define DSI_TURN_AROUND_TIMEOUT_MASK (0x3f)
+
+#define DSI_RESET_TIMER_MASK (0xffff)
+
+#define DSI_DBI_FIFO_WM_HALF (0x0)
+#define DSI_DBI_FIFO_WM_QUARTER (0x1)
+#define DSI_DBI_FIFO_WM_LOW (0x2)
+
+#define DSI_DPI_TIMING_MASK (0xffff)
+
+#define DSI_INIT_TIMER_MASK (0xffff)
+
+#define DSI_DBI_RETURN_PACK_SIZE_MASK (0x3ff)
+
+#define DSI_LP_BYTECLK_MASK (0x0ffff)
+
+#define DSI_HS_CTRL_GEN_SHORT_W0 (0x03)
+#define DSI_HS_CTRL_GEN_SHORT_W1 (0x13)
+#define DSI_HS_CTRL_GEN_SHORT_W2 (0x23)
+#define DSI_HS_CTRL_GEN_R0 (0x04)
+#define DSI_HS_CTRL_GEN_R1 (0x14)
+#define DSI_HS_CTRL_GEN_R2 (0x24)
+#define DSI_HS_CTRL_GEN_LONG_W (0x29)
+#define DSI_HS_CTRL_MCS_SHORT_W0 (0x05)
+#define DSI_HS_CTRL_MCS_SHORT_W1 (0x15)
+#define DSI_HS_CTRL_MCS_R0 (0x06)
+#define DSI_HS_CTRL_MCS_LONG_W (0x39)
+#define DSI_HS_CTRL_VC_OFFSET (0x06)
+#define DSI_HS_CTRL_WC_OFFSET (0x08)
+
+#define DSI_FIFO_GEN_HS_DATA_FULL BIT(0)
+#define DSI_FIFO_GEN_HS_DATA_HALF_EMPTY BIT(1)
+#define DSI_FIFO_GEN_HS_DATA_EMPTY BIT(2)
+#define DSI_FIFO_GEN_LP_DATA_FULL BIT(8)
+#define DSI_FIFO_GEN_LP_DATA_HALF_EMPTY BIT(9)
+#define DSI_FIFO_GEN_LP_DATA_EMPTY BIT(10)
+#define DSI_FIFO_GEN_HS_CTRL_FULL BIT(16)
+#define DSI_FIFO_GEN_HS_CTRL_HALF_EMPTY BIT(17)
+#define DSI_FIFO_GEN_HS_CTRL_EMPTY BIT(18)
+#define DSI_FIFO_GEN_LP_CTRL_FULL BIT(24)
+#define DSI_FIFO_GEN_LP_CTRL_HALF_EMPTY BIT(25)
+#define DSI_FIFO_GEN_LP_CTRL_EMPTY BIT(26)
+#define DSI_FIFO_DBI_EMPTY BIT(27)
+#define DSI_FIFO_DPI_EMPTY BIT(28)
+
+#define DSI_DBI_HS_LP_SWITCH_MASK (0x1)
+
+#define DSI_HS_LP_SWITCH_COUNTER_OFFSET (0x0)
+#define DSI_LP_HS_SWITCH_COUNTER_OFFSET (0x16)
+
+#define DSI_DPI_CTRL_HS_SHUTDOWN (0x00000001)
+#define DSI_DPI_CTRL_HS_TURN_ON (0x00000002)
+
+/*dsi power modes*/
+#define DSI_POWER_MODE_DISPLAY_ON BIT(2)
+#define DSI_POWER_MODE_NORMAL_ON BIT(3)
+#define DSI_POWER_MODE_SLEEP_OUT BIT(4)
+#define DSI_POWER_MODE_PARTIAL_ON BIT(5)
+#define DSI_POWER_MODE_IDLE_ON BIT(6)
+
+enum {
+ MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE = 1,
+ MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS = 2,
+ MDFLD_DSI_VIDEO_BURST_MODE = 3,
+};
+
+#define DSI_DPI_COMPLETE_LAST_LINE BIT(2)
+#define DSI_DPI_DISABLE_BTA BIT(3)
+
+struct mdfld_dsi_connector {
+ struct gma_connector base;
+
+ int pipe;
+ void *private;
+ void *pkg_sender;
+
+ /* Connection status */
+ enum drm_connector_status status;
+};
+
+struct mdfld_dsi_encoder {
+ struct gma_encoder base;
+ void *private;
+};
+
+/*
+ * DSI config: consists of one DSI connector and two DSI encoders.
+ * DRM will pick up the DSI encoder based on the different configs.
+ */
+struct mdfld_dsi_config {
+ struct drm_device *dev;
+ struct drm_display_mode *fixed_mode;
+ struct drm_display_mode *mode;
+
+ struct mdfld_dsi_connector *connector;
+ struct mdfld_dsi_encoder *encoder;
+
+ int changed;
+
+ int bpp;
+ int lane_count;
+ /*Virtual channel number for this encoder*/
+ int channel_num;
+ /*video mode configure*/
+ int video_mode;
+
+ int dvr_ic_inited;
+};
+
+static inline struct mdfld_dsi_connector *mdfld_dsi_connector(
+ struct drm_connector *connector)
+{
+ struct gma_connector *gma_connector;
+
+ gma_connector = to_gma_connector(connector);
+
+ return container_of(gma_connector, struct mdfld_dsi_connector, base);
+}
+
+static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder(
+ struct drm_encoder *encoder)
+{
+ struct gma_encoder *gma_encoder;
+
+ gma_encoder = to_gma_encoder(encoder);
+
+ return container_of(gma_encoder, struct mdfld_dsi_encoder, base);
+}
+
+static inline struct mdfld_dsi_config *
+ mdfld_dsi_get_config(struct mdfld_dsi_connector *connector)
+{
+ if (!connector)
+ return NULL;
+ return (struct mdfld_dsi_config *)connector->private;
+}
+
+static inline void *mdfld_dsi_get_pkg_sender(struct mdfld_dsi_config *config)
+{
+ struct mdfld_dsi_connector *dsi_connector;
+
+ if (!config)
+ return NULL;
+
+ dsi_connector = config->connector;
+
+ if (!dsi_connector)
+ return NULL;
+
+ return dsi_connector->pkg_sender;
+}
+
+static inline struct mdfld_dsi_config *
+ mdfld_dsi_encoder_get_config(struct mdfld_dsi_encoder *encoder)
+{
+ if (!encoder)
+ return NULL;
+ return (struct mdfld_dsi_config *)encoder->private;
+}
+
+static inline struct mdfld_dsi_connector *
+ mdfld_dsi_encoder_get_connector(struct mdfld_dsi_encoder *encoder)
+{
+ struct mdfld_dsi_config *config;
+
+ if (!encoder)
+ return NULL;
+
+ config = mdfld_dsi_encoder_get_config(encoder);
+ if (!config)
+ return NULL;
+
+ return config->connector;
+}
+
+static inline void *mdfld_dsi_encoder_get_pkg_sender(
+ struct mdfld_dsi_encoder *encoder)
+{
+ struct mdfld_dsi_config *dsi_config;
+
+ dsi_config = mdfld_dsi_encoder_get_config(encoder);
+ if (!dsi_config)
+ return NULL;
+
+ return mdfld_dsi_get_pkg_sender(dsi_config);
+}
+
+static inline int mdfld_dsi_encoder_get_pipe(struct mdfld_dsi_encoder *encoder)
+{
+ struct mdfld_dsi_connector *connector;
+
+ if (!encoder)
+ return -1;
+
+ connector = mdfld_dsi_encoder_get_connector(encoder);
+ if (!connector)
+ return -1;
+ return connector->pipe;
+}
+
+/* Export functions */
+extern void mdfld_dsi_gen_fifo_ready(struct drm_device *dev,
+ u32 gen_fifo_stat_reg, u32 fifo_stat);
+extern void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config,
+ int pipe);
+extern void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe,
+ int level);
+extern void mdfld_dsi_output_init(struct drm_device *dev,
+ int pipe,
+ const struct panel_funcs *p_vid_funcs);
+extern void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config,
+ int pipe);
+
+extern int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
+ u32 *mode, bool hs);
+extern int mdfld_dsi_panel_reset(int pipe);
+
+#endif /*__MDFLD_DSI_OUTPUT_H__*/
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
new file mode 100644
index 00000000000..87885d8c06e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -0,0 +1,688 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jackie Li <yaodong.li@intel.com>
+ */
+
+#include <linux/freezer.h>
+
+#include "mdfld_dsi_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "mdfld_dsi_dpi.h"
+
+#define MDFLD_DSI_READ_MAX_COUNT 5000
+
+enum data_type {
+ DSI_DT_GENERIC_SHORT_WRITE_0 = 0x03,
+ DSI_DT_GENERIC_SHORT_WRITE_1 = 0x13,
+ DSI_DT_GENERIC_SHORT_WRITE_2 = 0x23,
+ DSI_DT_GENERIC_READ_0 = 0x04,
+ DSI_DT_GENERIC_READ_1 = 0x14,
+ DSI_DT_GENERIC_READ_2 = 0x24,
+ DSI_DT_GENERIC_LONG_WRITE = 0x29,
+ DSI_DT_DCS_SHORT_WRITE_0 = 0x05,
+ DSI_DT_DCS_SHORT_WRITE_1 = 0x15,
+ DSI_DT_DCS_READ = 0x06,
+ DSI_DT_DCS_LONG_WRITE = 0x39,
+};
+
+enum {
+ MDFLD_DSI_PANEL_MODE_SLEEP = 0x1,
+};
+
+enum {
+ MDFLD_DSI_PKG_SENDER_FREE = 0x0,
+ MDFLD_DSI_PKG_SENDER_BUSY = 0x1,
+};
+
+static const char *const dsi_errors[] = {
+ "RX SOT Error",
+ "RX SOT Sync Error",
+ "RX EOT Sync Error",
+ "RX Escape Mode Entry Error",
+ "RX LP TX Sync Error",
+ "RX HS Receive Timeout Error",
+ "RX False Control Error",
+ "RX ECC Single Bit Error",
+ "RX ECC Multibit Error",
+ "RX Checksum Error",
+ "RX DSI Data Type Not Recognised",
+ "RX DSI VC ID Invalid",
+ "TX False Control Error",
+ "TX ECC Single Bit Error",
+ "TX ECC Multibit Error",
+ "TX Checksum Error",
+ "TX DSI Data Type Not Recognised",
+ "TX DSI VC ID invalid",
+	"High Contention",
+	"Low Contention",
+ "DPI FIFO Under run",
+ "HS TX Timeout",
+ "LP RX Timeout",
+ "Turn Around ACK Timeout",
+ "ACK With No Error",
+ "RX Invalid TX Length",
+ "RX Prot Violation",
+ "HS Generic Write FIFO Full",
+ "LP Generic Write FIFO Full",
+	"Generic Read Data Avail",
+ "Special Packet Sent",
+ "Tearing Effect",
+};
+
+static inline int wait_for_gen_fifo_empty(struct mdfld_dsi_pkg_sender *sender,
+ u32 mask)
+{
+ struct drm_device *dev = sender->dev;
+ u32 gen_fifo_stat_reg = sender->mipi_gen_fifo_stat_reg;
+ int retry = 0xffff;
+
+ while (retry--) {
+ if ((mask & REG_READ(gen_fifo_stat_reg)) == mask)
+ return 0;
+ udelay(100);
+ }
+ DRM_ERROR("fifo is NOT empty 0x%08x\n", REG_READ(gen_fifo_stat_reg));
+ return -EIO;
+}
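+
+/*
+ * With retry == 0xffff and a 100 us poll interval, the wait above is
+ * bounded at roughly 6.5 seconds.
+ */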
+
+static int wait_for_all_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+	return wait_for_gen_fifo_empty(sender, (DSI_FIFO_GEN_HS_DATA_EMPTY |
+			DSI_FIFO_GEN_LP_DATA_EMPTY |
+			DSI_FIFO_GEN_HS_CTRL_EMPTY |
+			DSI_FIFO_GEN_LP_CTRL_EMPTY |
+			DSI_FIFO_DBI_EMPTY | DSI_FIFO_DPI_EMPTY));
+}
+
+static int wait_for_lp_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+	return wait_for_gen_fifo_empty(sender, (DSI_FIFO_GEN_LP_DATA_EMPTY |
+			DSI_FIFO_GEN_LP_CTRL_EMPTY));
+}
+
+static int wait_for_hs_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+	return wait_for_gen_fifo_empty(sender, (DSI_FIFO_GEN_HS_DATA_EMPTY |
+			DSI_FIFO_GEN_HS_CTRL_EMPTY));
+}
+
+static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask)
+{
+ u32 intr_stat_reg = sender->mipi_intr_stat_reg;
+ struct drm_device *dev = sender->dev;
+
+ dev_dbg(sender->dev->dev, "Handling error 0x%08x\n", mask);
+
+ switch (mask) {
+ case BIT(0):
+ case BIT(1):
+ case BIT(2):
+ case BIT(3):
+ case BIT(4):
+ case BIT(5):
+ case BIT(6):
+ case BIT(7):
+ case BIT(8):
+ case BIT(9):
+ case BIT(10):
+ case BIT(11):
+ case BIT(12):
+ case BIT(13):
+ dev_dbg(sender->dev->dev, "No Action required\n");
+ break;
+ case BIT(14):
+ /*wait for all fifo empty*/
+ /*wait_for_all_fifos_empty(sender)*/
+ break;
+ case BIT(15):
+ dev_dbg(sender->dev->dev, "No Action required\n");
+ break;
+ case BIT(16):
+ break;
+ case BIT(17):
+ break;
+ case BIT(18):
+ case BIT(19):
+ dev_dbg(sender->dev->dev, "High/Low contention detected\n");
+ /*wait for contention recovery time*/
+ /*mdelay(10);*/
+ /*wait for all fifo empty*/
+ if (0)
+ wait_for_all_fifos_empty(sender);
+ break;
+ case BIT(20):
+ dev_dbg(sender->dev->dev, "No Action required\n");
+ break;
+ case BIT(21):
+ /*wait for all fifo empty*/
+ /*wait_for_all_fifos_empty(sender);*/
+ break;
+ case BIT(22):
+ break;
+ case BIT(23):
+ case BIT(24):
+ case BIT(25):
+ case BIT(26):
+ case BIT(27):
+ dev_dbg(sender->dev->dev, "HS Gen fifo full\n");
+ REG_WRITE(intr_stat_reg, mask);
+ wait_for_hs_fifos_empty(sender);
+ break;
+ case BIT(28):
+ dev_dbg(sender->dev->dev, "LP Gen fifo full\n");
+ REG_WRITE(intr_stat_reg, mask);
+ wait_for_lp_fifos_empty(sender);
+ break;
+ case BIT(29):
+ case BIT(30):
+ case BIT(31):
+ dev_dbg(sender->dev->dev, "No Action required\n");
+ break;
+ }
+
+ if (mask & REG_READ(intr_stat_reg))
+ dev_dbg(sender->dev->dev,
+				"Cannot clear interrupt 0x%08x\n", mask);
+ return 0;
+}
+
+static int dsi_error_handler(struct mdfld_dsi_pkg_sender *sender)
+{
+ struct drm_device *dev = sender->dev;
+ u32 intr_stat_reg = sender->mipi_intr_stat_reg;
+ u32 mask;
+ u32 intr_stat;
+ int i;
+ int err = 0;
+
+ intr_stat = REG_READ(intr_stat_reg);
+
+ for (i = 0; i < 32; i++) {
+ mask = (0x00000001UL) << i;
+ if (intr_stat & mask) {
+ dev_dbg(sender->dev->dev, "[DSI]: %s\n", dsi_errors[i]);
+ err = handle_dsi_error(sender, mask);
+ if (err)
+ DRM_ERROR("Cannot handle error\n");
+ }
+ }
+ return err;
+}
+
+static int send_short_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+ u8 cmd, u8 param, bool hs)
+{
+ struct drm_device *dev = sender->dev;
+ u32 ctrl_reg;
+ u32 val;
+ u8 virtual_channel = 0;
+
+ if (hs) {
+ ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
+
+ /* FIXME: wait_for_hs_fifos_empty(sender); */
+ } else {
+ ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
+
+ /* FIXME: wait_for_lp_fifos_empty(sender); */
+ }
+
+ val = FLD_VAL(param, 23, 16) | FLD_VAL(cmd, 15, 8) |
+ FLD_VAL(virtual_channel, 7, 6) | FLD_VAL(data_type, 5, 0);
+
+ REG_WRITE(ctrl_reg, val);
+
+ return 0;
+}
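+
+/*
+ * Illustrative layout of the control word written above, for a DCS short
+ * write with one parameter (data type 0x15), e.g. cmd 0x51, param 0xd8,
+ * virtual channel 0:
+ *
+ *	val == 0xd8 << 16 | 0x51 << 8 | 0x15 == 0x00d85115
+ *	(bits 23:16 param, 15:8 cmd, 7:6 vc, 5:0 data type)
+ */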
+
+static int send_long_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+ u8 *data, int len, bool hs)
+{
+ struct drm_device *dev = sender->dev;
+ u32 ctrl_reg;
+ u32 data_reg;
+ u32 val;
+ u8 *p;
+ u8 b1, b2, b3, b4;
+ u8 virtual_channel = 0;
+ int i;
+
+ if (hs) {
+ ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
+ data_reg = sender->mipi_hs_gen_data_reg;
+
+ /* FIXME: wait_for_hs_fifos_empty(sender); */
+ } else {
+ ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
+ data_reg = sender->mipi_lp_gen_data_reg;
+
+ /* FIXME: wait_for_lp_fifos_empty(sender); */
+ }
+
+ p = data;
+ for (i = 0; i < len / 4; i++) {
+ b1 = *p++;
+ b2 = *p++;
+ b3 = *p++;
+ b4 = *p++;
+
+ REG_WRITE(data_reg, b4 << 24 | b3 << 16 | b2 << 8 | b1);
+ }
+
+ i = len % 4;
+ if (i) {
+ b1 = 0; b2 = 0; b3 = 0;
+
+ switch (i) {
+ case 3:
+ b1 = *p++;
+ b2 = *p++;
+ b3 = *p++;
+ break;
+ case 2:
+ b1 = *p++;
+ b2 = *p++;
+ break;
+ case 1:
+ b1 = *p++;
+ break;
+ }
+
+ REG_WRITE(data_reg, b3 << 16 | b2 << 8 | b1);
+ }
+
+ val = FLD_VAL(len, 23, 8) | FLD_VAL(virtual_channel, 7, 6) |
+ FLD_VAL(data_type, 5, 0);
+
+ REG_WRITE(ctrl_reg, val);
+
+ return 0;
+}
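+
+/*
+ * Illustrative packing (sketch): a 6-byte payload {a, b, c, d, e, f} is
+ * sent as two data-register writes, d<<24 | c<<16 | b<<8 | a followed by
+ * f<<8 | e, before the length/type word goes to the control register.
+ */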
+
+static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+ u8 *data, u16 len)
+{
+ u8 cmd;
+
+ switch (data_type) {
+ case DSI_DT_DCS_SHORT_WRITE_0:
+ case DSI_DT_DCS_SHORT_WRITE_1:
+ case DSI_DT_DCS_LONG_WRITE:
+ cmd = *data;
+ break;
+ default:
+ return 0;
+ }
+
+	/* Prevent other packages from being sent while we sleep */
+ sender->status = MDFLD_DSI_PKG_SENDER_BUSY;
+
+	/* Wait 120 ms in case an exit_sleep_mode was just sent */
+ if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) {
+ /*TODO: replace it with msleep later*/
+ mdelay(120);
+ }
+
+ if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) {
+ /*TODO: replace it with msleep later*/
+ mdelay(120);
+ }
+ return 0;
+}
+
+static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+ u8 *data, u16 len)
+{
+ u8 cmd;
+
+ switch (data_type) {
+ case DSI_DT_DCS_SHORT_WRITE_0:
+ case DSI_DT_DCS_SHORT_WRITE_1:
+ case DSI_DT_DCS_LONG_WRITE:
+ cmd = *data;
+ break;
+ default:
+ return 0;
+ }
+
+ /*update panel status*/
+ if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) {
+ sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP;
+ /*TODO: replace it with msleep later*/
+ mdelay(120);
+ } else if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) {
+ sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP;
+ /*TODO: replace it with msleep later*/
+ mdelay(120);
+ } else if (unlikely(cmd == DCS_SOFT_RESET)) {
+ /*TODO: replace it with msleep later*/
+ mdelay(5);
+ }
+
+ sender->status = MDFLD_DSI_PKG_SENDER_FREE;
+
+ return 0;
+}
+
+static int send_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+ u8 *data, u16 len, bool hs)
+{
+ int ret;
+
+ /*handle DSI error*/
+ ret = dsi_error_handler(sender);
+ if (ret) {
+ DRM_ERROR("Error handling failed\n");
+ return -EAGAIN;
+ }
+
+ /* send pkg */
+ if (sender->status == MDFLD_DSI_PKG_SENDER_BUSY) {
+ DRM_ERROR("sender is busy\n");
+ return -EAGAIN;
+ }
+
+ ret = send_pkg_prepare(sender, data_type, data, len);
+ if (ret) {
+ DRM_ERROR("send_pkg_prepare error\n");
+ return ret;
+ }
+
+ switch (data_type) {
+ case DSI_DT_GENERIC_SHORT_WRITE_0:
+ case DSI_DT_GENERIC_SHORT_WRITE_1:
+ case DSI_DT_GENERIC_SHORT_WRITE_2:
+ case DSI_DT_GENERIC_READ_0:
+ case DSI_DT_GENERIC_READ_1:
+ case DSI_DT_GENERIC_READ_2:
+ case DSI_DT_DCS_SHORT_WRITE_0:
+ case DSI_DT_DCS_SHORT_WRITE_1:
+ case DSI_DT_DCS_READ:
+ ret = send_short_pkg(sender, data_type, data[0], data[1], hs);
+ break;
+ case DSI_DT_GENERIC_LONG_WRITE:
+ case DSI_DT_DCS_LONG_WRITE:
+ ret = send_long_pkg(sender, data_type, data, len, hs);
+ break;
+ }
+
+ send_pkg_done(sender, data_type, data, len);
+
+ /*FIXME: should I query complete and fifo empty here?*/
+
+ return ret;
+}
+
+int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
+ u32 len, bool hs)
+{
+ unsigned long flags;
+
+ if (!sender || !data || !len) {
+ DRM_ERROR("Invalid parameters\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&sender->lock, flags);
+ send_pkg(sender, DSI_DT_DCS_LONG_WRITE, data, len, hs);
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ return 0;
+}
+
+int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
+ u8 param, u8 param_num, bool hs)
+{
+ u8 data[2];
+ unsigned long flags;
+ u8 data_type;
+
+ if (!sender) {
+ DRM_ERROR("Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ data[0] = cmd;
+
+ if (param_num) {
+ data_type = DSI_DT_DCS_SHORT_WRITE_1;
+ data[1] = param;
+ } else {
+ data_type = DSI_DT_DCS_SHORT_WRITE_0;
+ data[1] = 0;
+ }
+
+ spin_lock_irqsave(&sender->lock, flags);
+ send_pkg(sender, data_type, data, sizeof(data), hs);
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ return 0;
+}
+
+int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
+ u8 param1, u8 param_num, bool hs)
+{
+ u8 data[2];
+ unsigned long flags;
+ u8 data_type;
+
+ if (!sender || param_num > 2) {
+ DRM_ERROR("Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ switch (param_num) {
+ case 0:
+ data_type = DSI_DT_GENERIC_SHORT_WRITE_0;
+ data[0] = 0;
+ data[1] = 0;
+ break;
+ case 1:
+ data_type = DSI_DT_GENERIC_SHORT_WRITE_1;
+ data[0] = param0;
+ data[1] = 0;
+ break;
+ case 2:
+ data_type = DSI_DT_GENERIC_SHORT_WRITE_2;
+ data[0] = param0;
+ data[1] = param1;
+ break;
+ }
+
+ spin_lock_irqsave(&sender->lock, flags);
+ send_pkg(sender, data_type, data, sizeof(data), hs);
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ return 0;
+}
+
+int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
+ u32 len, bool hs)
+{
+ unsigned long flags;
+
+ if (!sender || !data || !len) {
+ DRM_ERROR("Invalid parameters\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&sender->lock, flags);
+ send_pkg(sender, DSI_DT_GENERIC_LONG_WRITE, data, len, hs);
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ return 0;
+}
+
+static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+ u8 *data, u16 len, u32 *data_out, u16 len_out, bool hs)
+{
+	unsigned long flags;
+	struct drm_device *dev;
+	int i;
+	u32 gen_data_reg;
+	int retry = MDFLD_DSI_READ_MAX_COUNT;
+
+	if (!sender || !data_out || !len_out) {
+		DRM_ERROR("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	dev = sender->dev;
+
+	/*
+	 * Do the read:
+	 * 0) send out the generic read request
+	 * 1) poll the "read data available" interrupt
+	 * 2) read the data
+	 */
+ spin_lock_irqsave(&sender->lock, flags);
+
+ REG_WRITE(sender->mipi_intr_stat_reg, BIT(29));
+
+ if ((REG_READ(sender->mipi_intr_stat_reg) & BIT(29)))
+		DRM_ERROR("Cannot clear the read data valid interrupt\n");
+
+ /*send out read request*/
+ send_pkg(sender, data_type, data, len, hs);
+
+ /*polling read data avail interrupt*/
+ while (retry && !(REG_READ(sender->mipi_intr_stat_reg) & BIT(29))) {
+ udelay(100);
+ retry--;
+ }
+
+ if (!retry) {
+ spin_unlock_irqrestore(&sender->lock, flags);
+ return -ETIMEDOUT;
+ }
+
+ REG_WRITE(sender->mipi_intr_stat_reg, BIT(29));
+
+ /*read data*/
+ if (hs)
+ gen_data_reg = sender->mipi_hs_gen_data_reg;
+ else
+ gen_data_reg = sender->mipi_lp_gen_data_reg;
+
+ for (i = 0; i < len_out; i++)
+ *(data_out + i) = REG_READ(gen_data_reg);
+
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ return 0;
+}
+
+int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
+ u32 *data, u16 len, bool hs)
+{
+ if (!sender || !data || !len) {
+ DRM_ERROR("Invalid parameters\n");
+ return -EINVAL;
+ }
+
+ return __read_panel_data(sender, DSI_DT_DCS_READ, &cmd, 1,
+ data, len, hs);
+}
+
+int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
+ int pipe)
+{
+	struct mdfld_dsi_pkg_sender *pkg_sender;
+	struct mdfld_dsi_config *dsi_config;
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	const struct psb_offset *map;
+	u32 mipi_val = 0;
+
+	if (!dsi_connector) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	dsi_config = mdfld_dsi_get_config(dsi_connector);
+	dev = dsi_config->dev;
+	dev_priv = dev->dev_private;
+	map = &dev_priv->regmap[pipe];
+
+	pkg_sender = dsi_connector->pkg_sender;
+
+ if (!pkg_sender || IS_ERR(pkg_sender)) {
+ pkg_sender = kzalloc(sizeof(struct mdfld_dsi_pkg_sender),
+ GFP_KERNEL);
+ if (!pkg_sender) {
+ DRM_ERROR("Create DSI pkg sender failed\n");
+ return -ENOMEM;
+ }
+ dsi_connector->pkg_sender = (void *)pkg_sender;
+ }
+
+ pkg_sender->dev = dev;
+ pkg_sender->dsi_connector = dsi_connector;
+ pkg_sender->pipe = pipe;
+ pkg_sender->pkg_num = 0;
+ pkg_sender->panel_mode = 0;
+ pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
+
+ /*init regs*/
+ /* FIXME: should just copy the regmap ptr ? */
+ pkg_sender->dpll_reg = map->dpll;
+ pkg_sender->dspcntr_reg = map->cntr;
+ pkg_sender->pipeconf_reg = map->conf;
+ pkg_sender->dsplinoff_reg = map->linoff;
+ pkg_sender->dspsurf_reg = map->surf;
+ pkg_sender->pipestat_reg = map->status;
+
+ pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
+ pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe);
+ pkg_sender->mipi_hs_gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe);
+ pkg_sender->mipi_lp_gen_ctrl_reg = MIPI_LP_GEN_CTRL_REG(pipe);
+ pkg_sender->mipi_hs_gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe);
+ pkg_sender->mipi_gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
+ pkg_sender->mipi_data_addr_reg = MIPI_DATA_ADD_REG(pipe);
+ pkg_sender->mipi_data_len_reg = MIPI_DATA_LEN_REG(pipe);
+ pkg_sender->mipi_cmd_addr_reg = MIPI_CMD_ADD_REG(pipe);
+ pkg_sender->mipi_cmd_len_reg = MIPI_CMD_LEN_REG(pipe);
+
+ /*init lock*/
+ spin_lock_init(&pkg_sender->lock);
+
+ if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
+ /**
+ * For video mode, don't enable DPI timing output here,
+ * will init the DPI timing output during mode setting.
+ */
+ mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
+
+ if (pipe == 0)
+ mipi_val |= 0x2;
+
+ REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi_val);
+ REG_READ(MIPI_PORT_CONTROL(pipe));
+
+ /* do dsi controller init */
+ mdfld_dsi_controller_init(dsi_config, pipe);
+ }
+
+ return 0;
+}
+
+void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender)
+{
+ if (!sender || IS_ERR(sender))
+ return;
+
+ kfree(sender);
+}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
new file mode 100644
index 00000000000..459cd7ea8b8
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jackie Li <yaodong.li@intel.com>
+ */
+#ifndef __MDFLD_DSI_PKG_SENDER_H__
+#define __MDFLD_DSI_PKG_SENDER_H__
+
+#include <linux/kthread.h>
+
+#define MDFLD_MAX_DCS_PARAM 8
+
+struct mdfld_dsi_pkg_sender {
+ struct drm_device *dev;
+ struct mdfld_dsi_connector *dsi_connector;
+ u32 status;
+ u32 panel_mode;
+
+ int pipe;
+
+ spinlock_t lock;
+
+ u32 pkg_num;
+
+ /* Registers */
+ u32 dpll_reg;
+ u32 dspcntr_reg;
+ u32 pipeconf_reg;
+ u32 pipestat_reg;
+ u32 dsplinoff_reg;
+ u32 dspsurf_reg;
+
+ u32 mipi_intr_stat_reg;
+ u32 mipi_lp_gen_data_reg;
+ u32 mipi_hs_gen_data_reg;
+ u32 mipi_lp_gen_ctrl_reg;
+ u32 mipi_hs_gen_ctrl_reg;
+ u32 mipi_gen_fifo_stat_reg;
+ u32 mipi_data_addr_reg;
+ u32 mipi_data_len_reg;
+ u32 mipi_cmd_addr_reg;
+ u32 mipi_cmd_len_reg;
+};
+
+/* DCS definitions */
+#define DCS_SOFT_RESET 0x01
+#define DCS_ENTER_SLEEP_MODE 0x10
+#define DCS_EXIT_SLEEP_MODE 0x11
+#define DCS_SET_DISPLAY_OFF 0x28
+#define DCS_SET_DISPLAY_ON 0x29
+#define DCS_SET_COLUMN_ADDRESS 0x2a
+#define DCS_SET_PAGE_ADDRESS 0x2b
+#define DCS_WRITE_MEM_START 0x2c
+#define DCS_SET_TEAR_OFF 0x34
+#define DCS_SET_TEAR_ON 0x35
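+
+/*
+ * Illustrative usage (sketch): a DCS command with no parameter, such as
+ * "set display on", goes out as a short write, here in HS mode:
+ *
+ *	mdfld_dsi_send_mcs_short(sender, DCS_SET_DISPLAY_ON, 0, 0, true);
+ */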
+
+extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
+ int pipe);
+extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender);
+int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
+ u8 param, u8 param_num, bool hs);
+int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
+ u32 len, bool hs);
+int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
+ u8 param1, u8 param_num, bool hs);
+int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
+ u32 len, bool hs);
+/* Read interfaces */
+int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
+ u32 *data, u16 len, bool hs);
+
+#endif
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
new file mode 100644
index 00000000000..8cc8a5abbc7
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -0,0 +1,1035 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "psb_intel_reg.h"
+#include "gma_display.h"
+#include "framebuffer.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_output.h"
+
+/* Hardcoded currently */
+static int ksel = KSEL_CRYSTAL_19;
+
+struct psb_intel_range_t {
+ int min, max;
+};
+
+struct mrst_limit_t {
+ struct psb_intel_range_t dot, m, p1;
+};
+
+struct mrst_clock_t {
+ /* derived values */
+ int dot;
+ int m;
+ int p1;
+};
+
+#define COUNT_MAX 0x10000000
+
+void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ int count, temp;
+
+ switch (pipe) {
+ case 0:
+ case 1:
+ case 2:
+ break;
+ default:
+ DRM_ERROR("Illegal Pipe Number.\n");
+ return;
+ }
+
+ /* FIXME JLIU7_PO */
+ gma_wait_for_vblank(dev);
+ return;
+
+	/* Wait for the pipe disable to take effect. */
+ for (count = 0; count < COUNT_MAX; count++) {
+ temp = REG_READ(map->conf);
+ if ((temp & PIPEACONF_PIPE_STATE) == 0)
+ break;
+ }
+}
+
+void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ int count, temp;
+
+ switch (pipe) {
+ case 0:
+ case 1:
+ case 2:
+ break;
+ default:
+ DRM_ERROR("Illegal Pipe Number.\n");
+ return;
+ }
+
+ /* FIXME JLIU7_PO */
+ gma_wait_for_vblank(dev);
+ return;
+
+	/* Wait for the pipe enable to take effect. */
+	for (count = 0; count < COUNT_MAX; count++) {
+		temp = REG_READ(map->conf);
+		if ((temp & PIPEACONF_PIPE_STATE) != 0)
+ break;
+ }
+}
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+ u32 pfit_control;
+
+ pfit_control = REG_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+
+ /* 965 can place panel fitter on either pipe */
+ return (pfit_control >> 29) & 0x3;
+}
+
+static struct drm_device global_dev;
+
+void mdfld__intel_plane_set_alpha(int enable)
+{
+	struct drm_device *dev = &global_dev;
+ int dspcntr_reg = DSPACNTR;
+ u32 dspcntr;
+
+ dspcntr = REG_READ(dspcntr_reg);
+
+ if (enable) {
+ dspcntr &= ~DISPPLANE_32BPP_NO_ALPHA;
+ dspcntr |= DISPPLANE_32BPP;
+ } else {
+ dspcntr &= ~DISPPLANE_32BPP;
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ }
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+}
+
+static int check_fb(struct drm_framebuffer *fb)
+{
+ if (!fb)
+ return 0;
+
+ switch (fb->bits_per_pixel) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ return 0;
+ default:
+ DRM_ERROR("Unknown color depth\n");
+ return -EINVAL;
+ }
+}
+
+static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb);
+ int pipe = gma_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ unsigned long start, offset;
+ u32 dspcntr;
+ int ret;
+
+	memcpy(&global_dev, dev, sizeof(struct drm_device));
+
+ dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe);
+
+ /* no fb bound */
+ if (!crtc->primary->fb) {
+ dev_dbg(dev->dev, "No FB bound\n");
+ return 0;
+ }
+
+ ret = check_fb(crtc->primary->fb);
+ if (ret)
+ return ret;
+
+ if (pipe > 2) {
+ DRM_ERROR("Illegal Pipe Number.\n");
+ return -EINVAL;
+ }
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ start = psbfb->gtt->offset;
+ offset = y * crtc->primary->fb->pitches[0] + x * (crtc->primary->fb->bits_per_pixel / 8);
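+	/* e.g. at 32 bpp and (x, y) == (16, 4): offset == 4 * pitch + 64 bytes */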
+
+ REG_WRITE(map->stride, crtc->primary->fb->pitches[0]);
+ dspcntr = REG_READ(map->cntr);
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+ switch (crtc->primary->fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (crtc->primary->fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ }
+ REG_WRITE(map->cntr, dspcntr);
+
+ dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
+ start, offset, x, y);
+ REG_WRITE(map->linoff, offset);
+ REG_READ(map->linoff);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
+
+ gma_power_end(dev);
+
+ return 0;
+}
+
+/*
+ * Disable the pipe, plane and pll.
+ */
+void mdfld_disable_crtc(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ u32 temp;
+
+ dev_dbg(dev->dev, "pipe = %d\n", pipe);
+
+ if (pipe != 1)
+ mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe),
+ HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
+
+ /* Disable display plane */
+ temp = REG_READ(map->cntr);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(map->cntr,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
+ }
+
+ /* FIXME_JLIU7 MDFLD_PO revisit */
+
+ /* Next, disable display pipes */
+ temp = REG_READ(map->conf);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ temp &= ~PIPEACONF_ENABLE;
+ temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
+ REG_WRITE(map->conf, temp);
+ REG_READ(map->conf);
+
+		/* Wait for the pipe disable to take effect. */
+ mdfldWaitForPipeDisable(dev, pipe);
+ }
+
+ temp = REG_READ(map->dpll);
+ if (temp & DPLL_VCO_ENABLE) {
+ if ((pipe != 1 &&
+ !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF))
+ & PIPEACONF_ENABLE)) || pipe == 1) {
+ temp &= ~(DPLL_VCO_ENABLE);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
+ /* Wait for the clocks to turn off. */
+ /* FIXME_MDFLD PO may need more delay */
+ udelay(500);
+
+ if (!(temp & MDFLD_PWR_GATE_EN)) {
+ /* gating power of DPLL */
+ REG_WRITE(map->dpll, temp | MDFLD_PWR_GATE_EN);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(5000);
+ }
+ }
+ }
+
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ u32 pipeconf = dev_priv->pipeconf[pipe];
+ u32 temp;
+ int timeout = 0;
+
+ dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe);
+
+ /* Note: Old code uses pipe a stat for pipe b but that appears
+ to be a bug */
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable the DPLL */
+ temp = REG_READ(map->dpll);
+
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+			/* When ungating power of the DPLL, we need to wait
+			   0.5us before enabling the VCO */
+ if (temp & MDFLD_PWR_GATE_EN) {
+ temp &= ~MDFLD_PWR_GATE_EN;
+ REG_WRITE(map->dpll, temp);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+ }
+
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
+
+ /**
+ * wait for DSI PLL to lock
+ * NOTE: only need to poll status of pipe 0 and pipe 1,
+ * since both MIPI pipes share the same PLL.
+ */
+ while ((pipe != 2) && (timeout < 20000) &&
+ !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
+ udelay(150);
+ timeout++;
+ }
+ }
+
+ /* Enable the plane */
+ temp = REG_READ(map->cntr);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(map->cntr,
+ temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(map->base, REG_READ(map->base));
+ }
+
+ /* Enable the pipe */
+ temp = REG_READ(map->conf);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(map->conf, pipeconf);
+
+			/* Wait for the pipe enable to take effect. */
+ mdfldWaitForPipeEnable(dev, pipe);
+ }
+
+		/* Workaround for sighting 3741701: random blank X display.
+		   Perform the w/a in video mode only on pipe A or C. */
+ if (pipe == 0 || pipe == 2) {
+ REG_WRITE(map->status, REG_READ(map->status));
+ msleep(100);
+ if (PIPE_VBLANK_STATUS & REG_READ(map->status))
+ dev_dbg(dev->dev, "OK");
+ else {
+ dev_dbg(dev->dev, "STUCK!!!!");
+			/* Shut down the controller */
+ temp = REG_READ(map->cntr);
+ REG_WRITE(map->cntr,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ REG_WRITE(map->base, REG_READ(map->base));
+ /*mdfld_dsi_dpi_shut_down(dev, pipe);*/
+ REG_WRITE(0xb048, 1);
+ msleep(100);
+ temp = REG_READ(map->conf);
+ temp &= ~PIPEACONF_ENABLE;
+ REG_WRITE(map->conf, temp);
+			msleep(100); /* Wait for pipe disable */
+ REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0);
+ msleep(100);
+ REG_WRITE(0xb004, REG_READ(0xb004));
+			/* Try to bring the controller back up again */
+ REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1);
+ temp = REG_READ(map->cntr);
+ REG_WRITE(map->cntr,
+ temp | DISPLAY_PLANE_ENABLE);
+ REG_WRITE(map->base, REG_READ(map->base));
+ /*mdfld_dsi_dpi_turn_on(dev, pipe);*/
+ REG_WRITE(0xb048, 2);
+ msleep(100);
+ temp = REG_READ(map->conf);
+ temp |= PIPEACONF_ENABLE;
+ REG_WRITE(map->conf, temp);
+ }
+ }
+
+ gma_crtc_load_lut(crtc);
+
+ /* Give the overlay scaler a chance to enable
+ if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* Give the overlay scaler a chance to disable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+ if (pipe != 1)
+ mdfld_dsi_gen_fifo_ready(dev,
+ MIPI_GEN_FIFO_STAT_REG(pipe),
+ HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Disable display plane */
+ temp = REG_READ(map->cntr);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(map->cntr,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
+ }
+
+ /* Next, disable display pipes */
+ temp = REG_READ(map->conf);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ temp &= ~PIPEACONF_ENABLE;
+ temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
+ REG_WRITE(map->conf, temp);
+ REG_READ(map->conf);
+
+			/* Wait for the pipe disable to take effect. */
+ mdfldWaitForPipeDisable(dev, pipe);
+ }
+
+ temp = REG_READ(map->dpll);
+ if (temp & DPLL_VCO_ENABLE) {
+ if ((pipe != 1 && !((REG_READ(PIPEACONF)
+ | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
+ || pipe == 1) {
+ temp &= ~(DPLL_VCO_ENABLE);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
+ /* Wait for the clocks to turn off. */
+ /* FIXME_MDFLD PO may need more delay */
+ udelay(500);
+ }
+ }
+ break;
+ }
+ gma_power_end(dev);
+}
+
+#define MDFLD_LIMT_DPLL_19 0
+#define MDFLD_LIMT_DPLL_25 1
+#define MDFLD_LIMT_DPLL_83 2
+#define MDFLD_LIMT_DPLL_100 3
+#define MDFLD_LIMT_DSIPLL_19 4
+#define MDFLD_LIMT_DSIPLL_25 5
+#define MDFLD_LIMT_DSIPLL_83 6
+#define MDFLD_LIMT_DSIPLL_100 7
+
+#define MDFLD_DOT_MIN 19750
+#define MDFLD_DOT_MAX 120000
+#define MDFLD_DPLL_M_MIN_19 113
+#define MDFLD_DPLL_M_MAX_19 155
+#define MDFLD_DPLL_P1_MIN_19 2
+#define MDFLD_DPLL_P1_MAX_19 10
+#define MDFLD_DPLL_M_MIN_25 101
+#define MDFLD_DPLL_M_MAX_25 130
+#define MDFLD_DPLL_P1_MIN_25 2
+#define MDFLD_DPLL_P1_MAX_25 10
+#define MDFLD_DPLL_M_MIN_83 64
+#define MDFLD_DPLL_M_MAX_83 64
+#define MDFLD_DPLL_P1_MIN_83 2
+#define MDFLD_DPLL_P1_MAX_83 2
+#define MDFLD_DPLL_M_MIN_100 64
+#define MDFLD_DPLL_M_MAX_100 64
+#define MDFLD_DPLL_P1_MIN_100 2
+#define MDFLD_DPLL_P1_MAX_100 2
+#define MDFLD_DSIPLL_M_MIN_19 131
+#define MDFLD_DSIPLL_M_MAX_19 175
+#define MDFLD_DSIPLL_P1_MIN_19 3
+#define MDFLD_DSIPLL_P1_MAX_19 8
+#define MDFLD_DSIPLL_M_MIN_25 97
+#define MDFLD_DSIPLL_M_MAX_25 140
+#define MDFLD_DSIPLL_P1_MIN_25 3
+#define MDFLD_DSIPLL_P1_MAX_25 9
+#define MDFLD_DSIPLL_M_MIN_83 33
+#define MDFLD_DSIPLL_M_MAX_83 92
+#define MDFLD_DSIPLL_P1_MIN_83 2
+#define MDFLD_DSIPLL_P1_MAX_83 3
+#define MDFLD_DSIPLL_M_MIN_100 97
+#define MDFLD_DSIPLL_M_MAX_100 140
+#define MDFLD_DSIPLL_P1_MIN_100 3
+#define MDFLD_DSIPLL_P1_MAX_100 9
+
+static const struct mrst_limit_t mdfld_limits[] = {
+ { /* MDFLD_LIMT_DPLL_19 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DPLL_M_MIN_19, .max = MDFLD_DPLL_M_MAX_19},
+ .p1 = {.min = MDFLD_DPLL_P1_MIN_19, .max = MDFLD_DPLL_P1_MAX_19},
+ },
+ { /* MDFLD_LIMT_DPLL_25 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DPLL_M_MIN_25, .max = MDFLD_DPLL_M_MAX_25},
+ .p1 = {.min = MDFLD_DPLL_P1_MIN_25, .max = MDFLD_DPLL_P1_MAX_25},
+ },
+ { /* MDFLD_LIMT_DPLL_83 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DPLL_M_MIN_83, .max = MDFLD_DPLL_M_MAX_83},
+ .p1 = {.min = MDFLD_DPLL_P1_MIN_83, .max = MDFLD_DPLL_P1_MAX_83},
+ },
+ { /* MDFLD_LIMT_DPLL_100 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DPLL_M_MIN_100, .max = MDFLD_DPLL_M_MAX_100},
+ .p1 = {.min = MDFLD_DPLL_P1_MIN_100, .max = MDFLD_DPLL_P1_MAX_100},
+ },
+ { /* MDFLD_LIMT_DSIPLL_19 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DSIPLL_M_MIN_19, .max = MDFLD_DSIPLL_M_MAX_19},
+ .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19, .max = MDFLD_DSIPLL_P1_MAX_19},
+ },
+ { /* MDFLD_LIMT_DSIPLL_25 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DSIPLL_M_MIN_25, .max = MDFLD_DSIPLL_M_MAX_25},
+ .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25, .max = MDFLD_DSIPLL_P1_MAX_25},
+ },
+ { /* MDFLD_LIMT_DSIPLL_83 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DSIPLL_M_MIN_83, .max = MDFLD_DSIPLL_M_MAX_83},
+ .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83, .max = MDFLD_DSIPLL_P1_MAX_83},
+ },
+ { /* MDFLD_LIMT_DSIPLL_100 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DSIPLL_M_MIN_100, .max = MDFLD_DSIPLL_M_MAX_100},
+ .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100, .max = MDFLD_DSIPLL_P1_MAX_100},
+ },
+};
+
+#define MDFLD_M_MIN 21
+#define MDFLD_M_MAX 180
+static const u32 mdfld_m_converts[] = {
+/* M configuration table from 9-bit LFSR table */
+ 224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */
+ 173, 342, 171, 85, 298, 149, 74, 37, 18, 265, /* 31 - 40 */
+ 388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */
+ 83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */
+ 341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */
+ 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
+ 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
+ 71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */
+ 253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */
+ 478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */
+ 477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */
+ 210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */
+ 145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */
+ 380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */
+ 103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */
+ 396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */
+};
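+
+/*
+ * Usage note (sketch, based on the code below): the PLL does not take the
+ * M divisor directly; clock.m in [MDFLD_M_MIN, MDFLD_M_MAX] indexes this
+ * table as mdfld_m_converts[clock.m - MDFLD_M_MIN], and the 9-bit
+ * LFSR-encoded value is what gets programmed into the fp register.
+ * e.g. clock.m = 25 selects entry 4, i.e. the encoding 366.
+ */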
+
+static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
+{
+ const struct mrst_limit_t *limit = NULL;
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
+ || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
+ if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
+ limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
+ else if (ksel == KSEL_BYPASS_25)
+ limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25];
+ else if ((ksel == KSEL_BYPASS_83_100) &&
+ (dev_priv->core_freq == 166))
+ limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83];
+ else if ((ksel == KSEL_BYPASS_83_100) &&
+ (dev_priv->core_freq == 100 ||
+ dev_priv->core_freq == 200))
+ limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
+ } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+ if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
+ limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
+ else if (ksel == KSEL_BYPASS_25)
+ limit = &mdfld_limits[MDFLD_LIMT_DPLL_25];
+ else if ((ksel == KSEL_BYPASS_83_100) &&
+ (dev_priv->core_freq == 166))
+ limit = &mdfld_limits[MDFLD_LIMT_DPLL_83];
+ else if ((ksel == KSEL_BYPASS_83_100) &&
+ (dev_priv->core_freq == 100 ||
+ dev_priv->core_freq == 200))
+ limit = &mdfld_limits[MDFLD_LIMT_DPLL_100];
+ } else {
+ limit = NULL;
+		dev_dbg(dev->dev, "mdfld_limit: wrong display type.\n");
+ }
+
+ return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+static void mdfld_clock(int refclk, struct mrst_clock_t *clock)
+{
+ clock->dot = (refclk * clock->m) / clock->p1;
+}
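+
+/*
+ * Worked example (illustrative numbers only): with refclk = 19200 kHz,
+ * m = 120 and p1 = 4, mdfld_clock() yields dot = 19200 * 120 / 4
+ * = 576000 kHz.
+ */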
+
+/**
+ * Returns true and fills in a set of divisors for the desired target clock
+ * with the given refclk, or false if no usable divisors are found. The
+ * divisor values stored are the actual divisors.
+ */
+static bool
+mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
+ struct mrst_clock_t *best_clock)
+{
+ struct mrst_clock_t clock;
+ const struct mrst_limit_t *limit = mdfld_limit(crtc);
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+ clock.p1++) {
+ int this_err;
+
+ mdfld_clock(refclk, &clock);
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ return err != target;
+}
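+
+/*
+ * Worked example (illustrative, assumed numbers): with the
+ * MDFLD_LIMT_DSIPLL_19 limits (m 131..175, p1 3..8), refclk = 19200 and
+ * target = 480000, the exhaustive search settles on m = 150, p1 = 6,
+ * since 19200 * 150 / 6 = 480000 gives an error of zero.
+ */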
+
+static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int pipe = gma_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ int refclk = 0;
+ int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
+ clk_tmp = 0;
+ struct mrst_clock_t clock;
+ bool ok;
+ u32 dpll = 0, fp = 0;
+ bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct gma_encoder *gma_encoder = NULL;
+ uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int timeout = 0;
+ int ret;
+
+ dev_dbg(dev->dev, "pipe = 0x%x\n", pipe);
+
+#if 0
+ if (pipe == 1) {
+ if (!gma_power_begin(dev, true))
+ return 0;
+ android_hdmi_crtc_mode_set(crtc, mode, adjusted_mode,
+ x, y, old_fb);
+ goto mrst_crtc_mode_set_exit;
+ }
+#endif
+
+ ret = check_fb(crtc->primary->fb);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev->dev, "adjusted_hdisplay = %d\n",
+ adjusted_mode->hdisplay);
+ dev_dbg(dev->dev, "adjusted_vdisplay = %d\n",
+ adjusted_mode->vdisplay);
+ dev_dbg(dev->dev, "adjusted_hsync_start = %d\n",
+ adjusted_mode->hsync_start);
+ dev_dbg(dev->dev, "adjusted_hsync_end = %d\n",
+ adjusted_mode->hsync_end);
+ dev_dbg(dev->dev, "adjusted_htotal = %d\n",
+ adjusted_mode->htotal);
+ dev_dbg(dev->dev, "adjusted_vsync_start = %d\n",
+ adjusted_mode->vsync_start);
+ dev_dbg(dev->dev, "adjusted_vsync_end = %d\n",
+ adjusted_mode->vsync_end);
+ dev_dbg(dev->dev, "adjusted_vtotal = %d\n",
+ adjusted_mode->vtotal);
+ dev_dbg(dev->dev, "adjusted_clock = %d\n",
+ adjusted_mode->clock);
+ dev_dbg(dev->dev, "hdisplay = %d\n",
+ mode->hdisplay);
+ dev_dbg(dev->dev, "vdisplay = %d\n",
+ mode->vdisplay);
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ memcpy(&gma_crtc->saved_mode, mode,
+ sizeof(struct drm_display_mode));
+ memcpy(&gma_crtc->saved_adjusted_mode, adjusted_mode,
+ sizeof(struct drm_display_mode));
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ if (!connector)
+ continue;
+
+ encoder = connector->encoder;
+
+ if (!encoder)
+ continue;
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ gma_encoder = gma_attached_encoder(connector);
+
+ switch (gma_encoder->type) {
+ case INTEL_OUTPUT_MIPI:
+ is_mipi = true;
+ break;
+ case INTEL_OUTPUT_MIPI2:
+ is_mipi2 = true;
+ break;
+ case INTEL_OUTPUT_HDMI:
+ is_hdmi = true;
+ break;
+ }
+ }
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Disable the panel fitter if it was on our pipe */
+ if (psb_intel_panel_fitter_pipe(dev) == pipe)
+ REG_WRITE(PFIT_CONTROL, 0);
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ if (pipe == 1) {
+ /* FIXME: To make HDMI display with 864x480 (TPO), 480x864
+ * (PYR) or 480x854 (TMD), set the sprite width/height and
+		 * source image size registers with the adjusted mode for
+ * pipe B.
+ */
+
+ /*
+ * The defined sprite rectangle must always be completely
+ * contained within the displayable area of the screen image
+ * (frame buffer).
+ */
+ REG_WRITE(map->size, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
+ | (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
+ /* Set the CRTC with encoder mode. */
+ REG_WRITE(map->src, ((mode->crtc_hdisplay - 1) << 16)
+ | (mode->crtc_vdisplay - 1));
+ } else {
+ REG_WRITE(map->size,
+ ((mode->crtc_vdisplay - 1) << 16) |
+ (mode->crtc_hdisplay - 1));
+ REG_WRITE(map->src,
+ ((mode->crtc_hdisplay - 1) << 16) |
+ (mode->crtc_vdisplay - 1));
+ }
+
+ REG_WRITE(map->pos, 0);
+
+ if (gma_encoder)
+ drm_object_property_get_value(&connector->base,
+ dev->mode_config.scaling_mode_property, &scalingType);
+
+ if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
+		/* Medfield doesn't have register support for centering so we
+		 * need to adjust the h/vblank and h/vsync start and end
+		 * values to get centering
+ */
+ int offsetX = 0, offsetY = 0;
+
+ offsetX = (adjusted_mode->crtc_hdisplay -
+ mode->crtc_hdisplay) / 2;
+ offsetY = (adjusted_mode->crtc_vdisplay -
+ mode->crtc_vdisplay) / 2;
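+		/*
+		 * Illustrative example (assumed numbers): centering a
+		 * 480-wide mode in an 864-wide adjusted timing gives
+		 * offsetX = (864 - 480) / 2 = 192.
+		 */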
+
+ REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start -
+ offsetX - 1) |
+ ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start -
+ offsetX - 1) |
+ ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start -
+ offsetY - 1) |
+ ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start -
+ offsetY - 1) |
+ ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
+ } else {
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ }
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs =
+ crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ /* setup pipeconf */
+ dev_priv->pipeconf[pipe] = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
+
+ /* Set up the display plane register */
+ dev_priv->dspcntr[pipe] = REG_READ(map->cntr);
+ dev_priv->dspcntr[pipe] |= pipe << DISPPLANE_SEL_PIPE_POS;
+ dev_priv->dspcntr[pipe] |= DISPLAY_PLANE_ENABLE;
+
+ if (is_mipi2)
+ goto mrst_crtc_mode_set_exit;
+ clk = adjusted_mode->clock;
+
+ if (is_hdmi) {
+ if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) {
+ refclk = 19200;
+
+ if (is_mipi || is_mipi2)
+ clk_n = 1, clk_p2 = 8;
+ else if (is_hdmi)
+ clk_n = 1, clk_p2 = 10;
+ } else if (ksel == KSEL_BYPASS_25) {
+ refclk = 25000;
+
+ if (is_mipi || is_mipi2)
+ clk_n = 1, clk_p2 = 8;
+ else if (is_hdmi)
+ clk_n = 1, clk_p2 = 10;
+ } else if ((ksel == KSEL_BYPASS_83_100) &&
+ dev_priv->core_freq == 166) {
+ refclk = 83000;
+
+ if (is_mipi || is_mipi2)
+ clk_n = 4, clk_p2 = 8;
+ else if (is_hdmi)
+ clk_n = 4, clk_p2 = 10;
+ } else if ((ksel == KSEL_BYPASS_83_100) &&
+ (dev_priv->core_freq == 100 ||
+ dev_priv->core_freq == 200)) {
+ refclk = 100000;
+ if (is_mipi || is_mipi2)
+ clk_n = 4, clk_p2 = 8;
+ else if (is_hdmi)
+ clk_n = 4, clk_p2 = 10;
+ }
+
+ if (is_mipi)
+ clk_byte = dev_priv->bpp / 8;
+ else if (is_mipi2)
+ clk_byte = dev_priv->bpp2 / 8;
+
+ clk_tmp = clk * clk_n * clk_p2 * clk_byte;
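+		/*
+		 * Illustrative example (assumed numbers): a 74250 kHz HDMI
+		 * dot clock with clk_n = 1, clk_p2 = 10 and clk_byte = 1
+		 * gives clk_tmp = 74250 * 1 * 10 * 1 = 742500 kHz as the
+		 * PLL search target.
+		 */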
+
+ dev_dbg(dev->dev, "clk = %d, clk_n = %d, clk_p2 = %d.\n",
+ clk, clk_n, clk_p2);
+ dev_dbg(dev->dev, "adjusted_mode->clock = %d, clk_tmp = %d.\n",
+ adjusted_mode->clock, clk_tmp);
+
+ ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock);
+
+ if (!ok) {
+			DRM_ERROR("mdfldFindBestPLL failed in mdfld_crtc_mode_set.\n");
+ } else {
+ m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)];
+
+			dev_dbg(dev->dev, "dot clock = %d, "
+					"m = %d, p1 = %d, m_conv = %d.\n",
+					clock.dot, clock.m,
+					clock.p1, m_conv);
+ }
+
+ dpll = REG_READ(map->dpll);
+
+ if (dpll & DPLL_VCO_ENABLE) {
+ dpll &= ~DPLL_VCO_ENABLE;
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
+
+ /* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+
+ /* reset M1, N1 & P1 */
+ REG_WRITE(map->fp0, 0);
+ dpll &= ~MDFLD_P1_MASK;
+ REG_WRITE(map->dpll, dpll);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+ }
+
+		/* When ungating the DPLL power, wait 0.5 us before
+		 * enabling the VCO */
+ if (dpll & MDFLD_PWR_GATE_EN) {
+ dpll &= ~MDFLD_PWR_GATE_EN;
+ REG_WRITE(map->dpll, dpll);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+ }
+ dpll = 0;
+
+#if 0 /* FIXME revisit later */
+ if (ksel == KSEL_CRYSTAL_19 || ksel == KSEL_BYPASS_19 ||
+ ksel == KSEL_BYPASS_25)
+ dpll &= ~MDFLD_INPUT_REF_SEL;
+ else if (ksel == KSEL_BYPASS_83_100)
+ dpll |= MDFLD_INPUT_REF_SEL;
+#endif /* FIXME revisit later */
+
+ if (is_hdmi)
+ dpll |= MDFLD_VCO_SEL;
+
+ fp = (clk_n / 2) << 16;
+ fp |= m_conv;
+
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (clock.p1 - 2)) << 17;
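+		/* e.g. clock.p1 = 4 sets bit 19, since
+		   (1 << (4 - 2)) << 17 == 1 << 19 (illustrative value) */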
+
+#if 0 /* 1080p30 & 720p */
+ dpll = 0x00050000;
+ fp = 0x000001be;
+#endif
+#if 0 /* 480p */
+ dpll = 0x02010000;
+ fp = 0x000000d2;
+#endif
+ } else {
+#if 0 /*DBI_TPO_480x864*/
+ dpll = 0x00020000;
+ fp = 0x00000156;
+#endif /* DBI_TPO_480x864 */ /* get from spec. */
+
+ dpll = 0x00800000;
+ fp = 0x000000c1;
+ }
+
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+
+ dpll |= DPLL_VCO_ENABLE;
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
+
+ /* wait for DSI PLL to lock */
+ while (timeout < 20000 &&
+ !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
+ udelay(150);
+ timeout++;
+ }
+
+ if (is_mipi)
+ goto mrst_crtc_mode_set_exit;
+
+ dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi);
+
+ REG_WRITE(map->conf, dev_priv->pipeconf[pipe]);
+ REG_READ(map->conf);
+
+	/* Wait for the pipe enable to take effect. */
+ REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
+ gma_wait_for_vblank(dev);
+
+mrst_crtc_mode_set_exit:
+
+ gma_power_end(dev);
+
+ return 0;
+}
+
+const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
+ .dpms = mdfld_crtc_dpms,
+ .mode_fixup = gma_crtc_mode_fixup,
+ .mode_set = mdfld_crtc_mode_set,
+ .mode_set_base = mdfld__intel_pipe_set_base,
+ .prepare = gma_crtc_prepare,
+ .commit = gma_crtc_commit,
+};
+
diff --git a/drivers/gpu/drm/gma500/mdfld_output.c b/drivers/gpu/drm/gma500/mdfld_output.c
new file mode 100644
index 00000000000..c95966bb0c9
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_output.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+*/
+
+#include "mdfld_output.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_output.h"
+
+#include "tc35876x-dsi-lvds.h"
+
+int mdfld_get_panel_type(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ return dev_priv->mdfld_panel_id;
+}
+
+static void mdfld_init_panel(struct drm_device *dev, int mipi_pipe,
+ int p_type)
+{
+ switch (p_type) {
+ case TPO_VID:
+ mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tpo_vid_funcs);
+ break;
+ case TC35876X:
+ tc35876x_init(dev);
+ mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tc35876x_funcs);
+ break;
+ case TMD_VID:
+ mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tmd_vid_funcs);
+ break;
+ case HDMI:
+/* if (dev_priv->mdfld_hdmi_present)
+ mdfld_hdmi_init(dev, &dev_priv->mode_dev); */
+ break;
+ }
+}
+
+int mdfld_output_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /* FIXME: hardcoded for now */
+ dev_priv->mdfld_panel_id = TC35876X;
+ /* MIPI panel 1 */
+ mdfld_init_panel(dev, 0, dev_priv->mdfld_panel_id);
+ /* HDMI panel */
+ mdfld_init_panel(dev, 1, HDMI);
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/gma500/mdfld_output.h b/drivers/gpu/drm/gma500/mdfld_output.h
new file mode 100644
index 00000000000..ab2b27c0f03
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_output.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+*/
+
+#ifndef MDFLD_OUTPUT_H
+#define MDFLD_OUTPUT_H
+
+#include "psb_drv.h"
+
+#define TPO_PANEL_WIDTH 84
+#define TPO_PANEL_HEIGHT 46
+#define TMD_PANEL_WIDTH 39
+#define TMD_PANEL_HEIGHT 71
+
+struct mdfld_dsi_config;
+
+enum panel_type {
+ TPO_VID,
+ TMD_VID,
+ HDMI,
+ TC35876X,
+};
+
+struct panel_info {
+ u32 width_mm;
+ u32 height_mm;
+ /* Other info */
+};
+
+struct panel_funcs {
+ const struct drm_encoder_funcs *encoder_funcs;
+ const struct drm_encoder_helper_funcs *encoder_helper_funcs;
+ struct drm_display_mode * (*get_config_mode)(struct drm_device *);
+ int (*get_panel_info)(struct drm_device *, int, struct panel_info *);
+ int (*reset)(int pipe);
+ void (*drv_ic_init)(struct mdfld_dsi_config *dsi_config, int pipe);
+};
+
+int mdfld_output_init(struct drm_device *dev);
+
+struct backlight_device *mdfld_get_backlight_device(void);
+int mdfld_set_brightness(struct backlight_device *bd);
+
+int mdfld_get_panel_type(struct drm_device *dev, int pipe);
+
+extern const struct drm_crtc_helper_funcs mdfld_helper_funcs;
+
+extern const struct panel_funcs mdfld_tmd_vid_funcs;
+extern const struct panel_funcs mdfld_tpo_vid_funcs;
+
+extern void mdfld_disable_crtc(struct drm_device *dev, int pipe);
+extern void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe);
+extern void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe);
+#endif
diff --git a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
new file mode 100644
index 00000000000..dc0c6c3d3d2
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jim Liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ * Gideon Eaton <eaton.
+ * Scott Rowe <scott.m.rowe@intel.com>
+ */
+
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+static struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev)
+{
+ struct drm_display_mode *mode;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
+	bool use_gct = false; /* Disable GCT for now */
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
+ if (use_gct) {
+ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+ mode->hsync_start = mode->hdisplay + \
+ ((ti->hsync_offset_hi << 8) | \
+ ti->hsync_offset_lo);
+ mode->hsync_end = mode->hsync_start + \
+ ((ti->hsync_pulse_width_hi << 8) | \
+ ti->hsync_pulse_width_lo);
+ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
+ ti->hblank_lo);
+ mode->vsync_start = \
+ mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
+ ti->vsync_offset_lo);
+ mode->vsync_end = \
+ mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
+ ti->vsync_pulse_width_lo);
+ mode->vtotal = mode->vdisplay + \
+ ((ti->vblank_hi << 8) | ti->vblank_lo);
+ mode->clock = ti->pixel_clock * 10;
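+		/*
+		 * Illustrative decode (assumed GCT values): hactive_hi = 0x01
+		 * and hactive_lo = 0xe0 give hdisplay = 0x01e0 = 480, and a
+		 * pixel_clock of 3300 (10 kHz units) becomes 33000 kHz.
+		 */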
+
+ dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
+ dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
+ dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
+ dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
+ dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
+ dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
+ dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
+ dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
+ dev_dbg(dev->dev, "clock is %d\n", mode->clock);
+ } else {
+ mode->hdisplay = 480;
+ mode->vdisplay = 854;
+ mode->hsync_start = 487;
+ mode->hsync_end = 490;
+ mode->htotal = 499;
+ mode->vsync_start = 861;
+ mode->vsync_end = 865;
+ mode->vtotal = 873;
+ mode->clock = 33264;
+ }
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ return mode;
+}
+
+static int tmd_vid_get_panel_info(struct drm_device *dev,
+ int pipe,
+ struct panel_info *pi)
+{
+ if (!dev || !pi)
+ return -EINVAL;
+
+ pi->width_mm = TMD_PANEL_WIDTH;
+ pi->height_mm = TMD_PANEL_HEIGHT;
+
+ return 0;
+}
+
+/* ************************************************************************* *\
+ * FUNCTION: mdfld_init_TMD_MIPI
+ *
+ * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
+ *		restore_display_registers. Since this function does not
+ * acquire the mutex, it is important that the calling function
+ * does!
+\* ************************************************************************* */
+
+/* FIXME: make the below data u8 instead of u32; note byte order! */
+static u32 tmd_cmd_mcap_off[] = {0x000000b2};
+static u32 tmd_cmd_enable_lane_switch[] = {0x000101ef};
+static u32 tmd_cmd_set_lane_num[] = {0x006360ef};
+static u32 tmd_cmd_pushing_clock0[] = {0x00cc2fef};
+static u32 tmd_cmd_pushing_clock1[] = {0x00dd6eef};
+static u32 tmd_cmd_set_mode[] = {0x000000b3};
+static u32 tmd_cmd_set_sync_pulse_mode[] = {0x000961ef};
+static u32 tmd_cmd_set_column[] = {0x0100002a, 0x000000df};
+static u32 tmd_cmd_set_page[] = {0x0300002b, 0x00000055};
+static u32 tmd_cmd_set_video_mode[] = {0x00000153};
+/* No auto backlight (auto_bl) yet; need to add in the future */
+static u32 tmd_cmd_enable_backlight[] = {0x00005ab4};
+static u32 tmd_cmd_set_backlight_dimming[] = {0x00000ebd};
+
+static void mdfld_dsi_tmd_drv_ic_init(struct mdfld_dsi_config *dsi_config,
+ int pipe)
+{
+ struct mdfld_dsi_pkg_sender *sender
+ = mdfld_dsi_get_pkg_sender(dsi_config);
+
+	DRM_INFO("Initializing TMD MIPI display.\n");
+
+ if (!sender) {
+ DRM_ERROR("Cannot get sender\n");
+ return;
+ }
+
+ if (dsi_config->dvr_ic_inited)
+ return;
+
+ msleep(3);
+
+ /* FIXME: make the below data u8 instead of u32; note byte order! */
+
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_mcap_off,
+ sizeof(tmd_cmd_mcap_off), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_lane_switch,
+ sizeof(tmd_cmd_enable_lane_switch), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_lane_num,
+ sizeof(tmd_cmd_set_lane_num), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock0,
+ sizeof(tmd_cmd_pushing_clock0), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock1,
+ sizeof(tmd_cmd_pushing_clock1), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_mode,
+ sizeof(tmd_cmd_set_mode), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_sync_pulse_mode,
+ sizeof(tmd_cmd_set_sync_pulse_mode), false);
+ mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_column,
+ sizeof(tmd_cmd_set_column), false);
+ mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_page,
+ sizeof(tmd_cmd_set_page), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_video_mode,
+ sizeof(tmd_cmd_set_video_mode), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_backlight,
+ sizeof(tmd_cmd_enable_backlight), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_backlight_dimming,
+ sizeof(tmd_cmd_set_backlight_dimming), false);
+
+ dsi_config->dvr_ic_inited = 1;
+}
+
+/*TPO DPI encoder helper funcs*/
+static const struct drm_encoder_helper_funcs
+ mdfld_tpo_dpi_encoder_helper_funcs = {
+ .dpms = mdfld_dsi_dpi_dpms,
+ .mode_fixup = mdfld_dsi_dpi_mode_fixup,
+ .prepare = mdfld_dsi_dpi_prepare,
+ .mode_set = mdfld_dsi_dpi_mode_set,
+ .commit = mdfld_dsi_dpi_commit,
+};
+
+/*TPO DPI encoder funcs*/
+static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+const struct panel_funcs mdfld_tmd_vid_funcs = {
+ .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
+ .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
+ .get_config_mode = &tmd_vid_get_config_mode,
+ .get_panel_info = tmd_vid_get_panel_info,
+ .reset = mdfld_dsi_panel_reset,
+ .drv_ic_init = mdfld_dsi_tmd_drv_ic_init,
+};
diff --git a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
new file mode 100644
index 00000000000..d8d4170725b
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include "mdfld_dsi_dpi.h"
+
+static struct drm_display_mode *tpo_vid_get_config_mode(struct drm_device *dev)
+{
+ struct drm_display_mode *mode;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
+ bool use_gct = false;
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
+ if (use_gct) {
+ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+ mode->hsync_start = mode->hdisplay +
+ ((ti->hsync_offset_hi << 8) |
+ ti->hsync_offset_lo);
+ mode->hsync_end = mode->hsync_start +
+ ((ti->hsync_pulse_width_hi << 8) |
+ ti->hsync_pulse_width_lo);
+ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) |
+ ti->hblank_lo);
+ mode->vsync_start =
+ mode->vdisplay + ((ti->vsync_offset_hi << 8) |
+ ti->vsync_offset_lo);
+ mode->vsync_end =
+ mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) |
+ ti->vsync_pulse_width_lo);
+ mode->vtotal = mode->vdisplay +
+ ((ti->vblank_hi << 8) | ti->vblank_lo);
+ mode->clock = ti->pixel_clock * 10;
+
+ dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
+ dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
+ dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
+ dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
+ dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
+ dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
+ dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
+ dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
+ dev_dbg(dev->dev, "clock is %d\n", mode->clock);
+ } else {
+ mode->hdisplay = 864;
+ mode->vdisplay = 480;
+ mode->hsync_start = 873;
+ mode->hsync_end = 876;
+ mode->htotal = 887;
+ mode->vsync_start = 487;
+ mode->vsync_end = 490;
+ mode->vtotal = 499;
+ mode->clock = 33264;
+ }
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ return mode;
+}
+
+static int tpo_vid_get_panel_info(struct drm_device *dev,
+ int pipe,
+ struct panel_info *pi)
+{
+ if (!dev || !pi)
+ return -EINVAL;
+
+ pi->width_mm = TPO_PANEL_WIDTH;
+ pi->height_mm = TPO_PANEL_HEIGHT;
+
+ return 0;
+}
+
+/*TPO DPI encoder helper funcs*/
+static const struct drm_encoder_helper_funcs
+ mdfld_tpo_dpi_encoder_helper_funcs = {
+ .dpms = mdfld_dsi_dpi_dpms,
+ .mode_fixup = mdfld_dsi_dpi_mode_fixup,
+ .prepare = mdfld_dsi_dpi_prepare,
+ .mode_set = mdfld_dsi_dpi_mode_set,
+ .commit = mdfld_dsi_dpi_commit,
+};
+
+/*TPO DPI encoder funcs*/
+static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+const struct panel_funcs mdfld_tpo_vid_funcs = {
+ .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
+ .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
+ .get_config_mode = &tpo_vid_get_config_mode,
+ .get_panel_info = tpo_vid_get_panel_info,
+};
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
new file mode 100644
index 00000000000..a97e38e284f
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -0,0 +1,338 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+/* TODO
+ * - Split functions by vbt type
+ * - Make them all take drm_device
+ * - Check ioremap failures
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/gma_drm.h>
+#include "psb_drv.h"
+#include "mid_bios.h"
+
+static void mid_get_fuse_settings(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ uint32_t fuse_value = 0;
+ uint32_t fuse_value_tmp = 0;
+
+#define FB_REG06 0xD0810600
+#define FB_MIPI_DISABLE (1 << 11)
+#define FB_REG09 0xD0810900
+#define FB_SKU_MASK 0x7000
+#define FB_SKU_SHIFT 12
+#define FB_SKU_100 0
+#define FB_SKU_100L 1
+#define FB_SKU_83 2
+ if (pci_root == NULL) {
+ WARN_ON(1);
+ return;
+ }
+
+ pci_write_config_dword(pci_root, 0xD0, FB_REG06);
+ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+
+ /* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
+ if (IS_MRST(dev))
+ dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
+
+ DRM_INFO("internal display is %s\n",
+ dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
+
+ /* Prevent runtime suspend at start*/
+ if (dev_priv->iLVDS_enable) {
+ dev_priv->is_lvds_on = true;
+ dev_priv->is_mipi_on = false;
+ } else {
+ dev_priv->is_mipi_on = true;
+ dev_priv->is_lvds_on = false;
+ }
+
+ dev_priv->video_device_fuse = fuse_value;
+
+ pci_write_config_dword(pci_root, 0xD0, FB_REG09);
+ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+
+	dev_dbg(dev->dev, "SKU value is 0x%x.\n", fuse_value);
+ fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
+
+ dev_priv->fuse_reg_value = fuse_value;
+
+ switch (fuse_value_tmp) {
+ case FB_SKU_100:
+ dev_priv->core_freq = 200;
+ break;
+ case FB_SKU_100L:
+ dev_priv->core_freq = 100;
+ break;
+ case FB_SKU_83:
+ dev_priv->core_freq = 166;
+ break;
+ default:
+		dev_warn(dev->dev, "Invalid SKU value 0x%08x\n",
+ fuse_value_tmp);
+ dev_priv->core_freq = 0;
+ }
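+	/*
+	 * Illustrative decode (assumed fuse value): FB_REG09 bits 14:12 hold
+	 * the SKU, so fuse_value = 0x2000 gives (0x2000 & FB_SKU_MASK) >>
+	 * FB_SKU_SHIFT = 2 = FB_SKU_83 and hence a 166 MHz core clock.
+	 */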
+ dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);
+ pci_dev_put(pci_root);
+}
+
+/*
+ * Get the revision ID, B0:D2:F0;0x08
+ */
+static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
+{
+ uint32_t platform_rev_id = 0;
+ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+
+ if (pci_gfx_root == NULL) {
+ WARN_ON(1);
+ return;
+ }
+ pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
+ dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
+ pci_dev_put(pci_gfx_root);
+ dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
+ dev_priv->platform_rev_id);
+}
+
+struct mid_vbt_header {
+ u32 signature;
+ u8 revision;
+} __packed;
+
+/* The same for r0 and r1 */
+struct vbt_r0 {
+ struct mid_vbt_header vbt_header;
+ u8 size;
+ u8 checksum;
+} __packed;
+
+struct vbt_r10 {
+ struct mid_vbt_header vbt_header;
+ u8 checksum;
+ u16 size;
+ u8 panel_count;
+ u8 primary_panel_idx;
+ u8 secondary_panel_idx;
+ u8 __reserved[5];
+} __packed;
+
+static int read_vbt_r0(u32 addr, struct vbt_r0 *vbt)
+{
+ void __iomem *vbt_virtual;
+
+ vbt_virtual = ioremap(addr, sizeof(*vbt));
+ if (vbt_virtual == NULL)
+ return -1;
+
+ memcpy_fromio(vbt, vbt_virtual, sizeof(*vbt));
+ iounmap(vbt_virtual);
+
+ return 0;
+}
+
+static int read_vbt_r10(u32 addr, struct vbt_r10 *vbt)
+{
+ void __iomem *vbt_virtual;
+
+ vbt_virtual = ioremap(addr, sizeof(*vbt));
+ if (!vbt_virtual)
+ return -1;
+
+ memcpy_fromio(vbt, vbt_virtual, sizeof(*vbt));
+ iounmap(vbt_virtual);
+
+ return 0;
+}
+
+static int mid_get_vbt_data_r0(struct drm_psb_private *dev_priv, u32 addr)
+{
+ struct vbt_r0 vbt;
+ void __iomem *gct_virtual;
+ struct gct_r0 gct;
+ u8 bpi;
+
+ if (read_vbt_r0(addr, &vbt))
+ return -1;
+
+ gct_virtual = ioremap(addr + sizeof(vbt), vbt.size - sizeof(vbt));
+ if (!gct_virtual)
+ return -1;
+ memcpy_fromio(&gct, gct_virtual, sizeof(gct));
+ iounmap(gct_virtual);
+
+ bpi = gct.PD.BootPanelIndex;
+ dev_priv->gct_data.bpi = bpi;
+ dev_priv->gct_data.pt = gct.PD.PanelType;
+ dev_priv->gct_data.DTD = gct.panel[bpi].DTD;
+ dev_priv->gct_data.Panel_Port_Control =
+ gct.panel[bpi].Panel_Port_Control;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ gct.panel[bpi].Panel_MIPI_Display_Descriptor;
+
+ return 0;
+}
+
+static int mid_get_vbt_data_r1(struct drm_psb_private *dev_priv, u32 addr)
+{
+ struct vbt_r0 vbt;
+ void __iomem *gct_virtual;
+ struct gct_r1 gct;
+ u8 bpi;
+
+ if (read_vbt_r0(addr, &vbt))
+ return -1;
+
+ gct_virtual = ioremap(addr + sizeof(vbt), vbt.size - sizeof(vbt));
+ if (!gct_virtual)
+ return -1;
+ memcpy_fromio(&gct, gct_virtual, sizeof(gct));
+ iounmap(gct_virtual);
+
+ bpi = gct.PD.BootPanelIndex;
+ dev_priv->gct_data.bpi = bpi;
+ dev_priv->gct_data.pt = gct.PD.PanelType;
+ dev_priv->gct_data.DTD = gct.panel[bpi].DTD;
+ dev_priv->gct_data.Panel_Port_Control =
+ gct.panel[bpi].Panel_Port_Control;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ gct.panel[bpi].Panel_MIPI_Display_Descriptor;
+
+ return 0;
+}
+
+static int mid_get_vbt_data_r10(struct drm_psb_private *dev_priv, u32 addr)
+{
+ struct vbt_r10 vbt;
+ void __iomem *gct_virtual;
+ struct gct_r10 *gct;
+ struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
+ struct gct_r10_timing_info *ti;
+ int ret = -1;
+
+ if (read_vbt_r10(addr, &vbt))
+ return -1;
+
+ gct = kmalloc(sizeof(*gct) * vbt.panel_count, GFP_KERNEL);
+ if (!gct)
+ return -1;
+
+ gct_virtual = ioremap(addr + sizeof(vbt),
+ sizeof(*gct) * vbt.panel_count);
+ if (!gct_virtual)
+ goto out;
+	memcpy_fromio(gct, gct_virtual, sizeof(*gct) * vbt.panel_count);
+ iounmap(gct_virtual);
+
+ dev_priv->gct_data.bpi = vbt.primary_panel_idx;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ gct[vbt.primary_panel_idx].Panel_MIPI_Display_Descriptor;
+
+ ti = &gct[vbt.primary_panel_idx].DTD;
+ dp_ti->pixel_clock = ti->pixel_clock;
+ dp_ti->hactive_hi = ti->hactive_hi;
+ dp_ti->hactive_lo = ti->hactive_lo;
+ dp_ti->hblank_hi = ti->hblank_hi;
+ dp_ti->hblank_lo = ti->hblank_lo;
+ dp_ti->hsync_offset_hi = ti->hsync_offset_hi;
+ dp_ti->hsync_offset_lo = ti->hsync_offset_lo;
+ dp_ti->hsync_pulse_width_hi = ti->hsync_pulse_width_hi;
+ dp_ti->hsync_pulse_width_lo = ti->hsync_pulse_width_lo;
+ dp_ti->vactive_hi = ti->vactive_hi;
+ dp_ti->vactive_lo = ti->vactive_lo;
+ dp_ti->vblank_hi = ti->vblank_hi;
+ dp_ti->vblank_lo = ti->vblank_lo;
+ dp_ti->vsync_offset_hi = ti->vsync_offset_hi;
+ dp_ti->vsync_offset_lo = ti->vsync_offset_lo;
+ dp_ti->vsync_pulse_width_hi = ti->vsync_pulse_width_hi;
+ dp_ti->vsync_pulse_width_lo = ti->vsync_pulse_width_lo;
+
+ ret = 0;
+out:
+ kfree(gct);
+ return ret;
+}
+
+static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ u32 addr;
+ u8 __iomem *vbt_virtual;
+ struct mid_vbt_header vbt_header;
+ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+ int ret = -1;
+
+ /* Get the address of the platform config vbt */
+ pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
+ pci_dev_put(pci_gfx_root);
+
+ dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
+
+ if (!addr)
+ goto out;
+
+ /* get the virtual address of the vbt */
+ vbt_virtual = ioremap(addr, sizeof(vbt_header));
+ if (!vbt_virtual)
+ goto out;
+
+ memcpy_fromio(&vbt_header, vbt_virtual, sizeof(vbt_header));
+ iounmap(vbt_virtual);
+
+ if (memcmp(&vbt_header.signature, "$GCT", 4))
+ goto out;
+
+ dev_dbg(dev->dev, "GCT revision is %02x\n", vbt_header.revision);
+
+ switch (vbt_header.revision) {
+ case 0x00:
+ ret = mid_get_vbt_data_r0(dev_priv, addr);
+ break;
+ case 0x01:
+ ret = mid_get_vbt_data_r1(dev_priv, addr);
+ break;
+ case 0x10:
+ ret = mid_get_vbt_data_r10(dev_priv, addr);
+ break;
+ default:
+ dev_err(dev->dev, "Unknown revision of GCT!\n");
+ }
+
+out:
+ if (ret)
+		dev_err(dev->dev, "Unable to read GCT!\n");
+ else
+ dev_priv->has_gct = true;
+}
+
+int mid_chip_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ mid_get_fuse_settings(dev);
+ mid_get_vbt_data(dev_priv);
+ mid_get_pci_revID(dev_priv);
+ return 0;
+}
diff --git a/drivers/gpu/drm/gma500/mid_bios.h b/drivers/gpu/drm/gma500/mid_bios.h
new file mode 100644
index 00000000000..00e7d564b7e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mid_bios.h
@@ -0,0 +1,21 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+extern int mid_chip_setup(struct drm_device *dev);
+
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
new file mode 100644
index 00000000000..0eaf11c1993
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -0,0 +1,812 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "mmu.h"
+
+/*
+ * Code for the SGX MMU:
+ */
+
+/*
+ * clflush on one processor only:
+ * clflush should apparently flush the cache line on all processors in an
+ * SMP system.
+ */
+
+/*
+ * kmap atomic:
+ * The usage of the slots must be completely encapsulated within a spinlock, and
+ * no other functions that may be using the locks for other purposes may be
+ * called from within the locked region.
+ * Since the slots are per processor, this will guarantee that we are the only
+ * user.
+ */
+
+/*
+ * TODO: Inserting ptes from an interrupt handler:
+ * This may be desirable for some SGX functionality where the GPU can fault in
+ * needed pages. For that, we need to make an atomic insert_pages function, that
+ * may fail.
+ * If it fails, the caller need to insert the page using a workqueue function,
+ * but on average it should be fast.
+ */
+
+static inline uint32_t psb_mmu_pt_index(uint32_t offset)
+{
+ return (offset >> PSB_PTE_SHIFT) & 0x3FF;
+}
+
+static inline uint32_t psb_mmu_pd_index(uint32_t offset)
+{
+ return offset >> PSB_PDE_SHIFT;
+}
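+
+/*
+ * Illustrative split (assuming the usual PSB_PTE_SHIFT = 12 and
+ * PSB_PDE_SHIFT = 22): address 0x12345678 decomposes into PD index 0x48
+ * (bits 31..22), PT index 0x345 (bits 21..12) and a 12-bit page offset
+ * 0x678.
+ */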
+
+#if defined(CONFIG_X86)
+static inline void psb_clflush(void *addr)
+{
+ __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
+}
+
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
+{
+ if (!driver->has_clflush)
+ return;
+
+ mb();
+ psb_clflush(addr);
+ mb();
+}
+#else
+
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
+{
+}
+
+#endif
+
+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
+{
+ struct drm_device *dev = driver->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (atomic_read(&driver->needs_tlbflush) || force) {
+ uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
+ PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
+
+ /* Make sure data cache is turned off before enabling it */
+ wmb();
+ PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
+ if (driver->msvdx_mmu_invaldc)
+ atomic_set(driver->msvdx_mmu_invaldc, 1);
+ }
+ atomic_set(&driver->needs_tlbflush, 0);
+}
+
+#if 0
+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
+{
+ down_write(&driver->sem);
+ psb_mmu_flush_pd_locked(driver, force);
+ up_write(&driver->sem);
+}
+#endif
+
+void psb_mmu_flush(struct psb_mmu_driver *driver)
+{
+ struct drm_device *dev = driver->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ uint32_t val;
+
+ down_write(&driver->sem);
+ val = PSB_RSGX32(PSB_CR_BIF_CTRL);
+ if (atomic_read(&driver->needs_tlbflush))
+ PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
+ else
+ PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);
+
+ /* Make sure data cache is turned off and MMU is flushed before
+ restoring bank interface control register */
+ wmb();
+ PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
+ PSB_CR_BIF_CTRL);
+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
+
+ atomic_set(&driver->needs_tlbflush, 0);
+ if (driver->msvdx_mmu_invaldc)
+ atomic_set(driver->msvdx_mmu_invaldc, 1);
+ up_write(&driver->sem);
+}
+
+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
+{
+ struct drm_device *dev = pd->driver->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
+ PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
+
+ down_write(&pd->driver->sem);
+ PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
+ wmb();
+ psb_mmu_flush_pd_locked(pd->driver, 1);
+ pd->hw_context = hw_context;
+ up_write(&pd->driver->sem);
+}
+
+static inline unsigned long psb_pd_addr_end(unsigned long addr,
+ unsigned long end)
+{
+ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
+ return (addr < end) ? addr : end;
+}
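+
+/*
+ * Sketch of the rounding behaviour (assuming PSB_PDE_MASK covers a 4 MiB
+ * span): psb_pd_addr_end(0x00500000, 0x01000000) returns the next
+ * page-directory boundary 0x00800000, while psb_pd_addr_end(0x00500000,
+ * 0x00600000) returns the end address 0x00600000.
+ */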
+
+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
+{
+ uint32_t mask = PSB_PTE_VALID;
+
+ if (type & PSB_MMU_CACHED_MEMORY)
+ mask |= PSB_PTE_CACHED;
+ if (type & PSB_MMU_RO_MEMORY)
+ mask |= PSB_PTE_RO;
+ if (type & PSB_MMU_WO_MEMORY)
+ mask |= PSB_PTE_WO;
+
+ return (pfn << PAGE_SHIFT) | mask;
+}
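+
+/*
+ * Illustrative PTE (flag encodings assumed): pfn = 0x1234 with type
+ * PSB_MMU_CACHED_MEMORY | PSB_MMU_RO_MEMORY produces
+ * 0x1234000 | PSB_PTE_VALID | PSB_PTE_CACHED | PSB_PTE_RO.
+ */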
+
+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+ int trap_pagefaults, int invalid_type)
+{
+ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+ uint32_t *v;
+ int i;
+
+ if (!pd)
+ return NULL;
+
+ pd->p = alloc_page(GFP_DMA32);
+ if (!pd->p)
+ goto out_err1;
+ pd->dummy_pt = alloc_page(GFP_DMA32);
+ if (!pd->dummy_pt)
+ goto out_err2;
+ pd->dummy_page = alloc_page(GFP_DMA32);
+ if (!pd->dummy_page)
+ goto out_err3;
+
+ if (!trap_pagefaults) {
+ pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
+ invalid_type);
+ pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
+ invalid_type);
+ } else {
+ pd->invalid_pde = 0;
+ pd->invalid_pte = 0;
+ }
+
+ v = kmap(pd->dummy_pt);
+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+ v[i] = pd->invalid_pte;
+
+ kunmap(pd->dummy_pt);
+
+ v = kmap(pd->p);
+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+ v[i] = pd->invalid_pde;
+
+ kunmap(pd->p);
+
+ clear_page(kmap(pd->dummy_page));
+ kunmap(pd->dummy_page);
+
+ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
+ if (!pd->tables)
+ goto out_err4;
+
+ pd->hw_context = -1;
+ pd->pd_mask = PSB_PTE_VALID;
+ pd->driver = driver;
+
+ return pd;
+
+out_err4:
+ __free_page(pd->dummy_page);
+out_err3:
+ __free_page(pd->dummy_pt);
+out_err2:
+ __free_page(pd->p);
+out_err1:
+ kfree(pd);
+ return NULL;
+}
+
+static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
+{
+ __free_page(pt->p);
+ kfree(pt);
+}
+
+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
+{
+ struct psb_mmu_driver *driver = pd->driver;
+ struct drm_device *dev = driver->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_mmu_pt *pt;
+ int i;
+
+ down_write(&driver->sem);
+ if (pd->hw_context != -1) {
+ PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
+ psb_mmu_flush_pd_locked(driver, 1);
+ }
+
+ /* Should take the spinlock here, but we don't need to do that
+ since we have the semaphore in write mode. */
+
+ for (i = 0; i < 1024; ++i) {
+ pt = pd->tables[i];
+ if (pt)
+ psb_mmu_free_pt(pt);
+ }
+
+ vfree(pd->tables);
+ __free_page(pd->dummy_page);
+ __free_page(pd->dummy_pt);
+ __free_page(pd->p);
+ kfree(pd);
+ up_write(&driver->sem);
+}
+
+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
+{
+ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
+ void *v;
+ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
+ uint32_t clflush_count = PAGE_SIZE / clflush_add;
+ spinlock_t *lock = &pd->driver->lock;
+ uint8_t *clf;
+ uint32_t *ptes;
+ int i;
+
+ if (!pt)
+ return NULL;
+
+ pt->p = alloc_page(GFP_DMA32);
+ if (!pt->p) {
+ kfree(pt);
+ return NULL;
+ }
+
+ spin_lock(lock);
+
+ v = kmap_atomic(pt->p);
+ clf = (uint8_t *) v;
+ ptes = (uint32_t *) v;
+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+ *ptes++ = pd->invalid_pte;
+
+#if defined(CONFIG_X86)
+ if (pd->driver->has_clflush && pd->hw_context != -1) {
+ mb();
+ for (i = 0; i < clflush_count; ++i) {
+ psb_clflush(clf);
+ clf += clflush_add;
+ }
+ mb();
+ }
+#endif
+ kunmap_atomic(v);
+ spin_unlock(lock);
+
+ pt->count = 0;
+ pt->pd = pd;
+ pt->index = 0;
+
+ return pt;
+}
+
+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+ unsigned long addr)
+{
+ uint32_t index = psb_mmu_pd_index(addr);
+ struct psb_mmu_pt *pt;
+ uint32_t *v;
+ spinlock_t *lock = &pd->driver->lock;
+
+ spin_lock(lock);
+ pt = pd->tables[index];
+ while (!pt) {
+ spin_unlock(lock);
+ pt = psb_mmu_alloc_pt(pd);
+ if (!pt)
+ return NULL;
+ spin_lock(lock);
+
+ if (pd->tables[index]) {
+ spin_unlock(lock);
+ psb_mmu_free_pt(pt);
+ spin_lock(lock);
+ pt = pd->tables[index];
+ continue;
+ }
+
+ v = kmap_atomic(pd->p);
+ pd->tables[index] = pt;
+ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
+ pt->index = index;
+ kunmap_atomic((void *) v);
+
+ if (pd->hw_context != -1) {
+ psb_mmu_clflush(pd->driver, (void *)&v[index]);
+ atomic_set(&pd->driver->needs_tlbflush, 1);
+ }
+ }
+ pt->v = kmap_atomic(pt->p);
+ return pt;
+}
+
+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
+ unsigned long addr)
+{
+ uint32_t index = psb_mmu_pd_index(addr);
+ struct psb_mmu_pt *pt;
+ spinlock_t *lock = &pd->driver->lock;
+
+ spin_lock(lock);
+ pt = pd->tables[index];
+ if (!pt) {
+ spin_unlock(lock);
+ return NULL;
+ }
+ pt->v = kmap_atomic(pt->p);
+ return pt;
+}
+
+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
+{
+ struct psb_mmu_pd *pd = pt->pd;
+ uint32_t *v;
+
+ kunmap_atomic(pt->v);
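+	/* A page table with no live entries is torn down: its PDE is set
+	 * back to the invalid-PDE pattern and the backing page is freed.
+	 */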
+ if (pt->count == 0) {
+ v = kmap_atomic(pd->p);
+ v[pt->index] = pd->invalid_pde;
+ pd->tables[pt->index] = NULL;
+
+ if (pd->hw_context != -1) {
+ psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
+ atomic_set(&pd->driver->needs_tlbflush, 1);
+ }
+		kunmap_atomic(v);	/* unmap pd->p; pt->v was unmapped above */
+ spin_unlock(&pd->driver->lock);
+ psb_mmu_free_pt(pt);
+ return;
+ }
+ spin_unlock(&pd->driver->lock);
+}
+
+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
+ uint32_t pte)
+{
+ pt->v[psb_mmu_pt_index(addr)] = pte;
+}
+
+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
+ unsigned long addr)
+{
+ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
+}
+
+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
+{
+ struct psb_mmu_pd *pd;
+
+ down_read(&driver->sem);
+ pd = driver->default_pd;
+ up_read(&driver->sem);
+
+ return pd;
+}
+
+/* Returns the physical address of the PD shared by sgx/msvdx */
+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
+{
+ struct psb_mmu_pd *pd;
+
+ pd = psb_mmu_get_default_pd(driver);
+ return page_to_pfn(pd->p) << PAGE_SHIFT;
+}
+
+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
+{
+ struct drm_device *dev = driver->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
+ psb_mmu_free_pagedir(driver->default_pd);
+ kfree(driver);
+}
+
+struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
+ int trap_pagefaults,
+ int invalid_type,
+ atomic_t *msvdx_mmu_invaldc)
+{
+ struct psb_mmu_driver *driver;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ driver = kmalloc(sizeof(*driver), GFP_KERNEL);
+
+ if (!driver)
+ return NULL;
+
+ driver->dev = dev;
+ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
+ invalid_type);
+ if (!driver->default_pd)
+ goto out_err1;
+
+ spin_lock_init(&driver->lock);
+ init_rwsem(&driver->sem);
+ down_write(&driver->sem);
+ atomic_set(&driver->needs_tlbflush, 1);
+ driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;
+
+ driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
+ PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
+ PSB_CR_BIF_CTRL);
+ PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
+ PSB_CR_BIF_CTRL);
+
+ driver->has_clflush = 0;
+
+#if defined(CONFIG_X86)
+ if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
+ uint32_t tfms, misc, cap0, cap4, clflush_size;
+
+ /*
+ * clflush size is determined at kernel setup for x86_64 but not
+ * for i386. We have to do it here.
+ */
+
+ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
+ clflush_size = ((misc >> 8) & 0xff) * 8;
+ driver->has_clflush = 1;
+ driver->clflush_add =
+ PAGE_SIZE * clflush_size / sizeof(uint32_t);
+ driver->clflush_mask = driver->clflush_add - 1;
+ driver->clflush_mask = ~driver->clflush_mask;
+ }
+#endif
+
+ up_write(&driver->sem);
+ return driver;
+
+out_err1:
+ kfree(driver);
+ return NULL;
+}
+
+#if defined(CONFIG_X86)
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
+ uint32_t num_pages, uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride)
+{
+ struct psb_mmu_pt *pt;
+ uint32_t rows = 1;
+ uint32_t i;
+ unsigned long addr;
+ unsigned long end;
+ unsigned long next;
+ unsigned long add;
+ unsigned long row_add;
+ unsigned long clflush_add = pd->driver->clflush_add;
+ unsigned long clflush_mask = pd->driver->clflush_mask;
+
+ if (!pd->driver->has_clflush)
+ return;
+
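+	/* The range is flushed as "rows" of desired_tile_stride pages,
+	 * with consecutive rows starting hw_tile_stride pages apart.
+	 * Illustrative (hypothetical) example: num_pages = 64,
+	 * desired_tile_stride = 16, hw_tile_stride = 32 gives four rows
+	 * of 16 pages, the rows spaced 32 pages apart.
+	 */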
+ if (hw_tile_stride)
+ rows = num_pages / desired_tile_stride;
+ else
+ desired_tile_stride = num_pages;
+
+ add = desired_tile_stride << PAGE_SHIFT;
+ row_add = hw_tile_stride << PAGE_SHIFT;
+ mb();
+ for (i = 0; i < rows; ++i) {
+
+ addr = address;
+ end = addr + add;
+
+ do {
+ next = psb_pd_addr_end(addr, end);
+ pt = psb_mmu_pt_map_lock(pd, addr);
+ if (!pt)
+ continue;
+ do {
+ psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
+ } while (addr += clflush_add,
+ (addr & clflush_mask) < next);
+
+ psb_mmu_pt_unmap_unlock(pt);
+ } while (addr = next, next != end);
+ address += row_add;
+ }
+ mb();
+}
+#else
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
+ uint32_t num_pages, uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride)
+{
+ drm_ttm_cache_flush();
+}
+#endif
+
+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+ unsigned long address, uint32_t num_pages)
+{
+ struct psb_mmu_pt *pt;
+ unsigned long addr;
+ unsigned long end;
+ unsigned long next;
+ unsigned long f_address = address;
+
+ down_read(&pd->driver->sem);
+
+ addr = address;
+ end = addr + (num_pages << PAGE_SHIFT);
+
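+	/* Walk the range one page directory entry at a time:
+	 * psb_pd_addr_end() clamps each chunk to the next PD boundary,
+	 * so each iteration maps at most one page table.
+	 */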
+ do {
+ next = psb_pd_addr_end(addr, end);
+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+ if (!pt)
+ goto out;
+ do {
+ psb_mmu_invalidate_pte(pt, addr);
+ --pt->count;
+ } while (addr += PAGE_SIZE, addr < next);
+ psb_mmu_pt_unmap_unlock(pt);
+
+ } while (addr = next, next != end);
+
+out:
+ if (pd->hw_context != -1)
+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+ up_read(&pd->driver->sem);
+
+ if (pd->hw_context != -1)
+ psb_mmu_flush(pd->driver);
+
+ return;
+}
+
+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
+ uint32_t num_pages, uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride)
+{
+ struct psb_mmu_pt *pt;
+ uint32_t rows = 1;
+ uint32_t i;
+ unsigned long addr;
+ unsigned long end;
+ unsigned long next;
+ unsigned long add;
+ unsigned long row_add;
+ unsigned long f_address = address;
+
+ if (hw_tile_stride)
+ rows = num_pages / desired_tile_stride;
+ else
+ desired_tile_stride = num_pages;
+
+ add = desired_tile_stride << PAGE_SHIFT;
+ row_add = hw_tile_stride << PAGE_SHIFT;
+
+ down_read(&pd->driver->sem);
+
+ /* Make sure we only need to flush this processor's cache */
+
+ for (i = 0; i < rows; ++i) {
+
+ addr = address;
+ end = addr + add;
+
+ do {
+ next = psb_pd_addr_end(addr, end);
+ pt = psb_mmu_pt_map_lock(pd, addr);
+ if (!pt)
+ continue;
+ do {
+ psb_mmu_invalidate_pte(pt, addr);
+ --pt->count;
+
+ } while (addr += PAGE_SIZE, addr < next);
+ psb_mmu_pt_unmap_unlock(pt);
+
+ } while (addr = next, next != end);
+ address += row_add;
+ }
+ if (pd->hw_context != -1)
+ psb_mmu_flush_ptes(pd, f_address, num_pages,
+ desired_tile_stride, hw_tile_stride);
+
+ up_read(&pd->driver->sem);
+
+ if (pd->hw_context != -1)
+ psb_mmu_flush(pd->driver);
+}
+
+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
+ unsigned long address, uint32_t num_pages,
+ int type)
+{
+ struct psb_mmu_pt *pt;
+ uint32_t pte;
+ unsigned long addr;
+ unsigned long end;
+ unsigned long next;
+ unsigned long f_address = address;
+ int ret = -ENOMEM;
+
+ down_read(&pd->driver->sem);
+
+ addr = address;
+ end = addr + (num_pages << PAGE_SHIFT);
+
+ do {
+ next = psb_pd_addr_end(addr, end);
+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+ if (!pt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ do {
+ pte = psb_mmu_mask_pte(start_pfn++, type);
+ psb_mmu_set_pte(pt, addr, pte);
+ pt->count++;
+ } while (addr += PAGE_SIZE, addr < next);
+ psb_mmu_pt_unmap_unlock(pt);
+
+ } while (addr = next, next != end);
+ ret = 0;
+
+out:
+ if (pd->hw_context != -1)
+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+ up_read(&pd->driver->sem);
+
+ if (pd->hw_context != -1)
+ psb_mmu_flush(pd->driver);
+
+	return ret;
+}
+
+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+ unsigned long address, uint32_t num_pages,
+ uint32_t desired_tile_stride, uint32_t hw_tile_stride,
+ int type)
+{
+ struct psb_mmu_pt *pt;
+ uint32_t rows = 1;
+ uint32_t i;
+ uint32_t pte;
+ unsigned long addr;
+ unsigned long end;
+ unsigned long next;
+ unsigned long add;
+ unsigned long row_add;
+ unsigned long f_address = address;
+ int ret = -ENOMEM;
+
+ if (hw_tile_stride) {
+ if (num_pages % desired_tile_stride != 0)
+ return -EINVAL;
+ rows = num_pages / desired_tile_stride;
+ } else {
+ desired_tile_stride = num_pages;
+ }
+
+ add = desired_tile_stride << PAGE_SHIFT;
+ row_add = hw_tile_stride << PAGE_SHIFT;
+
+ down_read(&pd->driver->sem);
+
+ for (i = 0; i < rows; ++i) {
+
+ addr = address;
+ end = addr + add;
+
+ do {
+ next = psb_pd_addr_end(addr, end);
+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+ if (!pt)
+ goto out;
+ do {
+ pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
+ type);
+ psb_mmu_set_pte(pt, addr, pte);
+ pt->count++;
+ } while (addr += PAGE_SIZE, addr < next);
+ psb_mmu_pt_unmap_unlock(pt);
+
+ } while (addr = next, next != end);
+
+ address += row_add;
+ }
+
+ ret = 0;
+out:
+ if (pd->hw_context != -1)
+ psb_mmu_flush_ptes(pd, f_address, num_pages,
+ desired_tile_stride, hw_tile_stride);
+
+ up_read(&pd->driver->sem);
+
+ if (pd->hw_context != -1)
+ psb_mmu_flush(pd->driver);
+
+ return ret;
+}
+
+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+ unsigned long *pfn)
+{
+ int ret;
+ struct psb_mmu_pt *pt;
+ uint32_t tmp;
+ spinlock_t *lock = &pd->driver->lock;
+
+ down_read(&pd->driver->sem);
+ pt = psb_mmu_pt_map_lock(pd, virtual);
+ if (!pt) {
+ uint32_t *v;
+
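+		/* No page table here: inspect the PDE itself. The lookup
+		 * only succeeds if the PDE still holds the invalid-PDE
+		 * pattern, in which case the address resolves to the
+		 * shared dummy page.
+		 */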
+ spin_lock(lock);
+ v = kmap_atomic(pd->p);
+ tmp = v[psb_mmu_pd_index(virtual)];
+ kunmap_atomic(v);
+ spin_unlock(lock);
+
+ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
+ !(pd->invalid_pte & PSB_PTE_VALID)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = 0;
+ *pfn = pd->invalid_pte >> PAGE_SHIFT;
+ goto out;
+ }
+ tmp = pt->v[psb_mmu_pt_index(virtual)];
+ if (!(tmp & PSB_PTE_VALID)) {
+ ret = -EINVAL;
+ } else {
+ ret = 0;
+ *pfn = tmp >> PAGE_SHIFT;
+ }
+ psb_mmu_pt_unmap_unlock(pt);
+out:
+ up_read(&pd->driver->sem);
+ return ret;
+}
diff --git a/drivers/gpu/drm/gma500/mmu.h b/drivers/gpu/drm/gma500/mmu.h
new file mode 100644
index 00000000000..e89abec6209
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mmu.h
@@ -0,0 +1,93 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ **************************************************************************/
+
+#ifndef __MMU_H
+#define __MMU_H
+
+struct psb_mmu_driver {
+ /* protects driver- and pd structures. Always take in read mode
+ * before taking the page table spinlock.
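+	 *
+	 * Typical (illustrative) ordering:
+	 *   down_read(&driver->sem);
+	 *   spin_lock(&driver->lock);
+	 *   ... read or modify page tables ...
+	 *   spin_unlock(&driver->lock);
+	 *   up_read(&driver->sem);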
+ */
+ struct rw_semaphore sem;
+
+	/* protects the hardware page tables, the directory table and
+	 * the pt structures that track them.
+	 */
+ spinlock_t lock;
+
+ atomic_t needs_tlbflush;
+ atomic_t *msvdx_mmu_invaldc;
+ struct psb_mmu_pd *default_pd;
+ uint32_t bif_ctrl;
+ int has_clflush;
+ int clflush_add;
+ unsigned long clflush_mask;
+
+ struct drm_device *dev;
+};
+
+struct psb_mmu_pd;
+
+struct psb_mmu_pt {
+ struct psb_mmu_pd *pd;
+ uint32_t index;
+ uint32_t count;
+ struct page *p;
+ uint32_t *v;
+};
+
+struct psb_mmu_pd {
+ struct psb_mmu_driver *driver;
+ int hw_context;
+ struct psb_mmu_pt **tables;
+ struct page *p;
+ struct page *dummy_pt;
+ struct page *dummy_page;
+ uint32_t pd_mask;
+ uint32_t invalid_pde;
+ uint32_t invalid_pte;
+};
+
+extern struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
+ int trap_pagefaults,
+ int invalid_type,
+ atomic_t *msvdx_mmu_invaldc);
+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
+ *driver);
+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+ int trap_pagefaults,
+ int invalid_type);
+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
+extern void psb_mmu_flush(struct psb_mmu_driver *driver);
+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+ unsigned long address,
+ uint32_t num_pages);
+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
+ uint32_t start_pfn,
+ unsigned long address,
+ uint32_t num_pages, int type);
+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+ unsigned long *pfn);
+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+ unsigned long address, uint32_t num_pages,
+ uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride, int type);
+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
+ unsigned long address, uint32_t num_pages,
+ uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride);
+
+#endif
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
new file mode 100644
index 00000000000..30adbbe2302
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -0,0 +1,257 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+/* MID device specific descriptors */
+
+struct oaktrail_timing_info {
+ u16 pixel_clock;
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hblank_hi:4;
+ u8 hactive_hi:4;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vblank_hi:4;
+ u8 vactive_hi:4;
+ u8 hsync_offset_lo;
+ u8 hsync_pulse_width_lo;
+ u8 vsync_pulse_width_lo:4;
+ u8 vsync_offset_lo:4;
+ u8 vsync_pulse_width_hi:2;
+ u8 vsync_offset_hi:2;
+ u8 hsync_pulse_width_hi:2;
+ u8 hsync_offset_hi:2;
+ u8 width_mm_lo;
+ u8 height_mm_lo;
+ u8 height_mm_hi:4;
+ u8 width_mm_hi:4;
+ u8 hborder;
+ u8 vborder;
+ u8 unknown0:1;
+ u8 hsync_positive:1;
+ u8 vsync_positive:1;
+ u8 separate_sync:2;
+ u8 stereo:1;
+ u8 unknown6:1;
+ u8 interlaced:1;
+} __packed;
+
+struct gct_r10_timing_info {
+ u16 pixel_clock;
+ u32 hactive_lo:8;
+ u32 hactive_hi:4;
+ u32 hblank_lo:8;
+ u32 hblank_hi:4;
+ u32 hsync_offset_lo:8;
+ u16 hsync_offset_hi:2;
+ u16 hsync_pulse_width_lo:8;
+ u16 hsync_pulse_width_hi:2;
+ u16 hsync_positive:1;
+ u16 rsvd_1:3;
+ u8 vactive_lo:8;
+ u16 vactive_hi:4;
+ u16 vblank_lo:8;
+ u16 vblank_hi:4;
+ u16 vsync_offset_lo:4;
+ u16 vsync_offset_hi:2;
+ u16 vsync_pulse_width_lo:4;
+ u16 vsync_pulse_width_hi:2;
+ u16 vsync_positive:1;
+ u16 rsvd_2:3;
+} __packed;
+
+struct oaktrail_panel_descriptor_v1 {
+ u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
+ /* 0x61190 if MIPI */
+ u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
+ u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+ u32 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
+ /* Register 0x61210 */
+ struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
+ u16 Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
+ /* Bit 0, Frequency, 15 bits,0 - 32767Hz */
+ /* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
+ u16 Panel_MIPI_Display_Descriptor;
+ /*16 bits, Defined as follows: */
+ /* if MIPI, 0x0000 if LVDS */
+ /* Bit 0, Type, 2 bits, */
+ /* 0: Type-1, */
+ /* 1: Type-2, */
+ /* 2: Type-3, */
+ /* 3: Type-4 */
+ /* Bit 2, Pixel Format, 4 bits */
+ /* Bit0: 16bpp (not supported in LNC), */
+ /* Bit1: 18bpp loosely packed, */
+ /* Bit2: 18bpp packed, */
+ /* Bit3: 24bpp */
+ /* Bit 6, Reserved, 2 bits, 00b */
+ /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
+ /* Bit 14, Reserved, 2 bits, 00b */
+} __packed;
+
+struct oaktrail_panel_descriptor_v2 {
+ u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
+ /* 0x61190 if MIPI */
+ u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
+ u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+ u8 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
+ /* Register 0x61210 */
+ struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
+ u16 Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
+ /*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
+ u8 Panel_Initial_Brightness;/* [7:0] 0 - 100% */
+ /*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
+ u16 Panel_MIPI_Display_Descriptor;
+ /*16 bits, Defined as follows: */
+ /* if MIPI, 0x0000 if LVDS */
+ /* Bit 0, Type, 2 bits, */
+ /* 0: Type-1, */
+ /* 1: Type-2, */
+ /* 2: Type-3, */
+ /* 3: Type-4 */
+ /* Bit 2, Pixel Format, 4 bits */
+ /* Bit0: 16bpp (not supported in LNC), */
+ /* Bit1: 18bpp loosely packed, */
+ /* Bit2: 18bpp packed, */
+ /* Bit3: 24bpp */
+ /* Bit 6, Reserved, 2 bits, 00b */
+ /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
+ /* Bit 14, Reserved, 2 bits, 00b */
+} __packed;
+
+union oaktrail_panel_rx {
+ struct {
+ u16 NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
+ /* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
+ u16 MaxLaneFreq:3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
+ /*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
+ u16 SupportedVideoTransferMode:2; /*0: Non-burst only */
+ /* 1: Burst and non-burst */
+ /* 2/3: Reserved */
+ u16 HSClkBehavior:1; /*0: Continuous, 1: Non-continuous*/
+ u16 DuoDisplaySupport:1; /*1 bit,0: No, 1: Yes*/
+ u16 ECC_ChecksumCapabilities:1;/*1 bit,0: No, 1: Yes*/
+ u16 BidirectionalCommunication:1;/*1 bit,0: No, 1: Yes */
+ u16 Rsvd:5;/*5 bits,00000b */
+ } panelrx;
+ u16 panel_receiver;
+} __packed;
+
+struct gct_r0 {
+ union { /*8 bits,Defined as follows: */
+ struct {
+ u8 PanelType:4; /*4 bits, Bit field for panels*/
+ /* 0 - 3: 0 = LVDS, 1 = MIPI*/
+ /*2 bits,Specifies which of the*/
+ u8 BootPanelIndex:2;
+ /* 4 panels to use by default*/
+ u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
+ /* the 4 MIPI DSI receivers to use*/
+ } PD;
+ u8 PanelDescriptor;
+ };
+ struct oaktrail_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
+ union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
+} __packed;
+
+struct gct_r1 {
+ union { /*8 bits,Defined as follows: */
+ struct {
+ u8 PanelType:4; /*4 bits, Bit field for panels*/
+ /* 0 - 3: 0 = LVDS, 1 = MIPI*/
+ /*2 bits,Specifies which of the*/
+ u8 BootPanelIndex:2;
+ /* 4 panels to use by default*/
+ u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
+ /* the 4 MIPI DSI receivers to use*/
+ } PD;
+ u8 PanelDescriptor;
+ };
+ struct oaktrail_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
+ union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
+} __packed;
+
+struct gct_r10 {
+ struct gct_r10_timing_info DTD;
+ u16 Panel_MIPI_Display_Descriptor;
+ u16 Panel_MIPI_Receiver_Descriptor;
+ u16 Panel_Backlight_Inverter_Descriptor;
+ u8 Panel_Initial_Brightness;
+ u32 MIPI_Ctlr_Init_ptr;
+ u32 MIPI_Panel_Init_ptr;
+} __packed;
+
+struct oaktrail_gct_data {
+	u8 bpi; /* boot panel index, i.e. which panel is used during boot */
+ u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
+ struct oaktrail_timing_info DTD; /* timing info for the selected panel */
+ u32 Panel_Port_Control;
+ u32 PP_On_Sequencing;/*1 dword,Register 0x61208,*/
+ u32 PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+ u32 PP_Cycle_Delay;
+ u16 Panel_Backlight_Inverter_Descriptor;
+ u16 Panel_MIPI_Display_Descriptor;
+} __packed;
+
+#define MODE_SETTING_IN_CRTC 0x1
+#define MODE_SETTING_IN_ENCODER 0x2
+#define MODE_SETTING_ON_GOING 0x3
+#define MODE_SETTING_IN_DSR 0x4
+#define MODE_SETTING_ENCODER_DONE 0x8
+
+/*
+ * Moorestown HDMI interfaces
+ */
+
+struct oaktrail_hdmi_dev {
+ struct pci_dev *dev;
+ void __iomem *regs;
+ unsigned int mmio, mmio_len;
+ int dpms_mode;
+ struct hdmi_i2c_dev *i2c_dev;
+
+ /* register state */
+ u32 saveDPLL_CTRL;
+ u32 saveDPLL_DIV_CTRL;
+ u32 saveDPLL_ADJUST;
+ u32 saveDPLL_UPDATE;
+ u32 saveDPLL_CLK_ENABLE;
+ u32 savePCH_HTOTAL_B;
+ u32 savePCH_HBLANK_B;
+ u32 savePCH_HSYNC_B;
+ u32 savePCH_VTOTAL_B;
+ u32 savePCH_VBLANK_B;
+ u32 savePCH_VSYNC_B;
+ u32 savePCH_PIPEBCONF;
+ u32 savePCH_PIPEBSRC;
+};
+
+extern void oaktrail_hdmi_setup(struct drm_device *dev);
+extern void oaktrail_hdmi_teardown(struct drm_device *dev);
+extern int oaktrail_hdmi_i2c_init(struct pci_dev *dev);
+extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
+extern void oaktrail_hdmi_save(struct drm_device *dev);
+extern void oaktrail_hdmi_restore(struct drm_device *dev);
+extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
+extern int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb);
+extern void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode);
+
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
new file mode 100644
index 00000000000..2de216c2374
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -0,0 +1,672 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "gma_display.h"
+#include "power.h"
+
+#define MRST_LIMIT_LVDS_100L 0
+#define MRST_LIMIT_LVDS_83 1
+#define MRST_LIMIT_LVDS_100 2
+#define MRST_LIMIT_SDVO 3
+
+#define MRST_DOT_MIN 19750
+#define MRST_DOT_MAX 120000
+#define MRST_M_MIN_100L 20
+#define MRST_M_MIN_100 10
+#define MRST_M_MIN_83 12
+#define MRST_M_MAX_100L 34
+#define MRST_M_MAX_100 17
+#define MRST_M_MAX_83 20
+#define MRST_P1_MIN 2
+#define MRST_P1_MAX_0 7
+#define MRST_P1_MAX_1 8
+
+static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target,
+ int refclk, struct gma_clock_t *best_clock);
+
+static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target,
+ int refclk, struct gma_clock_t *best_clock);
+
+static const struct gma_limit_t mrst_limits[] = {
+ { /* MRST_LIMIT_LVDS_100L */
+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+ .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+ .find_pll = mrst_lvds_find_best_pll,
+ },
+	{ /* MRST_LIMIT_LVDS_83 */
+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+ .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
+ .find_pll = mrst_lvds_find_best_pll,
+ },
+ { /* MRST_LIMIT_LVDS_100 */
+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+ .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+ .find_pll = mrst_lvds_find_best_pll,
+ },
+ { /* MRST_LIMIT_SDVO */
+ .vco = {.min = 1400000, .max = 2800000},
+ .n = {.min = 3, .max = 7},
+ .m = {.min = 80, .max = 137},
+ .p1 = {.min = 1, .max = 2},
+ .p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 10},
+ .find_pll = mrst_sdvo_find_best_pll,
+ },
+};
+
+#define MRST_M_MIN 10
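+/* Lookup table, indexed by (m - MRST_M_MIN), giving the register encoding
+ * of the LVDS PLL m divisor; see the fp computation in
+ * oaktrail_crtc_mode_set().
+ */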
+static const u32 oaktrail_m_converts[] = {
+ 0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
+ 0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
+ 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
+};
+
+static const struct gma_limit_t *mrst_limit(struct drm_crtc *crtc,
+ int refclk)
+{
+ const struct gma_limit_t *limit = NULL;
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+	    gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
+ switch (dev_priv->core_freq) {
+ case 100:
+ limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
+ break;
+ case 166:
+ limit = &mrst_limits[MRST_LIMIT_LVDS_83];
+ break;
+ case 200:
+ limit = &mrst_limits[MRST_LIMIT_LVDS_100];
+ break;
+ }
+ } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+ limit = &mrst_limits[MRST_LIMIT_SDVO];
+ } else {
+ limit = NULL;
+		dev_err(dev->dev, "mrst_limit: wrong display type\n");
+ }
+
+ return limit;
+}
+
+/* Derive the pixel clock for the given refclk and divisors (Moorestown LVDS). */
+static void mrst_lvds_clock(int refclk, struct gma_clock_t *clock)
+{
+ clock->dot = (refclk * clock->m) / (14 * clock->p1);
+}
+
+static void mrst_print_pll(struct gma_clock_t *clock)
+{
+ DRM_DEBUG_DRIVER("dotclock=%d, m=%d, m1=%d, m2=%d, n=%d, p1=%d, p2=%d\n",
+ clock->dot, clock->m, clock->m1, clock->m2, clock->n,
+ clock->p1, clock->p2);
+}
+
+static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target,
+ int refclk, struct gma_clock_t *best_clock)
+{
+ struct gma_clock_t clock;
+ u32 target_vco, actual_freq;
+ s32 freq_error, min_error = 100000;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
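+	/* freq_error below is measured in units of 0.01%:
+	 * 10000 * (1 - target/actual). min_error starts high (100000)
+	 * so the first candidate inside the VCO range is always taken.
+	 */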
+ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+ for (clock.n = limit->n.min; clock.n <= limit->n.max;
+ clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ /* p2 value always stored in p2_slow on SDVO */
+ clock.p = clock.p1 * limit->p2.p2_slow;
+ target_vco = target * clock.p;
+
+				/* target_vco only grows with p1, so stop */
+ if (target_vco > limit->vco.max)
+ break;
+
+ if (target_vco < limit->vco.min)
+ continue;
+
+ actual_freq = (refclk * clock.m) /
+ (clock.n * clock.p);
+ freq_error = 10000 -
+ ((target * 10000) / actual_freq);
+
+ if (freq_error < -min_error) {
+					/* freq_error only falls further as
+					   p1 grows; no better match is
+					   possible, so stop */
+ break;
+ }
+
+ if (freq_error < 0)
+ freq_error = -freq_error;
+
+ if (freq_error < min_error) {
+ min_error = freq_error;
+ *best_clock = clock;
+ }
+ }
+ }
+ if (min_error == 0)
+ break;
+ }
+
+ return min_error == 0;
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or false. The divisors are the raw m and p1 values fed to
+ * mrst_lvds_clock(): dot = (refclk * m) / (14 * p1).
+ */
+static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target,
+ int refclk, struct gma_clock_t *best_clock)
+{
+ struct gma_clock_t clock;
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+ clock.p1++) {
+ int this_err;
+
+ mrst_lvds_clock(refclk, &clock);
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ return err != target;
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ u32 temp;
+ int i;
+ int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
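+	/* As the REG_*_WITH_AUX helpers suggest, SDVO output is driven
+	 * through an auxiliary register bank as well; when need_aux is 1
+	 * the loops below replay each register access against that bank.
+	 */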
+
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+ oaktrail_crtc_hdmi_dpms(crtc, mode);
+ return;
+ }
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ for (i = 0; i <= need_aux; i++) {
+ /* Enable the DPLL */
+ temp = REG_READ_WITH_AUX(map->dpll, i);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ REG_WRITE_WITH_AUX(map->dpll, temp, i);
+ REG_READ_WITH_AUX(map->dpll, i);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE_WITH_AUX(map->dpll,
+ temp | DPLL_VCO_ENABLE, i);
+ REG_READ_WITH_AUX(map->dpll, i);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE_WITH_AUX(map->dpll,
+ temp | DPLL_VCO_ENABLE, i);
+ REG_READ_WITH_AUX(map->dpll, i);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ }
+
+ /* Enable the pipe */
+ temp = REG_READ_WITH_AUX(map->conf, i);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE_WITH_AUX(map->conf,
+ temp | PIPEACONF_ENABLE, i);
+ }
+
+ /* Enable the plane */
+ temp = REG_READ_WITH_AUX(map->cntr, i);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE_WITH_AUX(map->cntr,
+ temp | DISPLAY_PLANE_ENABLE,
+ i);
+ /* Flush the plane changes */
+ REG_WRITE_WITH_AUX(map->base,
+ REG_READ_WITH_AUX(map->base, i), i);
+ }
+
+ }
+ gma_crtc_load_lut(crtc);
+
+ /* Give the overlay scaler a chance to enable
+ if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* Give the overlay scaler a chance to disable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+ for (i = 0; i <= need_aux; i++) {
+ /* Disable the VGA plane that we never use */
+ REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
+ /* Disable display plane */
+ temp = REG_READ_WITH_AUX(map->cntr, i);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE_WITH_AUX(map->cntr,
+ temp & ~DISPLAY_PLANE_ENABLE, i);
+ /* Flush the plane changes */
+ REG_WRITE_WITH_AUX(map->base,
+ REG_READ(map->base), i);
+ REG_READ_WITH_AUX(map->base, i);
+ }
+
+ /* Next, disable display pipes */
+ temp = REG_READ_WITH_AUX(map->conf, i);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE_WITH_AUX(map->conf,
+ temp & ~PIPEACONF_ENABLE, i);
+ REG_READ_WITH_AUX(map->conf, i);
+ }
+			/* Wait for the pipe disable to take effect. */
+ gma_wait_for_vblank(dev);
+
+ temp = REG_READ_WITH_AUX(map->dpll, i);
+ if ((temp & DPLL_VCO_ENABLE) != 0) {
+ REG_WRITE_WITH_AUX(map->dpll,
+ temp & ~DPLL_VCO_ENABLE, i);
+ REG_READ_WITH_AUX(map->dpll, i);
+ }
+
+ /* Wait for the clocks to turn off. */
+ udelay(150);
+ }
+ break;
+ }
+
+ /* Set FIFO Watermarks (values taken from EMGD) */
+ REG_WRITE(DSPARB, 0x3f80);
+ REG_WRITE(DSPFW1, 0x3f8f0404);
+ REG_WRITE(DSPFW2, 0x04040f04);
+ REG_WRITE(DSPFW3, 0x0);
+ REG_WRITE(DSPFW4, 0x04040404);
+ REG_WRITE(DSPFW5, 0x04040404);
+ REG_WRITE(DSPFW6, 0x78);
+ REG_WRITE(DSPCHICKENBIT, REG_READ(DSPCHICKENBIT) | 0xc040);
+
+ gma_power_end(dev);
+}
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int oaktrail_panel_fitter_pipe(struct drm_device *dev)
+{
+ u32 pfit_control;
+
+ pfit_control = REG_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+ return (pfit_control >> 29) & 3;
+}
+
+static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int pipe = gma_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ int refclk = 0;
+ struct gma_clock_t clock;
+ const struct gma_limit_t *limit;
+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+ bool ok, is_sdvo = false;
+ bool is_lvds = false;
+ bool is_mipi = false;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct gma_encoder *gma_encoder = NULL;
+ uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
+ struct drm_connector *connector;
+ int i;
+ int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
+
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+ return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ memcpy(&gma_crtc->saved_mode,
+ mode,
+ sizeof(struct drm_display_mode));
+ memcpy(&gma_crtc->saved_adjusted_mode,
+ adjusted_mode,
+ sizeof(struct drm_display_mode));
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ if (!connector->encoder || connector->encoder->crtc != crtc)
+ continue;
+
+ gma_encoder = gma_attached_encoder(connector);
+
+ switch (gma_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ is_sdvo = true;
+ break;
+ case INTEL_OUTPUT_MIPI:
+ is_mipi = true;
+ break;
+ }
+ }
+
+ /* Disable the VGA plane that we never use */
+ for (i = 0; i <= need_aux; i++)
+ REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
+
+ /* Disable the panel fitter if it was on our pipe */
+ if (oaktrail_panel_fitter_pipe(dev) == pipe)
+ REG_WRITE(PFIT_CONTROL, 0);
+
+ for (i = 0; i <= need_aux; i++) {
+ REG_WRITE_WITH_AUX(map->src, ((mode->crtc_hdisplay - 1) << 16) |
+ (mode->crtc_vdisplay - 1), i);
+ }
+
+ if (gma_encoder)
+ drm_object_property_get_value(&connector->base,
+ dev->mode_config.scaling_mode_property, &scalingType);
+
+ if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
+ /* Moorestown doesn't have register support for centering so
+ * we need to mess with the h/vblank and h/vsync start and
+ * ends to get centering */
+ int offsetX = 0, offsetY = 0;
+
+ offsetX = (adjusted_mode->crtc_hdisplay -
+ mode->crtc_hdisplay) / 2;
+ offsetY = (adjusted_mode->crtc_vdisplay -
+ mode->crtc_vdisplay) / 2;
+
+ for (i = 0; i <= need_aux; i++) {
+ REG_WRITE_WITH_AUX(map->htotal, (mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->vtotal, (mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->hblank,
+ (adjusted_mode->crtc_hblank_start - offsetX - 1) |
+ ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->hsync,
+ (adjusted_mode->crtc_hsync_start - offsetX - 1) |
+ ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->vblank,
+ (adjusted_mode->crtc_vblank_start - offsetY - 1) |
+ ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->vsync,
+ (adjusted_mode->crtc_vsync_start - offsetY - 1) |
+ ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16), i);
+ }
+ } else {
+ for (i = 0; i <= need_aux; i++) {
+ REG_WRITE_WITH_AUX(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16), i);
+ REG_WRITE_WITH_AUX(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16), i);
+ }
+ }
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs =
+ crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(map->conf);
+
+ /* Set up the display plane register */
+ dspcntr = REG_READ(map->cntr);
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
+
+ if (pipe == 0)
+ dspcntr |= DISPPLANE_SEL_PIPE_A;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ if (is_mipi)
+ goto oaktrail_crtc_mode_set_exit;
+
+
+ dpll = 0; /*BIT16 = 0 for 100MHz reference */
+
+ refclk = is_sdvo ? 96000 : dev_priv->core_freq * 1000;
+ limit = mrst_limit(crtc, refclk);
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock,
+ refclk, &clock);
+
+ if (is_sdvo) {
+ /* Convert calculated values to register values */
+ clock.p1 = (1L << (clock.p1 - 1));
+ clock.m -= 2;
+ clock.n = (1L << (clock.n - 1));
+ }
+
+ if (!ok)
+		DRM_ERROR("Failed to find proper PLL settings\n");
+
+ mrst_print_pll(&clock);
+
+ if (is_sdvo)
+ fp = clock.n << 16 | clock.m;
+ else
+ fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
+
+ dpll |= DPLL_VGA_MODE_DIS;
+
+
+ dpll |= DPLL_VCO_ENABLE;
+
+ if (is_lvds)
+ dpll |= DPLLA_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+
+ if (is_sdvo) {
+ int sdvo_pixel_multiply =
+ adjusted_mode->clock / mode->clock;
+
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |=
+ (sdvo_pixel_multiply -
+ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+
+
+ /* compute bitmask from p1 value */
+ if (is_sdvo)
+		dpll |= clock.p1 << 16; /* p1 was converted to (1 << (p1 - 1)) above */
+ else
+ dpll |= (1 << (clock.p1 - 2)) << 17;
+
+ dpll |= DPLL_VCO_ENABLE;
+
+ if (dpll & DPLL_VCO_ENABLE) {
+ for (i = 0; i <= need_aux; i++) {
+ REG_WRITE_WITH_AUX(map->fp0, fp, i);
+ REG_WRITE_WITH_AUX(map->dpll, dpll & ~DPLL_VCO_ENABLE, i);
+ REG_READ_WITH_AUX(map->dpll, i);
+ /* Check the DPLLA lock bit PIPEACONF[29] */
+ udelay(150);
+ }
+ }
+
+ for (i = 0; i <= need_aux; i++) {
+ REG_WRITE_WITH_AUX(map->fp0, fp, i);
+ REG_WRITE_WITH_AUX(map->dpll, dpll, i);
+ REG_READ_WITH_AUX(map->dpll, i);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+
+ /* write it again -- the BIOS does, after all */
+ REG_WRITE_WITH_AUX(map->dpll, dpll, i);
+ REG_READ_WITH_AUX(map->dpll, i);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+
+ REG_WRITE_WITH_AUX(map->conf, pipeconf, i);
+ REG_READ_WITH_AUX(map->conf, i);
+ gma_wait_for_vblank(dev);
+
+ REG_WRITE_WITH_AUX(map->cntr, dspcntr, i);
+ gma_wait_for_vblank(dev);
+ }
+
+oaktrail_crtc_mode_set_exit:
+ gma_power_end(dev);
+ return 0;
+}
+
+static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb);
+ int pipe = gma_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ unsigned long start, offset;
+
+ u32 dspcntr;
+ int ret = 0;
+
+ /* no fb bound */
+ if (!crtc->primary->fb) {
+ dev_dbg(dev->dev, "No FB bound\n");
+ return 0;
+ }
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ start = psbfb->gtt->offset;
+ offset = y * crtc->primary->fb->pitches[0] + x * (crtc->primary->fb->bits_per_pixel / 8);
+
+ REG_WRITE(map->stride, crtc->primary->fb->pitches[0]);
+
+ dspcntr = REG_READ(map->cntr);
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+ switch (crtc->primary->fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (crtc->primary->fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown color depth\n");
+ ret = -EINVAL;
+ goto pipe_set_base_exit;
+ }
+ REG_WRITE(map->cntr, dspcntr);
+
+ REG_WRITE(map->base, offset);
+ REG_READ(map->base);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
+
+pipe_set_base_exit:
+ gma_power_end(dev);
+ return ret;
+}
+
+const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
+ .dpms = oaktrail_crtc_dpms,
+ .mode_fixup = gma_crtc_mode_fixup,
+ .mode_set = oaktrail_crtc_mode_set,
+ .mode_set_base = oaktrail_pipe_set_base,
+ .prepare = gma_crtc_prepare,
+ .commit = gma_crtc_commit,
+};
+
+/* Not used yet */
+const struct gma_clock_funcs mrst_clock_funcs = {
+ .clock = mrst_lvds_clock,
+ .limit = mrst_limit,
+ .pll_is_valid = gma_pll_is_valid,
+};
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
new file mode 100644
index 00000000000..368a03ae301
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -0,0 +1,575 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/gma_drm.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+#include "mid_bios.h"
+#include "intel_bios.h"
+
+static int oaktrail_output_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (dev_priv->iLVDS_enable)
+ oaktrail_lvds_init(dev, &dev_priv->mode_dev);
+ else
+ dev_err(dev->dev, "DSI is not supported\n");
+ if (dev_priv->hdmi_priv)
+ oaktrail_hdmi_init(dev, &dev_priv->mode_dev);
+
+ psb_intel_sdvo_init(dev, SDVOB);
+
+ return 0;
+}
+
+/*
+ * Provide the low level interfaces for the Moorestown backlight
+ */
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
+#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+#define BLC_ADJUSTMENT_MAX 100
+
+static struct backlight_device *oaktrail_backlight_device;
+static int oaktrail_brightness;
+
+static int oaktrail_set_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(oaktrail_backlight_device);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int level = bd->props.brightness;
+ u32 blc_pwm_ctl;
+ u32 max_pwm_blc;
+
+	/* Brightness percentage: only 1-100% is valid */
+ if (level < 1)
+ level = 1;
+
+ if (gma_power_begin(dev, 0)) {
+ /* Calculate and set the brightness value */
+ max_pwm_blc = REG_READ(BLC_PWM_CTL) >> 16;
+ blc_pwm_ctl = level * max_pwm_blc / 100;
+
+ /* Adjust the backlight level with the percent in
+ * dev_priv->blc_adj1;
+ */
+ blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
+ blc_pwm_ctl = blc_pwm_ctl / 100;
+
+ /* Adjust the backlight level with the percent in
+ * dev_priv->blc_adj2;
+ */
+ blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj2;
+ blc_pwm_ctl = blc_pwm_ctl / 100;
+
+ /* force PWM bit on */
+ REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
+ REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl);
+ gma_power_end(dev);
+ }
+ oaktrail_brightness = level;
+ return 0;
+}
+
+static int oaktrail_get_brightness(struct backlight_device *bd)
+{
+ /* return locally cached var instead of HW read (due to DPST etc.) */
+ /* FIXME: ideally return actual value in case firmware fiddled with
+ it */
+ return oaktrail_brightness;
+}
+
+static int device_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long core_clock;
+ u16 bl_max_freq;
+ uint32_t value;
+ uint32_t blc_pwm_precision_factor;
+
+ dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
+ dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
+ bl_max_freq = 256;
+ /* this needs to be set elsewhere */
+ blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
+
+ core_clock = dev_priv->core_freq;
+
+ value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+ value *= blc_pwm_precision_factor;
+ value /= bl_max_freq;
+ value /= blc_pwm_precision_factor;
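+	/* Worked example (hypothetical 200 MHz core clock):
+	 * (200000000 / 32) * 100 / 256 / 100 = 24414, comfortably below
+	 * MRST_BLC_MAX_PWM_REG_FREQ (0xffff).
+	 */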
+
+ if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
+ return -ERANGE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
+ REG_WRITE(BLC_PWM_CTL, value | (value << 16));
+ gma_power_end(dev);
+ }
+ return 0;
+}
+
+static const struct backlight_ops oaktrail_ops = {
+ .get_brightness = oaktrail_get_brightness,
+ .update_status = oaktrail_set_brightness,
+};
+
+static int oaktrail_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 100;
+ props.type = BACKLIGHT_PLATFORM;
+
+ oaktrail_backlight_device = backlight_device_register("oaktrail-bl",
+ NULL, (void *)dev, &oaktrail_ops, &props);
+
+ if (IS_ERR(oaktrail_backlight_device))
+ return PTR_ERR(oaktrail_backlight_device);
+
+ ret = device_backlight_init(dev);
+ if (ret < 0) {
+ backlight_device_unregister(oaktrail_backlight_device);
+ return ret;
+ }
+ oaktrail_backlight_device->props.brightness = 100;
+ oaktrail_backlight_device->props.max_brightness = 100;
+ backlight_update_status(oaktrail_backlight_device);
+ dev_priv->backlight_device = oaktrail_backlight_device;
+ return 0;
+}
+
+#endif
+
+/*
+ * Provide the Moorestown specific chip logic and low level methods
+ * for power management
+ */
+
+/**
+ * oaktrail_save_display_registers - save registers lost on suspend
+ * @dev: our DRM device
+ *
+ * Save the state we need in order to be able to restore the interface
+ * upon resume from suspend
+ */
+static int oaktrail_save_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_save_area *regs = &dev_priv->regs;
+ struct psb_pipe *p = &regs->pipe[0];
+ int i;
+ u32 pp_stat;
+
+ /* Display arbitration control + watermarks */
+ regs->psb.saveDSPARB = PSB_RVDC32(DSPARB);
+ regs->psb.saveDSPFW1 = PSB_RVDC32(DSPFW1);
+ regs->psb.saveDSPFW2 = PSB_RVDC32(DSPFW2);
+ regs->psb.saveDSPFW3 = PSB_RVDC32(DSPFW3);
+ regs->psb.saveDSPFW4 = PSB_RVDC32(DSPFW4);
+ regs->psb.saveDSPFW5 = PSB_RVDC32(DSPFW5);
+ regs->psb.saveDSPFW6 = PSB_RVDC32(DSPFW6);
+ regs->psb.saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+
+ /* Pipe & plane A info */
+ p->conf = PSB_RVDC32(PIPEACONF);
+ p->src = PSB_RVDC32(PIPEASRC);
+ p->fp0 = PSB_RVDC32(MRST_FPA0);
+ p->fp1 = PSB_RVDC32(MRST_FPA1);
+ p->dpll = PSB_RVDC32(MRST_DPLL_A);
+ p->htotal = PSB_RVDC32(HTOTAL_A);
+ p->hblank = PSB_RVDC32(HBLANK_A);
+ p->hsync = PSB_RVDC32(HSYNC_A);
+ p->vtotal = PSB_RVDC32(VTOTAL_A);
+ p->vblank = PSB_RVDC32(VBLANK_A);
+ p->vsync = PSB_RVDC32(VSYNC_A);
+ regs->psb.saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
+ p->cntr = PSB_RVDC32(DSPACNTR);
+ p->stride = PSB_RVDC32(DSPASTRIDE);
+ p->addr = PSB_RVDC32(DSPABASE);
+ p->surf = PSB_RVDC32(DSPASURF);
+ p->linoff = PSB_RVDC32(DSPALINOFF);
+ p->tileoff = PSB_RVDC32(DSPATILEOFF);
+
+ /* Save cursor regs */
+ regs->psb.saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
+ regs->psb.saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
+ regs->psb.saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
+
+ /* Save palette (gamma) */
+ for (i = 0; i < 256; i++)
+ p->palette[i] = PSB_RVDC32(PALETTE_A + (i << 2));
+
+ if (dev_priv->hdmi_priv)
+ oaktrail_hdmi_save(dev);
+
+ /* Save performance state */
+ regs->psb.savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
+
+ /* LVDS state */
+ regs->psb.savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
+ regs->psb.savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
+ regs->psb.savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
+ regs->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
+ regs->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
+ regs->psb.saveLVDS = PSB_RVDC32(LVDS);
+ regs->psb.savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
+ regs->psb.savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
+ regs->psb.savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
+ regs->psb.savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
+
+ /* HW overlay */
+ regs->psb.saveOV_OVADD = PSB_RVDC32(OV_OVADD);
+ regs->psb.saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
+ regs->psb.saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
+ regs->psb.saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
+ regs->psb.saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
+ regs->psb.saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
+ regs->psb.saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
+
+ /* DPST registers */
+ regs->psb.saveHISTOGRAM_INT_CONTROL_REG =
+ PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+ regs->psb.saveHISTOGRAM_LOGIC_CONTROL_REG =
+ PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+ regs->psb.savePWM_CONTROL_LOGIC = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+ if (dev_priv->iLVDS_enable) {
+ /* Shut down the panel */
+ PSB_WVDC32(0, PP_CONTROL);
+
+ do {
+ pp_stat = PSB_RVDC32(PP_STATUS);
+ } while (pp_stat & 0x80000000);
+
+ /* Turn off the plane */
+ PSB_WVDC32(0x58000000, DSPACNTR);
+ /* Trigger the plane disable */
+ PSB_WVDC32(0, DSPASURF);
+
+		/* Wait ~4 ms */
+ msleep(4);
+
+ /* Turn off pipe */
+ PSB_WVDC32(0x0, PIPEACONF);
+		/* Wait ~8 ms */
+ msleep(8);
+
+ /* Turn off PLLs */
+ PSB_WVDC32(0, MRST_DPLL_A);
+ }
+ return 0;
+}
+
+/**
+ * oaktrail_restore_display_registers - restore lost register state
+ * @dev: our DRM device
+ *
+ * Restore register state that was lost during suspend and resume.
+ */
+static int oaktrail_restore_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_save_area *regs = &dev_priv->regs;
+ struct psb_pipe *p = &regs->pipe[0];
+ u32 pp_stat;
+ int i;
+
+ /* Display arbitration + watermarks */
+ PSB_WVDC32(regs->psb.saveDSPARB, DSPARB);
+ PSB_WVDC32(regs->psb.saveDSPFW1, DSPFW1);
+ PSB_WVDC32(regs->psb.saveDSPFW2, DSPFW2);
+ PSB_WVDC32(regs->psb.saveDSPFW3, DSPFW3);
+ PSB_WVDC32(regs->psb.saveDSPFW4, DSPFW4);
+ PSB_WVDC32(regs->psb.saveDSPFW5, DSPFW5);
+ PSB_WVDC32(regs->psb.saveDSPFW6, DSPFW6);
+ PSB_WVDC32(regs->psb.saveCHICKENBIT, DSPCHICKENBIT);
+
+	/* Make sure VGA plane is off; it initializes to on after reset! */
+ PSB_WVDC32(0x80000000, VGACNTRL);
+
+ /* set the plls */
+ PSB_WVDC32(p->fp0, MRST_FPA0);
+ PSB_WVDC32(p->fp1, MRST_FPA1);
+
+ /* Actually enable it */
+ PSB_WVDC32(p->dpll, MRST_DPLL_A);
+ DRM_UDELAY(150);
+
+ /* Restore mode */
+ PSB_WVDC32(p->htotal, HTOTAL_A);
+ PSB_WVDC32(p->hblank, HBLANK_A);
+ PSB_WVDC32(p->hsync, HSYNC_A);
+ PSB_WVDC32(p->vtotal, VTOTAL_A);
+ PSB_WVDC32(p->vblank, VBLANK_A);
+ PSB_WVDC32(p->vsync, VSYNC_A);
+ PSB_WVDC32(p->src, PIPEASRC);
+ PSB_WVDC32(regs->psb.saveBCLRPAT_A, BCLRPAT_A);
+
+ /* Restore performance mode*/
+ PSB_WVDC32(regs->psb.savePERF_MODE, MRST_PERF_MODE);
+
+ /* Enable the pipe*/
+ if (dev_priv->iLVDS_enable)
+ PSB_WVDC32(p->conf, PIPEACONF);
+
+ /* Set up the plane*/
+ PSB_WVDC32(p->linoff, DSPALINOFF);
+ PSB_WVDC32(p->stride, DSPASTRIDE);
+ PSB_WVDC32(p->tileoff, DSPATILEOFF);
+
+ /* Enable the plane */
+ PSB_WVDC32(p->cntr, DSPACNTR);
+ PSB_WVDC32(p->surf, DSPASURF);
+
+ /* Enable Cursor A */
+ PSB_WVDC32(regs->psb.saveDSPACURSOR_CTRL, CURACNTR);
+ PSB_WVDC32(regs->psb.saveDSPACURSOR_POS, CURAPOS);
+ PSB_WVDC32(regs->psb.saveDSPACURSOR_BASE, CURABASE);
+
+ /* Restore palette (gamma) */
+ for (i = 0; i < 256; i++)
+ PSB_WVDC32(p->palette[i], PALETTE_A + (i << 2));
+
+ if (dev_priv->hdmi_priv)
+ oaktrail_hdmi_restore(dev);
+
+ if (dev_priv->iLVDS_enable) {
+ PSB_WVDC32(regs->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
+ PSB_WVDC32(regs->psb.saveLVDS, LVDS); /*port 61180h*/
+ PSB_WVDC32(regs->psb.savePFIT_CONTROL, PFIT_CONTROL);
+ PSB_WVDC32(regs->psb.savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
+ PSB_WVDC32(regs->psb.savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
+ PSB_WVDC32(regs->saveBLC_PWM_CTL, BLC_PWM_CTL);
+ PSB_WVDC32(regs->psb.savePP_ON_DELAYS, LVDSPP_ON);
+ PSB_WVDC32(regs->psb.savePP_OFF_DELAYS, LVDSPP_OFF);
+ PSB_WVDC32(regs->psb.savePP_DIVISOR, PP_CYCLE);
+ PSB_WVDC32(regs->psb.savePP_CONTROL, PP_CONTROL);
+ }
+
+ /* Wait for cycle delay */
+ do {
+ pp_stat = PSB_RVDC32(PP_STATUS);
+ } while (pp_stat & 0x08000000);
+
+ /* Wait for panel power up */
+ do {
+ pp_stat = PSB_RVDC32(PP_STATUS);
+ } while (pp_stat & 0x10000000);
+
+ /* Restore HW overlay */
+ PSB_WVDC32(regs->psb.saveOV_OVADD, OV_OVADD);
+ PSB_WVDC32(regs->psb.saveOV_OGAMC0, OV_OGAMC0);
+ PSB_WVDC32(regs->psb.saveOV_OGAMC1, OV_OGAMC1);
+ PSB_WVDC32(regs->psb.saveOV_OGAMC2, OV_OGAMC2);
+ PSB_WVDC32(regs->psb.saveOV_OGAMC3, OV_OGAMC3);
+ PSB_WVDC32(regs->psb.saveOV_OGAMC4, OV_OGAMC4);
+ PSB_WVDC32(regs->psb.saveOV_OGAMC5, OV_OGAMC5);
+
+ /* DPST registers */
+ PSB_WVDC32(regs->psb.saveHISTOGRAM_INT_CONTROL_REG,
+ HISTOGRAM_INT_CONTROL);
+ PSB_WVDC32(regs->psb.saveHISTOGRAM_LOGIC_CONTROL_REG,
+ HISTOGRAM_LOGIC_CONTROL);
+ PSB_WVDC32(regs->psb.savePWM_CONTROL_LOGIC, PWM_CONTROL_LOGIC);
+
+ return 0;
+}
+
+/**
+ * oaktrail_power_down - power down the display island
+ * @dev: our DRM device
+ *
+ * Power down the display interface of our device
+ */
+static int oaktrail_power_down(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 pwr_mask;
+ u32 pwr_sts;
+
+ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
+ outl(pwr_mask, dev_priv->ospm_base + PSB_PM_SSC);
+
+ while (true) {
+ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+ if ((pwr_sts & pwr_mask) == pwr_mask)
+ break;
+ else
+ udelay(10);
+ }
+ return 0;
+}
+
+/*
+ * oaktrail_power_up
+ *
+ * Restore power to the specified island(s) (powergating)
+ */
+static int oaktrail_power_up(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
+ u32 pwr_sts, pwr_cnt;
+
+ pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
+ pwr_cnt &= ~pwr_mask;
+ outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
+
+ while (true) {
+ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+ if ((pwr_sts & pwr_mask) == 0)
+ break;
+ else
+ udelay(10);
+ }
+ return 0;
+}
+
+/* Oaktrail */
+static const struct psb_offset oaktrail_regmap[2] = {
+ {
+ .fp0 = MRST_FPA0,
+ .fp1 = MRST_FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = MRST_DPLL_A,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .surf = DSPASURF,
+ .addr = MRST_DSPABASE,
+ .base = MRST_DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = FPB0,
+ .fp1 = FPB1,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = DPLL_B,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .surf = DSPBSURF,
+ .addr = DSPBBASE,
+ .base = DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ },
+};
+
+static int oaktrail_chip_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+
+ dev_priv->regmap = oaktrail_regmap;
+
+ ret = mid_chip_setup(dev);
+ if (ret < 0)
+ return ret;
+ if (!dev_priv->has_gct) {
+ /* Now pull the BIOS data */
+ psb_intel_opregion_init(dev);
+ psb_intel_init_bios(dev);
+ }
+ gma_intel_setup_gmbus(dev);
+ oaktrail_hdmi_setup(dev);
+ return 0;
+}
+
+static void oaktrail_teardown(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ gma_intel_teardown_gmbus(dev);
+ oaktrail_hdmi_teardown(dev);
+ if (!dev_priv->has_gct)
+ psb_intel_destroy_bios(dev);
+}
+
+const struct psb_ops oaktrail_chip_ops = {
+ .name = "Oaktrail",
+ .accel_2d = 1,
+ .pipes = 2,
+ .crtcs = 2,
+ .hdmi_mask = (1 << 1),
+ .lvds_mask = (1 << 0),
+ .sdvo_mask = (1 << 1),
+ .cursor_needs_phys = 0,
+ .sgx_offset = MRST_SGX_OFFSET,
+
+ .chip_setup = oaktrail_chip_setup,
+ .chip_teardown = oaktrail_teardown,
+ .crtc_helper = &oaktrail_helper_funcs,
+ .crtc_funcs = &psb_intel_crtc_funcs,
+
+ .output_init = oaktrail_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = oaktrail_backlight_init,
+#endif
+
+ .save_regs = oaktrail_save_display_registers,
+ .restore_regs = oaktrail_restore_display_registers,
+ .power_down = oaktrail_power_down,
+ .power_up = oaktrail_power_up,
+
+ .i2c_bus = 1,
+};
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
new file mode 100644
index 00000000000..cf018ddcc5a
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -0,0 +1,856 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Li Peng <peng.li@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_drv.h"
+
+#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
+#define HDMI_WRITE(reg, val) writel(val, hdmi_dev->regs + (reg))
+
+#define HDMI_HCR 0x1000
+#define HCR_ENABLE_HDCP (1 << 5)
+#define HCR_ENABLE_AUDIO (1 << 2)
+#define HCR_ENABLE_PIXEL (1 << 1)
+#define HCR_ENABLE_TMDS (1 << 0)
+
+#define HDMI_HICR 0x1004
+#define HDMI_HSR 0x1008
+#define HDMI_HISR 0x100C
+#define HDMI_DETECT_HDP (1 << 0)
+
+#define HDMI_VIDEO_REG 0x3000
+#define HDMI_UNIT_EN (1 << 7)
+#define HDMI_MODE_OUTPUT (1 << 0)
+#define HDMI_HBLANK_A 0x3100
+
+#define HDMI_AUDIO_CTRL 0x4000
+#define HDMI_ENABLE_AUDIO (1 << 0)
+
+#define PCH_HTOTAL_B 0x3100
+#define PCH_HBLANK_B 0x3104
+#define PCH_HSYNC_B 0x3108
+#define PCH_VTOTAL_B 0x310C
+#define PCH_VBLANK_B 0x3110
+#define PCH_VSYNC_B 0x3114
+#define PCH_PIPEBSRC 0x311C
+
+#define PCH_PIPEB_DSL 0x3800
+#define PCH_PIPEB_SLC 0x3804
+#define PCH_PIPEBCONF 0x3808
+#define PCH_PIPEBSTAT 0x3824
+
+#define CDVO_DFT 0x5000
+#define CDVO_SLEWRATE 0x5004
+#define CDVO_STRENGTH 0x5008
+#define CDVO_RCOMP 0x500C
+
+#define DPLL_CTRL 0x6000
+#define DPLL_PDIV_SHIFT 16
+#define DPLL_PDIV_MASK (0xf << 16)
+#define DPLL_PWRDN (1 << 4)
+#define DPLL_RESET (1 << 3)
+#define DPLL_FASTEN (1 << 2)
+#define DPLL_ENSTAT (1 << 1)
+#define DPLL_DITHEN (1 << 0)
+
+#define DPLL_DIV_CTRL 0x6004
+#define DPLL_CLKF_MASK 0xffffffc0
+#define DPLL_CLKR_MASK (0x3f)
+
+#define DPLL_CLK_ENABLE 0x6008
+#define DPLL_EN_DISP (1 << 31)
+#define DPLL_SEL_HDMI (1 << 8)
+#define DPLL_EN_HDMI (1 << 1)
+#define DPLL_EN_VGA (1 << 0)
+
+#define DPLL_ADJUST 0x600C
+#define DPLL_STATUS 0x6010
+#define DPLL_UPDATE 0x6014
+#define DPLL_DFT 0x6020
+
+struct intel_range {
+ int min, max;
+};
+
+struct oaktrail_hdmi_limit {
+ struct intel_range vco, np, nr, nf;
+};
+
+struct oaktrail_hdmi_clock {
+ int np;
+ int nr;
+ int nf;
+ int dot;
+};
+
+#define VCO_MIN 320000
+#define VCO_MAX 1650000
+#define NP_MIN 1
+#define NP_MAX 15
+#define NR_MIN 1
+#define NR_MAX 64
+#define NF_MIN 2
+#define NF_MAX 4095
+
+static const struct oaktrail_hdmi_limit oaktrail_hdmi_limit = {
+ .vco = { .min = VCO_MIN, .max = VCO_MAX },
+ .np = { .min = NP_MIN, .max = NP_MAX },
+ .nr = { .min = NR_MIN, .max = NR_MAX },
+ .nf = { .min = NF_MIN, .max = NF_MAX },
+};
+
+static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+
+ HDMI_WRITE(HDMI_HCR, 0x67);
+ HDMI_READ(HDMI_HCR);
+
+ HDMI_WRITE(0x51a8, 0x10);
+ HDMI_READ(0x51a8);
+
+ HDMI_WRITE(HDMI_AUDIO_CTRL, 0x1);
+ HDMI_READ(HDMI_AUDIO_CTRL);
+}
+
+static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+
+ HDMI_WRITE(0x51a8, 0x0);
+ HDMI_READ(0x51a8);
+
+ HDMI_WRITE(HDMI_AUDIO_CTRL, 0x0);
+ HDMI_READ(HDMI_AUDIO_CTRL);
+
+ HDMI_WRITE(HDMI_HCR, 0x47);
+ HDMI_READ(HDMI_HCR);
+}
+
+static unsigned int htotal_calculate(struct drm_display_mode *mode)
+{
+	u32 new_crtc_htotal;
+
+ /*
+ * 1024 x 768 new_crtc_htotal = 0x1024;
+ * 1280 x 1024 new_crtc_htotal = 0x0c34;
+ */
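+	/*
+	 * Stretch htotal as if the pipe ran at a fixed 200 MHz dot
+	 * clock, preserving the mode's line timing (our reading of the
+	 * reference values above).
+	 */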
+ new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
+
+	DRM_DEBUG_KMS("new crtc htotal 0x%04x\n", new_crtc_htotal);
+ return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
+}
+
+static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
+ int refclk, struct oaktrail_hdmi_clock *best_clock)
+{
+ int np_min, np_max, nr_min, nr_max;
+ int np, nr, nf;
+
+ np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
+ np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
+ if (np_min < oaktrail_hdmi_limit.np.min)
+ np_min = oaktrail_hdmi_limit.np.min;
+ if (np_max > oaktrail_hdmi_limit.np.max)
+ np_max = oaktrail_hdmi_limit.np.max;
+
+ nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
+ nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
+ if (nr_min < oaktrail_hdmi_limit.nr.min)
+ nr_min = oaktrail_hdmi_limit.nr.min;
+ if (nr_max > oaktrail_hdmi_limit.nr.max)
+ nr_max = oaktrail_hdmi_limit.nr.max;
+
+ np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
+ nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
+ nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
+ DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
+
+ /*
+ * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000;
+ * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
+ */
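+	/*
+	 * NR is programmed zero-based and NF is kept pre-shifted by 14
+	 * bits, matching the sample values above. The encoding is not
+	 * documented, so we simply mirror the reference code here.
+	 */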
+ best_clock->np = np;
+ best_clock->nr = nr - 1;
+ best_clock->nf = (nf << 14);
+}
+
+static void scu_busy_loop(void __iomem *scu_base)
+{
+ u32 status = 0;
+ u32 loop_count = 0;
+
+ status = readl(scu_base + 0x04);
+ while (status & 1) {
+		udelay(1); /* SCU processing time is a few microseconds */
+ status = readl(scu_base + 0x04);
+ loop_count++;
+		/* break if the SCU doesn't clear the busy bit after many retries */
+ if (loop_count > 1000) {
+ DRM_DEBUG_KMS("SCU IPC timed out");
+ return;
+ }
+ }
+}
+
+/*
+ * You don't want to know, you really really don't want to know....
+ *
+ * This is magic. However it's safe magic because of the way the platform
+ * works and it is necessary magic.
+ */
+static void oaktrail_hdmi_reset(struct drm_device *dev)
+{
+ void __iomem *base;
+ unsigned long scu_ipc_mmio = 0xff11c000UL;
+ int scu_len = 1024;
+
+ base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
+ if (base == NULL) {
+ DRM_ERROR("failed to map scu mmio\n");
+ return;
+ }
+
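+	/*
+	 * The writes below follow the SCU IPC mailbox convention as we
+	 * read it from the reference code: a destination address at
+	 * offset 0x0c, the payload at 0x80 and the command word at 0x0.
+	 */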
+ /* scu ipc: assert hdmi controller reset */
+ writel(0xff11d118, base + 0x0c);
+ writel(0x7fffffdf, base + 0x80);
+ writel(0x42005, base + 0x0);
+ scu_busy_loop(base);
+
+ /* scu ipc: de-assert hdmi controller reset */
+ writel(0xff11d118, base + 0x0c);
+ writel(0x7fffffff, base + 0x80);
+ writel(0x42005, base + 0x0);
+ scu_busy_loop(base);
+
+ iounmap(base);
+}
+
+int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ int pipe = 1;
+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ int refclk;
+ struct oaktrail_hdmi_clock clock;
+ u32 dspcntr, pipeconf, dpll, temp;
+ int dspcntr_reg = DSPBCNTR;
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Disable dpll if necessary */
+ dpll = REG_READ(DPLL_CTRL);
+ if ((dpll & DPLL_PWRDN) == 0) {
+ REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
+ REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
+ REG_WRITE(DPLL_STATUS, 0x1);
+ }
+ udelay(150);
+
+ /* Reset controller */
+ oaktrail_hdmi_reset(dev);
+
+ /* program and enable dpll */
+ refclk = 25000;
+ oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
+
+ /* Set the DPLL */
+ dpll = REG_READ(DPLL_CTRL);
+ dpll &= ~DPLL_PDIV_MASK;
+ dpll &= ~(DPLL_PWRDN | DPLL_RESET);
+ REG_WRITE(DPLL_CTRL, 0x00000008);
+ REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
+ REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
+ REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
+ REG_WRITE(DPLL_UPDATE, 0x80000000);
+ REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
+ udelay(150);
+
+	/* Configure HDMI (raw register values from the vendor reference
+	 * code; the individual bits are not documented) */
+ HDMI_WRITE(0x1004, 0x1fd);
+ HDMI_WRITE(0x2000, 0x1);
+ HDMI_WRITE(0x2008, 0x0);
+ HDMI_WRITE(0x3130, 0x8);
+ HDMI_WRITE(0x101c, 0x1800810);
+
+ temp = htotal_calculate(adjusted_mode);
+ REG_WRITE(htot_reg, temp);
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
+
+ REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ REG_WRITE(PCH_PIPEBSRC, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
+
+ temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+ HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp);
+
+ REG_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(dsppos_reg, 0);
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ /* Set up the display plane register */
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(pipeconf_reg);
+ pipeconf |= PIPEACONF_ENABLE;
+
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+
+ REG_WRITE(PCH_PIPEBCONF, pipeconf);
+ REG_READ(PCH_PIPEBCONF);
+ gma_wait_for_vblank(dev);
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+ gma_wait_for_vblank(dev);
+
+ gma_power_end(dev);
+
+ return 0;
+}
+
+void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ u32 temp;
+
+ DRM_DEBUG_KMS("%s %d\n", __func__, mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_OFF:
+ REG_WRITE(VGACNTRL, 0x80000000);
+
+ /* Disable plane */
+ temp = REG_READ(DSPBCNTR);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
+ REG_READ(DSPBCNTR);
+ /* Flush the plane changes */
+ REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+ REG_READ(DSPBSURF);
+ }
+
+ /* Disable pipe B */
+ temp = REG_READ(PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+ REG_READ(PIPEBCONF);
+ }
+
+ /* Disable LNW Pipes, etc */
+ temp = REG_READ(PCH_PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+ REG_READ(PCH_PIPEBCONF);
+ }
+
+ /* wait for pipe off */
+ udelay(150);
+
+ /* Disable dpll */
+ temp = REG_READ(DPLL_CTRL);
+ if ((temp & DPLL_PWRDN) == 0) {
+ REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
+ REG_WRITE(DPLL_STATUS, 0x1);
+ }
+
+ /* wait for dpll off */
+ udelay(150);
+
+ break;
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable dpll */
+ temp = REG_READ(DPLL_CTRL);
+ if ((temp & DPLL_PWRDN) != 0) {
+ REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
+ temp = REG_READ(DPLL_CLK_ENABLE);
+ REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
+ REG_READ(DPLL_CLK_ENABLE);
+ }
+ /* wait for dpll warm up */
+ udelay(150);
+
+ /* Enable pipe B */
+ temp = REG_READ(PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
+ REG_READ(PIPEBCONF);
+ }
+
+ /* Enable LNW Pipe B */
+ temp = REG_READ(PCH_PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
+ REG_READ(PCH_PIPEBCONF);
+ }
+
+ gma_wait_for_vblank(dev);
+
+ /* Enable plane */
+ temp = REG_READ(DSPBCNTR);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+ REG_READ(DSPBSURF);
+ }
+
+ gma_crtc_load_lut(crtc);
+ }
+
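+	/*
+	 * Display arbitration and FIFO watermark setup. These are
+	 * fixed values from the reference code, not computed per mode.
+	 */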
+ /* DSPARB */
+ REG_WRITE(DSPARB, 0x00003fbf);
+
+ /* FW1 */
+ REG_WRITE(0x70034, 0x3f880a0a);
+
+ /* FW2 */
+ REG_WRITE(0x70038, 0x0b060808);
+
+ /* FW4 */
+ REG_WRITE(0x70050, 0x08030404);
+
+ /* FW5 */
+ REG_WRITE(0x70054, 0x04040404);
+
+ /* LNC Chicken Bits - Squawk! */
+ REG_WRITE(0x70400, 0x4000);
+}
+
+static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
+{
+ static int dpms_mode = -1;
+
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ u32 temp;
+
+ if (dpms_mode == mode)
+ return;
+
+ if (mode != DRM_MODE_DPMS_ON)
+ temp = 0x0;
+ else
+ temp = 0x99;
+
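+	/* 0x99 sets HDMI_UNIT_EN and HDMI_MODE_OUTPUT plus two bits we
+	 * have not identified; the value is carried over unchanged from
+	 * the reference code. */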
+ dpms_mode = mode;
+ HDMI_WRITE(HDMI_VIDEO_REG, temp);
+}
+
+static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+ if (mode->clock < 20000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ return MODE_OK;
+}
+
+static enum drm_connector_status
+oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
+{
+ enum drm_connector_status status;
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ u32 temp;
+
+ temp = HDMI_READ(HDMI_HSR);
+ DRM_DEBUG_KMS("HDMI_HSR %x\n", temp);
+
+ if ((temp & HDMI_DETECT_HDP) != 0)
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ return status;
+}
+
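+/*
+ * Fallback EDID (a Dell 2709W, per the embedded descriptor strings)
+ * used until real DDC reads work; see the FIXME in
+ * oaktrail_hdmi_get_modes().
+ */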
+static const unsigned char raw_edid[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x10, 0xac, 0x2f, 0xa0,
+ 0x53, 0x55, 0x33, 0x30, 0x16, 0x13, 0x01, 0x03, 0x0e, 0x3a, 0x24, 0x78,
+ 0xea, 0xe9, 0xf5, 0xac, 0x51, 0x30, 0xb4, 0x25, 0x11, 0x50, 0x54, 0xa5,
+ 0x4b, 0x00, 0x81, 0x80, 0xa9, 0x40, 0x71, 0x4f, 0xb3, 0x00, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
+ 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x46, 0x6c, 0x21, 0x00, 0x00, 0x1a,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x47, 0x4e, 0x37, 0x32, 0x31, 0x39, 0x35,
+ 0x52, 0x30, 0x33, 0x55, 0x53, 0x0a, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x44,
+ 0x45, 0x4c, 0x4c, 0x20, 0x32, 0x37, 0x30, 0x39, 0x57, 0x0a, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x38, 0x4c, 0x1e, 0x53, 0x11, 0x00, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x8d
+};
+
+static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
+{
+ struct i2c_adapter *i2c_adap;
+ struct edid *edid;
+ int ret = 0;
+
+ /*
+ * FIXME: We need to figure this lot out. In theory we can
+ * read the EDID somehow but I've yet to find working reference
+ * code.
+ */
+	i2c_adap = i2c_get_adapter(3);
+	if (i2c_adap == NULL) {
+		DRM_ERROR("No ddc adapter available!\n");
+		edid = (struct edid *)raw_edid;
+	} else {
+		edid = (struct edid *)raw_edid;
+		/* FIXME ? edid = drm_get_edid(connector, i2c_adap); */
+		/* Drop the reference taken by i2c_get_adapter() */
+		i2c_put_adapter(i2c_adap);
+	}
+
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ }
+ return ret;
+}
+
+static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+
+ oaktrail_hdmi_audio_enable(dev);
+}
+
+static void oaktrail_hdmi_destroy(struct drm_connector *connector)
+{
+}
+
+static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
+ .dpms = oaktrail_hdmi_dpms,
+ .mode_fixup = gma_encoder_mode_fixup,
+ .prepare = gma_encoder_prepare,
+ .mode_set = oaktrail_hdmi_mode_set,
+ .commit = gma_encoder_commit,
+};
+
+static const struct drm_connector_helper_funcs
+ oaktrail_hdmi_connector_helper_funcs = {
+ .get_modes = oaktrail_hdmi_get_modes,
+ .mode_valid = oaktrail_hdmi_mode_valid,
+ .best_encoder = gma_best_encoder,
+};
+
+static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = oaktrail_hdmi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = oaktrail_hdmi_destroy,
+};
+
+static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
+ .destroy = oaktrail_hdmi_enc_destroy,
+};
+
+void oaktrail_hdmi_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+ if (!gma_encoder)
+ return;
+
+ gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+ if (!gma_connector)
+ goto failed_connector;
+
+ connector = &gma_connector->base;
+ encoder = &gma_encoder->base;
+ drm_connector_init(dev, connector,
+ &oaktrail_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_DVID);
+
+ drm_encoder_init(dev, encoder,
+ &oaktrail_hdmi_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
+
+ gma_encoder->type = INTEL_OUTPUT_HDMI;
+ drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs);
+ drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs);
+
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ drm_sysfs_connector_add(connector);
+ dev_info(dev->dev, "HDMI initialised.\n");
+
+ return;
+
+failed_connector:
+ kfree(gma_encoder);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
+ { 0 }
+};
+
+void oaktrail_hdmi_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev;
+ struct oaktrail_hdmi_dev *hdmi_dev;
+ int ret;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x080d, NULL);
+ if (!pdev)
+ return;
+
+ hdmi_dev = kzalloc(sizeof(struct oaktrail_hdmi_dev), GFP_KERNEL);
+ if (!hdmi_dev) {
+ dev_err(dev->dev, "failed to allocate memory\n");
+ goto out;
+ }
+
+
+ if (ret) {
+ dev_err(dev->dev, "failed to enable hdmi controller\n");
+ goto free;
+ }
+
+ hdmi_dev->mmio = pci_resource_start(pdev, 0);
+ hdmi_dev->mmio_len = pci_resource_len(pdev, 0);
+ hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
+ if (!hdmi_dev->regs) {
+ dev_err(dev->dev, "failed to map hdmi mmio\n");
+ goto free;
+ }
+
+ hdmi_dev->dev = pdev;
+ pci_set_drvdata(pdev, hdmi_dev);
+
+ /* Initialize i2c controller */
+ ret = oaktrail_hdmi_i2c_init(hdmi_dev->dev);
+ if (ret)
+ dev_err(dev->dev, "HDMI I2C initialization failed\n");
+
+ dev_priv->hdmi_priv = hdmi_dev;
+ oaktrail_hdmi_audio_disable(dev);
+
+ dev_info(dev->dev, "HDMI hardware present.\n");
+
+ return;
+
+free:
+	kfree(hdmi_dev);
+out:
+	pci_dev_put(pdev);	/* drop the reference taken by pci_get_device() */
+	return;
+}
+
+void oaktrail_hdmi_teardown(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ struct pci_dev *pdev;
+
+ if (hdmi_dev) {
+ pdev = hdmi_dev->dev;
+ pci_set_drvdata(pdev, NULL);
+ oaktrail_hdmi_i2c_exit(pdev);
+ iounmap(hdmi_dev->regs);
+ kfree(hdmi_dev);
+ pci_dev_put(pdev);
+ }
+}
+
+/* save HDMI register state */
+void oaktrail_hdmi_save(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ struct psb_state *regs = &dev_priv->regs.psb;
+ struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
+ int i;
+
+ /* dpll */
+ hdmi_dev->saveDPLL_CTRL = PSB_RVDC32(DPLL_CTRL);
+ hdmi_dev->saveDPLL_DIV_CTRL = PSB_RVDC32(DPLL_DIV_CTRL);
+ hdmi_dev->saveDPLL_ADJUST = PSB_RVDC32(DPLL_ADJUST);
+ hdmi_dev->saveDPLL_UPDATE = PSB_RVDC32(DPLL_UPDATE);
+ hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
+
+ /* pipe B */
+ pipeb->conf = PSB_RVDC32(PIPEBCONF);
+ pipeb->src = PSB_RVDC32(PIPEBSRC);
+ pipeb->htotal = PSB_RVDC32(HTOTAL_B);
+ pipeb->hblank = PSB_RVDC32(HBLANK_B);
+ pipeb->hsync = PSB_RVDC32(HSYNC_B);
+ pipeb->vtotal = PSB_RVDC32(VTOTAL_B);
+ pipeb->vblank = PSB_RVDC32(VBLANK_B);
+ pipeb->vsync = PSB_RVDC32(VSYNC_B);
+
+ hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
+ hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
+ hdmi_dev->savePCH_HTOTAL_B = PSB_RVDC32(PCH_HTOTAL_B);
+ hdmi_dev->savePCH_HBLANK_B = PSB_RVDC32(PCH_HBLANK_B);
+ hdmi_dev->savePCH_HSYNC_B = PSB_RVDC32(PCH_HSYNC_B);
+ hdmi_dev->savePCH_VTOTAL_B = PSB_RVDC32(PCH_VTOTAL_B);
+ hdmi_dev->savePCH_VBLANK_B = PSB_RVDC32(PCH_VBLANK_B);
+ hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B);
+
+ /* plane */
+ pipeb->cntr = PSB_RVDC32(DSPBCNTR);
+ pipeb->stride = PSB_RVDC32(DSPBSTRIDE);
+ pipeb->addr = PSB_RVDC32(DSPBBASE);
+ pipeb->surf = PSB_RVDC32(DSPBSURF);
+ pipeb->linoff = PSB_RVDC32(DSPBLINOFF);
+ pipeb->tileoff = PSB_RVDC32(DSPBTILEOFF);
+
+ /* cursor B */
+ regs->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
+ regs->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
+ regs->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
+
+ /* save palette */
+ for (i = 0; i < 256; i++)
+ pipeb->palette[i] = PSB_RVDC32(PALETTE_B + (i << 2));
+}
+
+/* restore HDMI register state */
+void oaktrail_hdmi_restore(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ struct psb_state *regs = &dev_priv->regs.psb;
+ struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
+ int i;
+
+ /* dpll */
+ PSB_WVDC32(hdmi_dev->saveDPLL_CTRL, DPLL_CTRL);
+ PSB_WVDC32(hdmi_dev->saveDPLL_DIV_CTRL, DPLL_DIV_CTRL);
+ PSB_WVDC32(hdmi_dev->saveDPLL_ADJUST, DPLL_ADJUST);
+ PSB_WVDC32(hdmi_dev->saveDPLL_UPDATE, DPLL_UPDATE);
+ PSB_WVDC32(hdmi_dev->saveDPLL_CLK_ENABLE, DPLL_CLK_ENABLE);
+ DRM_UDELAY(150);
+
+ /* pipe */
+ PSB_WVDC32(pipeb->src, PIPEBSRC);
+ PSB_WVDC32(pipeb->htotal, HTOTAL_B);
+ PSB_WVDC32(pipeb->hblank, HBLANK_B);
+ PSB_WVDC32(pipeb->hsync, HSYNC_B);
+ PSB_WVDC32(pipeb->vtotal, VTOTAL_B);
+ PSB_WVDC32(pipeb->vblank, VBLANK_B);
+ PSB_WVDC32(pipeb->vsync, VSYNC_B);
+
+ PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
+ PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
+ PSB_WVDC32(hdmi_dev->savePCH_HBLANK_B, PCH_HBLANK_B);
+ PSB_WVDC32(hdmi_dev->savePCH_HSYNC_B, PCH_HSYNC_B);
+ PSB_WVDC32(hdmi_dev->savePCH_VTOTAL_B, PCH_VTOTAL_B);
+ PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
+ PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B);
+
+ PSB_WVDC32(pipeb->conf, PIPEBCONF);
+ PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
+
+ /* plane */
+ PSB_WVDC32(pipeb->linoff, DSPBLINOFF);
+ PSB_WVDC32(pipeb->stride, DSPBSTRIDE);
+ PSB_WVDC32(pipeb->tileoff, DSPBTILEOFF);
+ PSB_WVDC32(pipeb->cntr, DSPBCNTR);
+ PSB_WVDC32(pipeb->surf, DSPBSURF);
+
+ /* cursor B */
+ PSB_WVDC32(regs->saveDSPBCURSOR_CTRL, CURBCNTR);
+ PSB_WVDC32(regs->saveDSPBCURSOR_POS, CURBPOS);
+ PSB_WVDC32(regs->saveDSPBCURSOR_BASE, CURBBASE);
+
+ /* restore palette */
+ for (i = 0; i < 256; i++)
+ PSB_WVDC32(pipeb->palette[i], PALETTE_B + (i << 2));
+}
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
new file mode 100644
index 00000000000..e2810706114
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Li Peng <peng.li@intel.com>
+ */
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include "psb_drv.h"
+
+#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
+#define HDMI_WRITE(reg, val) writel(val, hdmi_dev->regs + (reg))
+
+#define HDMI_HCR 0x1000
+#define HCR_DETECT_HDP (1 << 6)
+#define HCR_ENABLE_HDCP (1 << 5)
+#define HCR_ENABLE_AUDIO (1 << 2)
+#define HCR_ENABLE_PIXEL (1 << 1)
+#define HCR_ENABLE_TMDS (1 << 0)
+#define HDMI_HICR 0x1004
+#define HDMI_INTR_I2C_ERROR (1 << 4)
+#define HDMI_INTR_I2C_FULL (1 << 3)
+#define HDMI_INTR_I2C_DONE (1 << 2)
+#define HDMI_INTR_HPD (1 << 0)
+#define HDMI_HSR 0x1008
+#define HDMI_HISR 0x100C
+#define HDMI_HI2CRDB0 0x1200
+#define HDMI_HI2CHCR 0x1240
+#define HI2C_HDCP_WRITE (0 << 2)
+#define HI2C_HDCP_RI_READ (1 << 2)
+#define HI2C_HDCP_READ (2 << 2)
+#define HI2C_EDID_READ (3 << 2)
+#define HI2C_READ_CONTINUE (1 << 1)
+#define HI2C_ENABLE_TRANSACTION (1 << 0)
+
+#define HDMI_ICRH 0x1100
+#define HDMI_HI2CTDR0 0x1244
+#define HDMI_HI2CTDR1 0x1248
+
+#define I2C_STAT_INIT 0
+#define I2C_READ_DONE 1
+#define I2C_TRANSACTION_DONE 2
+
+struct hdmi_i2c_dev {
+ struct i2c_adapter *adap;
+ struct mutex i2c_lock;
+ struct completion complete;
+ int status;
+ struct i2c_msg *msg;
+ int buf_offset;
+};
+
+static void hdmi_i2c_irq_enable(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+ u32 temp;
+
+ temp = HDMI_READ(HDMI_HICR);
+ temp |= (HDMI_INTR_I2C_ERROR | HDMI_INTR_I2C_FULL | HDMI_INTR_I2C_DONE);
+ HDMI_WRITE(HDMI_HICR, temp);
+ HDMI_READ(HDMI_HICR);
+}
+
+static void hdmi_i2c_irq_disable(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+ HDMI_WRITE(HDMI_HICR, 0x0);
+ HDMI_READ(HDMI_HICR);
+}
+
+static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+{
+ struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ u32 temp;
+
+ i2c_dev->status = I2C_STAT_INIT;
+ i2c_dev->msg = pmsg;
+ i2c_dev->buf_offset = 0;
+ reinit_completion(&i2c_dev->complete);
+
+ /* Enable I2C transaction */
+ temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
+ HDMI_WRITE(HDMI_HI2CHCR, temp);
+ HDMI_READ(HDMI_HI2CHCR);
+
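+	/*
+	 * The interrupt handler signals this completion once per
+	 * 64-byte chunk read and again when the transaction finishes,
+	 * so keep waiting until it reports done. The timeout bounds
+	 * each individual wait, not the transaction as a whole.
+	 */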
+ while (i2c_dev->status != I2C_TRANSACTION_DONE)
+ wait_for_completion_interruptible_timeout(&i2c_dev->complete,
+ 10 * HZ);
+
+ return 0;
+}
+
+static int xfer_write(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+{
+	/*
+	 * XXX: i2c write doesn't seem useful for the EDID probe, so do
+	 * nothing here for now
+	 */
+ return 0;
+}
+
+static int oaktrail_hdmi_i2c_access(struct i2c_adapter *adap,
+ struct i2c_msg *pmsg,
+ int num)
+{
+ struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ int i;
+
+ mutex_lock(&i2c_dev->i2c_lock);
+
+ /* Enable i2c unit */
+ HDMI_WRITE(HDMI_ICRH, 0x00008760);
+
+ /* Enable irq */
+ hdmi_i2c_irq_enable(hdmi_dev);
+ for (i = 0; i < num; i++) {
+ if (pmsg->len && pmsg->buf) {
+ if (pmsg->flags & I2C_M_RD)
+ xfer_read(adap, pmsg);
+ else
+ xfer_write(adap, pmsg);
+ }
+ pmsg++; /* next message */
+ }
+
+ /* Disable irq */
+ hdmi_i2c_irq_disable(hdmi_dev);
+
+ mutex_unlock(&i2c_dev->i2c_lock);
+
+ return i;
+}
+
+static u32 oaktrail_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm oaktrail_hdmi_i2c_algorithm = {
+ .master_xfer = oaktrail_hdmi_i2c_access,
+ .functionality = oaktrail_hdmi_i2c_func,
+};
+
+static struct i2c_adapter oaktrail_hdmi_i2c_adapter = {
+ .name = "oaktrail_hdmi_i2c",
+ .nr = 3,
+ .owner = THIS_MODULE,
+ .class = I2C_CLASS_DDC,
+ .algo = &oaktrail_hdmi_i2c_algorithm,
+};
+
+static void hdmi_i2c_read(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ struct i2c_msg *msg = i2c_dev->msg;
+ u8 *buf = msg->buf;
+ u32 temp;
+ int i, offset;
+
+ offset = i2c_dev->buf_offset;
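+	/* Drain the 16-dword (64-byte) read FIFO into the message
+	 * buffer; the memcpy assumes a little-endian CPU, which holds
+	 * on this platform. */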
+ for (i = 0; i < 0x10; i++) {
+ temp = HDMI_READ(HDMI_HI2CRDB0 + (i * 4));
+ memcpy(buf + (offset + i * 4), &temp, 4);
+ }
+ i2c_dev->buf_offset += (0x10 * 4);
+
+ /* clearing read buffer full intr */
+ temp = HDMI_READ(HDMI_HISR);
+ HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_FULL);
+ HDMI_READ(HDMI_HISR);
+
+ /* continue read transaction */
+ temp = HDMI_READ(HDMI_HI2CHCR);
+ HDMI_WRITE(HDMI_HI2CHCR, temp | HI2C_READ_CONTINUE);
+ HDMI_READ(HDMI_HI2CHCR);
+
+ i2c_dev->status = I2C_READ_DONE;
+}
+
+static void hdmi_i2c_transaction_done(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ u32 temp;
+
+ /* clear transaction done intr */
+ temp = HDMI_READ(HDMI_HISR);
+ HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_DONE);
+ HDMI_READ(HDMI_HISR);
+
+ temp = HDMI_READ(HDMI_HI2CHCR);
+ HDMI_WRITE(HDMI_HI2CHCR, temp & ~HI2C_ENABLE_TRANSACTION);
+ HDMI_READ(HDMI_HI2CHCR);
+
+ i2c_dev->status = I2C_TRANSACTION_DONE;
+}
+
+static irqreturn_t oaktrail_hdmi_i2c_handler(int this_irq, void *dev)
+{
+ struct oaktrail_hdmi_dev *hdmi_dev = dev;
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ u32 stat;
+
+ stat = HDMI_READ(HDMI_HISR);
+
+ if (stat & HDMI_INTR_HPD) {
+ HDMI_WRITE(HDMI_HISR, stat | HDMI_INTR_HPD);
+ HDMI_READ(HDMI_HISR);
+ }
+
+ if (stat & HDMI_INTR_I2C_FULL)
+ hdmi_i2c_read(hdmi_dev);
+
+ if (stat & HDMI_INTR_I2C_DONE)
+ hdmi_i2c_transaction_done(hdmi_dev);
+
+ complete(&i2c_dev->complete);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * choose alternate function 2 of GPIO pin 52, 53,
+ * which is used by HDMI I2C logic
+ */
+static void oaktrail_hdmi_i2c_gpio_fix(void)
+{
+ void __iomem *base;
+ unsigned int gpio_base = 0xff12c000;
+ int gpio_len = 0x1000;
+ u32 temp;
+
+ base = ioremap((resource_size_t)gpio_base, gpio_len);
+ if (base == NULL) {
+ DRM_ERROR("gpio ioremap fail\n");
+ return;
+ }
+
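+	/* Offset 0x44 is assumed to be the pin-mux control for this
+	 * GPIO bank: the 0xa00 bits select alternate function 2 for
+	 * pins 52/53, per the function comment above. */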
+ temp = readl(base + 0x44);
+ DRM_DEBUG_DRIVER("old gpio val %x\n", temp);
+ writel((temp | 0x00000a00), (base + 0x44));
+ temp = readl(base + 0x44);
+ DRM_DEBUG_DRIVER("new gpio val %x\n", temp);
+
+ iounmap(base);
+}
+
+int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
+{
+ struct oaktrail_hdmi_dev *hdmi_dev;
+ struct hdmi_i2c_dev *i2c_dev;
+ int ret;
+
+ hdmi_dev = pci_get_drvdata(dev);
+
+ i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
+ if (i2c_dev == NULL) {
+ DRM_ERROR("Can't allocate interface\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
+ i2c_dev->status = I2C_STAT_INIT;
+ init_completion(&i2c_dev->complete);
+ mutex_init(&i2c_dev->i2c_lock);
+ i2c_set_adapdata(&oaktrail_hdmi_i2c_adapter, hdmi_dev);
+ hdmi_dev->i2c_dev = i2c_dev;
+
+ /* Enable HDMI I2C function on gpio */
+ oaktrail_hdmi_i2c_gpio_fix();
+
+ /* request irq */
+ ret = request_irq(dev->irq, oaktrail_hdmi_i2c_handler, IRQF_SHARED,
+ oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
+ if (ret) {
+ DRM_ERROR("Failed to request IRQ for I2C controller\n");
+ goto err;
+ }
+
+ /* Adapter registration */
+ ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
+ return ret;
+
+err:
+ kfree(i2c_dev);
+exit:
+ return ret;
+}
+
+void oaktrail_hdmi_i2c_exit(struct pci_dev *dev)
+{
+ struct oaktrail_hdmi_dev *hdmi_dev;
+ struct hdmi_i2c_dev *i2c_dev;
+
+ hdmi_dev = pci_get_drvdata(dev);
+ i2c_del_adapter(&oaktrail_hdmi_i2c_adapter);
+
+	/* Free the IRQ before freeing the data its handler uses */
+	free_irq(dev->irq, hdmi_dev);
+
+	i2c_dev = hdmi_dev->i2c_dev;
+	kfree(i2c_dev);
+}
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
new file mode 100644
index 00000000000..9b099468a5d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright © 2006-2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <asm/intel-mid.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+/*
+ * The max/min PWM frequency in BPCR[31:17]. The smallest value that
+ * fits in this 15-bit field is 1 (not 0); it is shifted left by one
+ * bit to form the actual 16-bit value the 15 bits correspond to.
+ */
+#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
+#define BRIGHTNESS_MAX_LEVEL 100
+
+/**
+ * oaktrail_lvds_set_power - set the power state of the panel
+ * @dev: drm device
+ * @gma_encoder: the LVDS encoder
+ * @on: true to power the panel up
+ */
+static void oaktrail_lvds_set_power(struct drm_device *dev,
+ struct gma_encoder *gma_encoder,
+ bool on)
+{
+ u32 pp_status;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ if (on) {
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+ POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
+ dev_priv->is_lvds_on = true;
+ if (dev_priv->ops->lvds_bl_power)
+ dev_priv->ops->lvds_bl_power(dev, true);
+ } else {
+ if (dev_priv->ops->lvds_bl_power)
+ dev_priv->ops->lvds_bl_power(dev, false);
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+ ~POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while (pp_status & PP_ON);
+ dev_priv->is_lvds_on = false;
+ pm_request_idle(&dev->pdev->dev);
+ }
+ gma_power_end(dev);
+}
+
+static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
+
+ if (mode == DRM_MODE_DPMS_ON)
+ oaktrail_lvds_set_power(dev, gma_encoder, true);
+ else
+ oaktrail_lvds_set_power(dev, gma_encoder, false);
+
+ /* XXX: We never power down the LVDS pairs. */
+}
+
+static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector = NULL;
+ struct drm_crtc *crtc = encoder->crtc;
+ u32 lvds_port;
+ uint64_t v = DRM_MODE_SCALE_FULLSCREEN;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ /*
+ * The LVDS pin pair will already have been turned on in the
+ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
+ * settings.
+ */
+ lvds_port = (REG_READ(LVDS) &
+ (~LVDS_PIPEB_SELECT)) |
+ LVDS_PORT_EN |
+ LVDS_BORDER_EN;
+
+	/* Enable dithering if the firmware (Moorestown) or the BIOS
+	   (Oaktrail) asks for it */
+ if (mode_dev->panel_wants_dither || dev_priv->lvds_dither)
+ lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
+
+ REG_WRITE(LVDS, lvds_port);
+
+	/* Find the connector we're trying to set up */
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		if (connector->encoder && connector->encoder->crtc == crtc)
+			break;
+	}
+
+	/* list_for_each_entry() never leaves connector NULL; if nothing
+	 * matched it points at the list head itself, so test for that. */
+	if (&connector->head == &mode_config->connector_list) {
+		DRM_ERROR("Couldn't find connector when setting mode\n");
+		gma_power_end(dev);
+		return;
+	}
+
+ drm_object_property_get_value(
+ &connector->base,
+ dev->mode_config.scaling_mode_property,
+ &v);
+
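+	/*
+	 * Pick the panel fitter mode below by cross-multiplying the
+	 * aspect ratios (avoids floating point): a relatively wider
+	 * panel gives pillarbox, a relatively taller one letterbox.
+	 */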
+ if (v == DRM_MODE_SCALE_NO_SCALE)
+ REG_WRITE(PFIT_CONTROL, 0);
+ else if (v == DRM_MODE_SCALE_ASPECT) {
+ if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
+ (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
+ if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) ==
+ (mode->hdisplay * adjusted_mode->crtc_vdisplay))
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+ else if ((adjusted_mode->crtc_hdisplay *
+ mode->vdisplay) > (mode->hdisplay *
+ adjusted_mode->crtc_vdisplay))
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
+ PFIT_SCALING_MODE_PILLARBOX);
+ else
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
+ PFIT_SCALING_MODE_LETTERBOX);
+ } else
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+ } else /*(v == DRM_MODE_SCALE_FULLSCREEN)*/
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+
+ gma_power_end(dev);
+}
+
+static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+ oaktrail_lvds_set_power(dev, gma_encoder, false);
+ gma_power_end(dev);
+}
+
+static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 ret;
+
+ if (gma_power_begin(dev, false)) {
+ ret = ((REG_READ(BLC_PWM_CTL) &
+ BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ gma_power_end(dev);
+ } else
+ ret = ((dev_priv->regs.saveBLC_PWM_CTL &
+ BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ return ret;
+}
+
+static void oaktrail_lvds_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (mode_dev->backlight_duty_cycle == 0)
+ mode_dev->backlight_duty_cycle =
+ oaktrail_lvds_get_max_backlight(dev);
+ oaktrail_lvds_set_power(dev, gma_encoder, true);
+}
+
+static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
+ .dpms = oaktrail_lvds_dpms,
+ .mode_fixup = psb_intel_lvds_mode_fixup,
+ .prepare = oaktrail_lvds_prepare,
+ .mode_set = oaktrail_lvds_mode_set,
+ .commit = oaktrail_lvds_commit,
+};
+
+/* Returns the panel fixed mode from configuration. */
+
+static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct drm_display_mode *mode = NULL;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
+
+ mode_dev->panel_fixed_mode = NULL;
+
+ /* Use the firmware provided data on Moorestown */
+ if (dev_priv->has_gct) {
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return;
+
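+		/* Assemble the timings from the GCT DTD: each field is
+		 * split into hi/lo parts, and the vsync fields carry
+		 * only a 4-bit hi nibble (hence the << 4). */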
+ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+		mode->hsync_start = mode->hdisplay +
+				((ti->hsync_offset_hi << 8) |
+				 ti->hsync_offset_lo);
+		mode->hsync_end = mode->hsync_start +
+				((ti->hsync_pulse_width_hi << 8) |
+				 ti->hsync_pulse_width_lo);
+		mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) |
+				ti->hblank_lo);
+		mode->vsync_start = mode->vdisplay +
+				((ti->vsync_offset_hi << 4) |
+				 ti->vsync_offset_lo);
+		mode->vsync_end = mode->vsync_start +
+				((ti->vsync_pulse_width_hi << 4) |
+				 ti->vsync_pulse_width_lo);
+		mode->vtotal = mode->vdisplay +
+				((ti->vblank_hi << 8) | ti->vblank_lo);
+ mode->clock = ti->pixel_clock * 10;
+#if 0
+ printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
+ printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
+ printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
+ printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
+ printk(KERN_INFO "htotal is %d\n", mode->htotal);
+ printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
+ printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
+ printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
+ printk(KERN_INFO "clock is %d\n", mode->clock);
+#endif
+ mode_dev->panel_fixed_mode = mode;
+ }
+
+ /* Use the BIOS VBT mode if available */
+ if (mode_dev->panel_fixed_mode == NULL && mode_dev->vbt_mode)
+ mode_dev->panel_fixed_mode = drm_mode_duplicate(dev,
+ mode_dev->vbt_mode);
+
+ /* Then try the LVDS VBT mode */
+ if (mode_dev->panel_fixed_mode == NULL)
+ if (dev_priv->lfp_lvds_vbt_mode)
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev,
+ dev_priv->lfp_lvds_vbt_mode);
+
+ /* If we still got no mode then bail */
+ if (mode_dev->panel_fixed_mode == NULL)
+ return;
+
+ drm_mode_set_name(mode_dev->panel_fixed_mode);
+ drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);
+}
+
+/**
+ * oaktrail_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ * @mode_dev: our mode device state
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void oaktrail_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct edid *edid;
+ struct i2c_adapter *i2c_adap;
+	struct drm_display_mode *scan;
+
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+ if (!gma_encoder)
+ return;
+
+ gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+ if (!gma_connector)
+ goto failed_connector;
+
+ connector = &gma_connector->base;
+ encoder = &gma_encoder->base;
+ dev_priv->is_lvds_on = true;
+ drm_connector_init(dev, connector,
+ &psb_intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS);
+
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
+ gma_encoder->type = INTEL_OUTPUT_LVDS;
+
+ drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs);
+ drm_connector_helper_add(connector,
+ &psb_intel_lvds_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ drm_object_attach_property(&connector->base,
+ dev_priv->backlight_property,
+ BRIGHTNESS_MAX_LEVEL);
+
+ mode_dev->panel_wants_dither = false;
+ if (dev_priv->has_gct)
+ mode_dev->panel_wants_dither = (dev_priv->gct_data.
+ Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
+ if (dev_priv->lvds_dither)
+		mode_dev->panel_wants_dither = true;
+
+ /*
+ * LVDS discovery:
+ * 1) check for EDID on DDC
+ * 2) check for VBT data
+ * 3) check to see if LVDS is already on
+ * if none of the above, no panel
+ * 4) make sure lid is open
+ * if closed, act like it's not there for now
+ */
+
+ mutex_lock(&dev->mode_config.mutex);
+ i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
+ if (i2c_adap == NULL)
+ dev_err(dev->dev, "No ddc adapter available!\n");
+ /*
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+ if (i2c_adap) {
+ edid = drm_get_edid(connector, i2c_adap);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector,
+ edid);
+ drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, scan);
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+ }
+	/*
+	 * If we didn't get an EDID, try getting the panel timing
+	 * from the configuration data
+	 */
+ oaktrail_lvds_get_configuration_mode(dev, mode_dev);
+
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ goto out; /* FIXME: check for quirks */
+ }
+
+	/* Still no mode after all that, so give up. */
+	dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
+	goto failed_find;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed_find:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
+ if (gma_encoder->ddc_bus)
+ psb_intel_i2c_destroy(gma_encoder->ddc_bus);
+
+ drm_encoder_cleanup(encoder);
+ drm_connector_cleanup(connector);
+ kfree(gma_connector);
+failed_connector:
+ kfree(gma_encoder);
+}
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
new file mode 100644
index 00000000000..ab696ca7eee
--- /dev/null
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -0,0 +1,357 @@
+/*
+ * Copyright 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/acpi.h>
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+#define PCI_ASLE 0xe4
+#define PCI_ASLS 0xfc
+
+#define OPREGION_HEADER_OFFSET 0
+#define OPREGION_ACPI_OFFSET 0x100
+#define ACPI_CLID 0x01ac /* current lid state indicator */
+#define ACPI_CDCK 0x01b0 /* current docking state indicator */
+#define OPREGION_SWSCI_OFFSET 0x200
+#define OPREGION_ASLE_OFFSET 0x300
+#define OPREGION_VBT_OFFSET 0x400
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_ACPI (1<<0)
+#define MBOX_SWSCI (1<<1)
+#define MBOX_ASLE (1<<2)
+
+struct opregion_header {
+ u8 signature[16];
+ u32 size;
+ u32 opregion_ver;
+ u8 bios_ver[32];
+ u8 vbios_ver[16];
+ u8 driver_ver[16];
+ u32 mboxes;
+ u8 reserved[164];
+} __packed;
+
+/* OpRegion mailbox #1: public ACPI methods */
+struct opregion_acpi {
+ u32 drdy; /* driver readiness */
+ u32 csts; /* notification status */
+ u32 cevt; /* current event */
+ u8 rsvd1[20];
+ u32 didl[8]; /* supported display devices ID list */
+ u32 cpdl[8]; /* currently presented display list */
+ u32 cadl[8]; /* currently active display list */
+ u32 nadl[8]; /* next active devices list */
+ u32 aslp; /* ASL sleep time-out */
+ u32 tidx; /* toggle table index */
+ u32 chpd; /* current hotplug enable indicator */
+ u32 clid; /* current lid state*/
+ u32 cdck; /* current docking state */
+ u32 sxsw; /* Sx state resume */
+ u32 evts; /* ASL supported events */
+ u32 cnot; /* current OS notification */
+ u32 nrdy; /* driver status */
+ u8 rsvd2[60];
+} __packed;
+
+/* OpRegion mailbox #2: SWSCI */
+struct opregion_swsci {
+ /*FIXME: add it later*/
+} __packed;
+
+/* OpRegion mailbox #3: ASLE */
+struct opregion_asle {
+ u32 ardy; /* driver readiness */
+ u32 aslc; /* ASLE interrupt command */
+ u32 tche; /* technology enabled indicator */
+ u32 alsi; /* current ALS illuminance reading */
+ u32 bclp; /* backlight brightness to set */
+ u32 pfit; /* panel fitting state */
+ u32 cblv; /* current brightness level */
+ u16 bclm[20]; /* backlight level duty cycle mapping table */
+ u32 cpfm; /* current panel fitting mode */
+ u32 epfm; /* enabled panel fitting modes */
+ u8 plut[74]; /* panel LUT and identifier */
+ u32 pfmb; /* PWM freq and min brightness */
+ u8 rsvd[102];
+} __packed;
+
+/* ASLE irq request bits */
+#define ASLE_SET_ALS_ILLUM (1 << 0)
+#define ASLE_SET_BACKLIGHT (1 << 1)
+#define ASLE_SET_PFIT (1 << 2)
+#define ASLE_SET_PWM_FREQ (1 << 3)
+#define ASLE_REQ_MSK 0xf
+
+/* response bits of ASLE irq request */
+#define ASLE_ALS_ILLUM_FAILED (1<<10)
+#define ASLE_BACKLIGHT_FAILED (1<<12)
+#define ASLE_PFIT_FAILED (1<<14)
+#define ASLE_PWM_FREQ_FAILED (1<<16)
+
+/* ASLE backlight brightness to set */
+#define ASLE_BCLP_VALID (1<<31)
+#define ASLE_BCLP_MSK (~(1<<31))
+
+/* ASLE panel fitting request */
+#define ASLE_PFIT_VALID (1<<31)
+#define ASLE_PFIT_CENTER (1<<0)
+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
+#define ASLE_PFIT_STRETCH_GFX (1<<2)
+
+/* PWM frequency and minimum brightness */
+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
+#define ASLE_PFMB_PWM_VALID (1<<31)
+
+#define ASLE_CBLV_VALID (1<<31)
+
+static struct psb_intel_opregion *system_opregion;
+
+static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct backlight_device *bd = dev_priv->backlight_device;
+
+ DRM_DEBUG_DRIVER("asle set backlight %x\n", bclp);
+
+ if (!(bclp & ASLE_BCLP_VALID))
+ return ASLE_BACKLIGHT_FAILED;
+
+ if (bd == NULL)
+ return ASLE_BACKLIGHT_FAILED;
+
+ bclp &= ASLE_BCLP_MSK;
+ if (bclp > 255)
+ return ASLE_BACKLIGHT_FAILED;
+
+ if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
+ int max = bd->props.max_brightness;
+ gma_backlight_set(dev, bclp * max / 255);
+ }
+
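+	/* Report the brightness back as a percentage (0x64 == 100) */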
+ asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
+
+ return 0;
+}
+
+static void psb_intel_opregion_asle_work(struct work_struct *work)
+{
+ struct psb_intel_opregion *opregion =
+ container_of(work, struct psb_intel_opregion, asle_work);
+ struct drm_psb_private *dev_priv =
+ container_of(opregion, struct drm_psb_private, opregion);
+ struct opregion_asle *asle = opregion->asle;
+ u32 asle_stat = 0;
+ u32 asle_req;
+
+ if (!asle)
+ return;
+
+ asle_req = asle->aslc & ASLE_REQ_MSK;
+ if (!asle_req) {
+		DRM_DEBUG_DRIVER("non-ASLE set request?\n");
+ return;
+ }
+
+ if (asle_req & ASLE_SET_BACKLIGHT)
+ asle_stat |= asle_set_backlight(dev_priv->dev, asle->bclp);
+
+ asle->aslc = asle_stat;
+}
+
+void psb_intel_opregion_asle_intr(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->opregion.asle)
+ schedule_work(&dev_priv->opregion.asle_work);
+}
+
+#define ASLE_ALS_EN (1<<0)
+#define ASLE_BLC_EN (1<<1)
+#define ASLE_PFIT_EN (1<<2)
+#define ASLE_PFMB_EN (1<<3)
+
+void psb_intel_opregion_enable_asle(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+
+	if (asle && system_opregion) {
+		/* Don't do this on Medfield or other non-PC-like devices:
+		   they use the bit for something different altogether */
+ psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
+ psb_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
+
+ asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN
+ | ASLE_PFMB_EN;
+ asle->ardy = 1;
+ }
+}
+
+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
+#define ACPI_EV_LID (1<<1)
+#define ACPI_EV_DOCK (1<<2)
+
+
+static int psb_intel_opregion_video_event(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ /* The only video events relevant to opregion are 0x80. These indicate
+ either a docking event, lid switch or display switch request. In
+ Linux, these are handled by the dock, button and video drivers.
+ We might want to fix the video driver to be opregion-aware in
+ future, but right now we just indicate to the firmware that the
+ request has been handled */
+
+ struct opregion_acpi *acpi;
+
+ if (!system_opregion)
+ return NOTIFY_DONE;
+
+ acpi = system_opregion->acpi;
+ acpi->csts = 0;
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block psb_intel_opregion_notifier = {
+ .notifier_call = psb_intel_opregion_video_event,
+};
+
+void psb_intel_opregion_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ /* Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video
+ * module. We don't actually need to do anything with them. */
+ opregion->acpi->csts = 0;
+ opregion->acpi->drdy = 1;
+
+ system_opregion = opregion;
+ register_acpi_notifier(&psb_intel_opregion_notifier);
+ }
+}
+
+void psb_intel_opregion_fini(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ opregion->acpi->drdy = 0;
+
+ system_opregion = NULL;
+ unregister_acpi_notifier(&psb_intel_opregion_notifier);
+ }
+
+ cancel_work_sync(&opregion->asle_work);
+
+ /* just clear all opregion memory pointers now */
+ iounmap(opregion->header);
+ opregion->header = NULL;
+ opregion->acpi = NULL;
+ opregion->swsci = NULL;
+ opregion->asle = NULL;
+ opregion->vbt = NULL;
+}
+
+int psb_intel_opregion_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_opregion *opregion = &dev_priv->opregion;
+ u32 opregion_phy, mboxes;
+ void __iomem *base;
+ int err = 0;
+
+ pci_read_config_dword(dev->pdev, PCI_ASLS, &opregion_phy);
+ if (opregion_phy == 0) {
+ DRM_DEBUG_DRIVER("ACPI Opregion not supported\n");
+ return -ENOTSUPP;
+ }
+
+ INIT_WORK(&opregion->asle_work, psb_intel_opregion_asle_work);
+
+	DRM_DEBUG("OpRegion detected at 0x%08x\n", opregion_phy);
+ base = acpi_os_ioremap(opregion_phy, 8*1024);
+ if (!base)
+ return -ENOMEM;
+
+ if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+ DRM_DEBUG_DRIVER("opregion signature mismatch\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ opregion->header = base;
+ opregion->vbt = base + OPREGION_VBT_OFFSET;
+
+ opregion->lid_state = base + ACPI_CLID;
+
+ mboxes = opregion->header->mboxes;
+ if (mboxes & MBOX_ACPI) {
+ DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
+ opregion->acpi = base + OPREGION_ACPI_OFFSET;
+ }
+
+ if (mboxes & MBOX_ASLE) {
+ DRM_DEBUG_DRIVER("ASLE supported\n");
+ opregion->asle = base + OPREGION_ASLE_OFFSET;
+ }
+
+ return 0;
+
+err_out:
+ iounmap(base);
+ return err;
+}
+
diff --git a/drivers/gpu/drm/gma500/opregion.h b/drivers/gpu/drm/gma500/opregion.h
new file mode 100644
index 00000000000..4a90f8b0e16
--- /dev/null
+++ b/drivers/gpu/drm/gma500/opregion.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
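+/* The opregion requires ACPI; without it these collapse to no-op stubs */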
+#if defined(CONFIG_ACPI)
+extern void psb_intel_opregion_asle_intr(struct drm_device *dev);
+extern void psb_intel_opregion_init(struct drm_device *dev);
+extern void psb_intel_opregion_fini(struct drm_device *dev);
+extern int psb_intel_opregion_setup(struct drm_device *dev);
+extern void psb_intel_opregion_enable_asle(struct drm_device *dev);
+
+#else
+
+static inline void psb_intel_opregion_asle_intr(struct drm_device *dev)
+{
+}
+
+static inline void psb_intel_opregion_init(struct drm_device *dev)
+{
+}
+
+static inline void psb_intel_opregion_fini(struct drm_device *dev)
+{
+}
+
+static inline int psb_intel_opregion_setup(struct drm_device *dev)
+{
+	return 0;
+}
+
+static inline void psb_intel_opregion_enable_asle(struct drm_device *dev)
+{
+}
+#endif
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
new file mode 100644
index 00000000000..b6b135fcd59
--- /dev/null
+++ b/drivers/gpu/drm/gma500/power.c
@@ -0,0 +1,332 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Benjamin Defnet <benjamin.r.defnet@intel.com>
+ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ * Massively reworked
+ * Alan Cox <alan@linux.intel.com>
+ */
+
+#include "power.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+
+static struct mutex power_mutex; /* Serialize power ops */
+static spinlock_t power_ctrl_lock; /* Serialize power claim */
+
+/**
+ * gma_power_init - initialise power manager
+ * @dev: our device
+ *
+ * Set up for power management tracking of our hardware.
+ */
+void gma_power_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /* FIXME: Move APM/OSPM base into relevant device code */
+ dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
+ dev_priv->ospm_base &= 0xffff;
+
+ dev_priv->display_power = true; /* We start active */
+ dev_priv->display_count = 0; /* Currently no users */
+ dev_priv->suspended = false; /* And not suspended */
+ spin_lock_init(&power_ctrl_lock);
+ mutex_init(&power_mutex);
+
+ if (dev_priv->ops->init_pm)
+ dev_priv->ops->init_pm(dev);
+}
+
+/**
+ * gma_power_uninit - end power manager
+ * @dev: device to end for
+ *
+ * Undo the effects of gma_power_init
+ */
+void gma_power_uninit(struct drm_device *dev)
+{
+ pm_runtime_disable(&dev->pdev->dev);
+ pm_runtime_set_suspended(&dev->pdev->dev);
+}
+
+/**
+ * gma_suspend_display - suspend the display logic
+ * @dev: our DRM device
+ *
+ * Suspend the display logic of the graphics interface
+ */
+static void gma_suspend_display(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->suspended)
+ return;
+ dev_priv->ops->save_regs(dev);
+ dev_priv->ops->power_down(dev);
+ dev_priv->display_power = false;
+}
+
+/**
+ * gma_resume_display - resume display side logic
+ * @pdev: our PCI device
+ *
+ * Resume the display hardware restoring state and enabling
+ * as necessary.
+ */
+static void gma_resume_display(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /* turn on the display power island */
+ dev_priv->ops->power_up(dev);
+ dev_priv->suspended = false;
+ dev_priv->display_power = true;
+
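+	/* Re-enable the GTT page table and GMCH graphics before the
+	   mappings and register state are restored */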
+ PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
+ dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
+
+ psb_gtt_restore(dev); /* Rebuild our GTT mappings */
+ dev_priv->ops->restore_regs(dev);
+}
+
+/**
+ * gma_suspend_pci - suspend PCI side
+ * @pdev: PCI device
+ *
+ * Perform the suspend processing on our PCI device state
+ */
+static void gma_suspend_pci(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 bsm, vbt;
+
+ if (dev_priv->suspended)
+ return;
+
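+	/* Stash config space that is not retained across D3hot;
+	   gma_resume_pci() writes it back */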
+ pci_save_state(pdev);
+ pci_read_config_dword(pdev, 0x5C, &bsm);
+ dev_priv->regs.saveBSM = bsm;
+ pci_read_config_dword(pdev, 0xFC, &vbt);
+ dev_priv->regs.saveVBT = vbt;
+ pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
+ pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ dev_priv->suspended = true;
+}
+
+/**
+ * gma_resume_pci - resume helper
+ * @pdev: our PCI device
+ *
+ * Perform the resume processing on our PCI device state - rewrite
+ * register state and re-enable the PCI device
+ */
+static bool gma_resume_pci(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!dev_priv->suspended)
+ return true;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM);
+ pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT);
+ /* restoring MSI address and data in PCIx space */
+ pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
+ pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
+ ret = pci_enable_device(pdev);
+
+ if (ret != 0)
+ dev_err(&pdev->dev, "pci_enable failed: %d\n", ret);
+ else
+ dev_priv->suspended = false;
+ return !dev_priv->suspended;
+}
+
+/**
+ * gma_power_suspend - bus callback for suspend
+ * @_dev: our device
+ *
+ * Called back by the PCI layer during a suspend of the system. We
+ * perform the necessary shut down steps and save enough state that
+ * we can undo this when resume is called.
+ */
+int gma_power_suspend(struct device *_dev)
+{
+ struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&power_mutex);
+ if (!dev_priv->suspended) {
+ if (dev_priv->display_count) {
+ mutex_unlock(&power_mutex);
+ dev_err(dev->dev, "GPU hardware busy, cannot suspend\n");
+ return -EBUSY;
+ }
+ psb_irq_uninstall(dev);
+ gma_suspend_display(dev);
+ gma_suspend_pci(pdev);
+ }
+ mutex_unlock(&power_mutex);
+ return 0;
+}
+
+/**
+ * gma_power_resume - resume power
+ * @_dev: our device
+ *
+ * Resume the PCI side of the graphics and then the displays
+ */
+int gma_power_resume(struct device *_dev)
+{
+ struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ mutex_lock(&power_mutex);
+ gma_resume_pci(pdev);
+ gma_resume_display(pdev);
+ psb_irq_preinstall(dev);
+ psb_irq_postinstall(dev);
+ mutex_unlock(&power_mutex);
+ return 0;
+}
+
+/**
+ * gma_power_is_on - return true if power is on
+ * @dev: our DRM device
+ *
+ * Returns true if the display island power is on at this moment
+ */
+bool gma_power_is_on(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ return dev_priv->display_power;
+}
+
+/**
+ * gma_power_begin - begin requiring power
+ * @dev: our DRM device
+ * @force_on: true to force power on
+ *
+ * Begin an action that requires the display power island to be enabled.
+ * We refcount the islands.
+ */
+bool gma_power_begin(struct drm_device *dev, bool force_on)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&power_ctrl_lock, flags);
+ /* Power already on ? */
+ if (dev_priv->display_power) {
+ dev_priv->display_count++;
+ pm_runtime_get(&dev->pdev->dev);
+ spin_unlock_irqrestore(&power_ctrl_lock, flags);
+ return true;
+ }
+	if (!force_on)
+ goto out_false;
+
+ /* Ok power up needed */
+ ret = gma_resume_pci(dev->pdev);
+	if (ret) {
+ psb_irq_preinstall(dev);
+ psb_irq_postinstall(dev);
+ pm_runtime_get(&dev->pdev->dev);
+ dev_priv->display_count++;
+ spin_unlock_irqrestore(&power_ctrl_lock, flags);
+ return true;
+ }
+out_false:
+ spin_unlock_irqrestore(&power_ctrl_lock, flags);
+ return false;
+}
+
+/**
+ * gma_power_end - end use of power
+ * @dev: Our DRM device
+ *
+ * Indicate that one of our gma_power_begin() requested periods when
+ * the display island power is needed has completed.
+ */
+void gma_power_end(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&power_ctrl_lock, flags);
+ dev_priv->display_count--;
+ WARN_ON(dev_priv->display_count < 0);
+ spin_unlock_irqrestore(&power_ctrl_lock, flags);
+ pm_runtime_put(&dev->pdev->dev);
+}
+
+int psb_runtime_suspend(struct device *dev)
+{
+ return gma_power_suspend(dev);
+}
+
+int psb_runtime_resume(struct device *dev)
+{
+ return gma_power_resume(dev);
+}
+
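+/* Runtime PM idle callback: busy while anyone holds the display island */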
+int psb_runtime_idle(struct device *dev)
+{
+	struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev));
+	struct drm_psb_private *dev_priv = drmdev->dev_private;
+
+	return dev_priv->display_count ? 0 : 1;
+}
+
+int gma_power_thaw(struct device *_dev)
+{
+ return gma_power_resume(_dev);
+}
+
+int gma_power_freeze(struct device *_dev)
+{
+ return gma_power_suspend(_dev);
+}
+
+int gma_power_restore(struct device *_dev)
+{
+ return gma_power_resume(_dev);
+}
diff --git a/drivers/gpu/drm/gma500/power.h b/drivers/gpu/drm/gma500/power.h
new file mode 100644
index 00000000000..56d8708bd41
--- /dev/null
+++ b/drivers/gpu/drm/gma500/power.h
@@ -0,0 +1,70 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Benjamin Defnet <benjamin.r.defnet@intel.com>
+ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ * Massively reworked
+ * Alan Cox <alan@linux.intel.com>
+ */
+#ifndef _PSB_POWERMGMT_H_
+#define _PSB_POWERMGMT_H_
+
+#include <linux/pci.h>
+#include <drm/drmP.h>
+
+void gma_power_init(struct drm_device *dev);
+void gma_power_uninit(struct drm_device *dev);
+
+/*
+ * The kernel bus power management will call these functions
+ */
+int gma_power_suspend(struct device *dev);
+int gma_power_resume(struct device *dev);
+int gma_power_thaw(struct device *dev);
+int gma_power_freeze(struct device *dev);
+int gma_power_restore(struct device *dev);
+
+/*
+ * These are the functions the driver should use to wrap all hw access
+ * (i.e. register reads and writes)
+ */
+bool gma_power_begin(struct drm_device *dev, bool force);
+void gma_power_end(struct drm_device *dev);
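+
+/*
+ * Typical use (illustrative sketch only):
+ *
+ *	if (gma_power_begin(dev, true)) {
+ *		... access display registers ...
+ *		gma_power_end(dev);
+ *	}
+ */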
+
+/*
+ * Use this function for an instantaneous check of whether the hw is on.
+ * Only use it in cases where you know the mutex is already held, such
+ * as in irq install/uninstall, and you need to prevent a deadlock.
+ * Otherwise use gma_power_begin().
+ */
+bool gma_power_is_on(struct drm_device *dev);
+
+/*
+ * GFX-Runtime PM callbacks
+ */
+int psb_runtime_suspend(struct device *dev);
+int psb_runtime_resume(struct device *dev);
+int psb_runtime_idle(struct device *dev);
+
+#endif /*_PSB_POWERMGMT_H_*/
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
new file mode 100644
index 00000000000..07df7d4eea7
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -0,0 +1,360 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/gma_drm.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+#include "psb_device.h"
+#include "gma_device.h"
+
+static int psb_output_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ psb_intel_lvds_init(dev, &dev_priv->mode_dev);
+ psb_intel_sdvo_init(dev, SDVOB);
+ return 0;
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+/*
+ * Poulsbo Backlight Interfaces
+ */
+
+#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+
+#define PSB_BLC_PWM_PRECISION_FACTOR 10
+#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
+#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
+
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
+
+static int psb_brightness;
+static struct backlight_device *psb_backlight_device;
+
+static int psb_get_brightness(struct backlight_device *bd)
+{
+ /* return locally cached var instead of HW read (due to DPST etc.) */
+	/* FIXME: ideally return the actual value in case the firmware
+	   fiddled with it */
+	return psb_brightness;
+}
+
+static int psb_backlight_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long core_clock;
+ /* u32 bl_max_freq; */
+ /* unsigned long value; */
+ u16 bl_max_freq;
+ uint32_t value;
+ uint32_t blc_pwm_precision_factor;
+
+ /* get bl_max_freq and pol from dev_priv*/
+ if (!dev_priv->lvds_bl) {
+		dev_err(dev->dev, "No valid LVDS backlight info\n");
+ return -ENOENT;
+ }
+ bl_max_freq = dev_priv->lvds_bl->freq;
+ blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
+
+ core_clock = dev_priv->core_freq;
+
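+	/* Derive the PWM divider from the core clock and the maximum
+	   frequency from the VBT backlight info */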
+ value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+ value *= blc_pwm_precision_factor;
+ value /= bl_max_freq;
+ value /= blc_pwm_precision_factor;
+
+	if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
+	    value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
+		return -ERANGE;
+
+	value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+	REG_WRITE(BLC_PWM_CTL,
+		  (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (value));
+	return 0;
+}
+
+static int psb_set_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(psb_backlight_device);
+ int level = bd->props.brightness;
+
+	/* Only percentages 1-100 are valid */
+ if (level < 1)
+ level = 1;
+
+ psb_intel_lvds_set_brightness(dev, level);
+ psb_brightness = level;
+ return 0;
+}
+
+static const struct backlight_ops psb_ops = {
+ .get_brightness = psb_get_brightness,
+ .update_status = psb_set_brightness,
+};
+
+static int psb_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 100;
+ props.type = BACKLIGHT_PLATFORM;
+
+ psb_backlight_device = backlight_device_register("psb-bl",
+ NULL, (void *)dev, &psb_ops, &props);
+ if (IS_ERR(psb_backlight_device))
+ return PTR_ERR(psb_backlight_device);
+
+ ret = psb_backlight_setup(dev);
+ if (ret < 0) {
+ backlight_device_unregister(psb_backlight_device);
+ psb_backlight_device = NULL;
+ return ret;
+ }
+ psb_backlight_device->props.brightness = 100;
+ psb_backlight_device->props.max_brightness = 100;
+ backlight_update_status(psb_backlight_device);
+ dev_priv->backlight_device = psb_backlight_device;
+
+ /* This must occur after the backlight is properly initialised */
+ psb_lid_timer_init(dev_priv);
+
+ return 0;
+}
+
+#endif
+
+/*
+ * Provide the Poulsbo specific chip logic and low level methods
+ * for power management
+ */
+
+static void psb_init_pm(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
+
+	gating &= ~3;	/* Disable 2D clock gating */
+	gating |= 1;
+ PSB_WSGX32(gating, PSB_CR_CLKGATECTL);
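+	/* Read back to post the write */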
+ PSB_RSGX32(PSB_CR_CLKGATECTL);
+}
+
+/**
+ * psb_save_display_registers - save registers lost on suspend
+ * @dev: our DRM device
+ *
+ * Save the state we need in order to be able to restore the interface
+ * upon resume from suspend
+ */
+static int psb_save_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+ struct psb_state *regs = &dev_priv->regs.psb;
+
+ /* Display arbitration control + watermarks */
+ regs->saveDSPARB = PSB_RVDC32(DSPARB);
+ regs->saveDSPFW1 = PSB_RVDC32(DSPFW1);
+ regs->saveDSPFW2 = PSB_RVDC32(DSPFW2);
+ regs->saveDSPFW3 = PSB_RVDC32(DSPFW3);
+ regs->saveDSPFW4 = PSB_RVDC32(DSPFW4);
+ regs->saveDSPFW5 = PSB_RVDC32(DSPFW5);
+ regs->saveDSPFW6 = PSB_RVDC32(DSPFW6);
+ regs->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+
+ /* Save crtc and output state */
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (drm_helper_crtc_in_use(crtc))
+ crtc->funcs->save(crtc);
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->funcs->save)
+ connector->funcs->save(connector);
+
+ drm_modeset_unlock_all(dev);
+ return 0;
+}
+
+/**
+ * psb_restore_display_registers - restore lost register state
+ * @dev: our DRM device
+ *
+ * Restore register state that was lost during suspend and resume.
+ */
+static int psb_restore_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+ struct psb_state *regs = &dev_priv->regs.psb;
+
+ /* Display arbitration + watermarks */
+ PSB_WVDC32(regs->saveDSPARB, DSPARB);
+ PSB_WVDC32(regs->saveDSPFW1, DSPFW1);
+ PSB_WVDC32(regs->saveDSPFW2, DSPFW2);
+ PSB_WVDC32(regs->saveDSPFW3, DSPFW3);
+ PSB_WVDC32(regs->saveDSPFW4, DSPFW4);
+ PSB_WVDC32(regs->saveDSPFW5, DSPFW5);
+ PSB_WVDC32(regs->saveDSPFW6, DSPFW6);
+ PSB_WVDC32(regs->saveCHICKENBIT, DSPCHICKENBIT);
+
+	/* Make sure the VGA plane is off; it initializes to on after reset! */
+ PSB_WVDC32(0x80000000, VGACNTRL);
+
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ if (drm_helper_crtc_in_use(crtc))
+ crtc->funcs->restore(crtc);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->funcs->restore)
+ connector->funcs->restore(connector);
+
+ drm_modeset_unlock_all(dev);
+ return 0;
+}
+
+static int psb_power_down(struct drm_device *dev)
+{
+ return 0;
+}
+
+static int psb_power_up(struct drm_device *dev)
+{
+ return 0;
+}
+
+/* Poulsbo register offset maps, one per pipe */
+static const struct psb_offset psb_regmap[2] = {
+ {
+ .fp0 = FPA0,
+ .fp1 = FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = DPLL_A,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .base = DSPABASE,
+ .surf = DSPASURF,
+ .addr = DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = FPB0,
+ .fp1 = FPB1,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = DPLL_B,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .base = DSPBBASE,
+ .surf = DSPBSURF,
+ .addr = DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ }
+};
+
+static int psb_chip_setup(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	dev_priv->regmap = psb_regmap;
+ gma_get_core_freq(dev);
+ gma_intel_setup_gmbus(dev);
+ psb_intel_opregion_init(dev);
+ psb_intel_init_bios(dev);
+ return 0;
+}
+
+static void psb_chip_teardown(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	psb_lid_timer_takedown(dev_priv);
+ gma_intel_teardown_gmbus(dev);
+}
+
+const struct psb_ops psb_chip_ops = {
+ .name = "Poulsbo",
+ .accel_2d = 1,
+ .pipes = 2,
+ .crtcs = 2,
+ .hdmi_mask = (1 << 0),
+ .lvds_mask = (1 << 1),
+ .sdvo_mask = (1 << 0),
+ .cursor_needs_phys = 1,
+ .sgx_offset = PSB_SGX_OFFSET,
+ .chip_setup = psb_chip_setup,
+ .chip_teardown = psb_chip_teardown,
+
+ .crtc_helper = &psb_intel_helper_funcs,
+ .crtc_funcs = &psb_intel_crtc_funcs,
+ .clock_funcs = &psb_clock_funcs,
+
+ .output_init = psb_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = psb_backlight_init,
+#endif
+
+ .init_pm = psb_init_pm,
+ .save_regs = psb_save_display_registers,
+ .restore_regs = psb_restore_display_registers,
+ .power_down = psb_power_down,
+ .power_up = psb_power_up,
+};
+
diff --git a/drivers/gpu/drm/gma500/psb_device.h b/drivers/gpu/drm/gma500/psb_device.h
new file mode 100644
index 00000000000..35e304c7f85
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_device.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright © 2013 Patrik Jakobsson
+ * Copyright © 2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _PSB_DEVICE_H_
+#define _PSB_DEVICE_H_
+
+extern const struct gma_clock_funcs psb_clock_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
new file mode 100644
index 00000000000..6e8fe9ec02b
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -0,0 +1,529 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_drv.h"
+#include "framebuffer.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+#include "mid_bios.h"
+#include <drm/drm_pciids.h>
+#include "power.h"
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
+#include <acpi/video.h>
+#include <linux/module.h>
+
+static struct drm_driver driver;
+static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+/*
+ * The table below contains a mapping of the PCI vendor ID and the PCI Device ID
+ * to the different groups of PowerVR 5-series chip designs
+ *
+ * 0x8086 = Intel Corporation
+ *
+ * PowerVR SGX535 - Poulsbo - Intel GMA 500, Intel Atom Z5xx
+ * PowerVR SGX535 - Moorestown - Intel GMA 600
+ * PowerVR SGX535 - Oaktrail - Intel GMA 600, Intel Atom Z6xx, E6xx
+ * PowerVR SGX540 - Medfield - Intel Atom Z2460
+ * PowerVR SGX544MP2 - Medfield -
+ * PowerVR SGX545 - Cedartrail - Intel GMA 3600, Intel Atom D2500, N2600
+ * PowerVR SGX545 - Cedartrail - Intel GMA 3650, Intel Atom D2550, D2700,
+ * N2800
+ */
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+ { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+#if defined(CONFIG_DRM_GMA600)
+ { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+ { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+ { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+ { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+ { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+ { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+ { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+ { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+ { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
+#endif
+#if defined(CONFIG_DRM_MEDFIELD)
+ { 0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
+ { 0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
+ { 0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
+ { 0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
+ { 0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
+ { 0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
+ { 0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
+ { 0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
+#endif
+#if defined(CONFIG_DRM_GMA3600)
+ { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+ { 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
+#endif
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+/*
+ * Standard IOCTLs.
+ */
+static const struct drm_ioctl_desc psb_ioctls[] = {
+};
+
+static void psb_driver_lastclose(struct drm_device *dev)
+{
+ int ret;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_fbdev *fbdev = dev_priv->fbdev;
+
+ ret = drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->psb_fb_helper);
+ if (ret)
+ DRM_DEBUG("failed to restore crtc mode\n");
+}
+
+static int psb_do_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_gtt *pg = &dev_priv->gtt;
+
+ uint32_t stolen_gtt;
+
+ if (pg->mmu_gatt_start & 0x0FFFFFFF) {
+ dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
+ return -EINVAL;
+ }
+
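+	/* GTT pages needed to map stolen memory: 4 bytes per PTE, rounded
+	   up to whole pages and capped at the size of the GTT itself */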
+ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
+ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ stolen_gtt = (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
+
+ dev_priv->gatt_free_offset = pg->mmu_gatt_start +
+ (stolen_gtt << PAGE_SHIFT) * 1024;
+
+ spin_lock_init(&dev_priv->irqmask_lock);
+ spin_lock_init(&dev_priv->lock_2d);
+
+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
+ PSB_RSGX32(PSB_CR_BIF_BANK1);
+
+ /* Do not bypass any MMU access, let them pagefault instead */
+ PSB_WSGX32((PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_MMU_ER_MASK),
+ PSB_CR_BIF_CTRL);
+ PSB_RSGX32(PSB_CR_BIF_CTRL);
+
+ psb_spank(dev_priv);
+
+ /* mmu_gatt ?? */
+ PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+ PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE); /* Post */
+
+ return 0;
+}
+
+static int psb_driver_unload(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /* TODO: Kill vblank etc here */
+
+ if (dev_priv) {
+ if (dev_priv->backlight_device)
+ gma_backlight_exit(dev);
+ psb_modeset_cleanup(dev);
+
+ if (dev_priv->ops->chip_teardown)
+ dev_priv->ops->chip_teardown(dev);
+
+ psb_intel_opregion_fini(dev);
+
+ if (dev_priv->pf_pd) {
+ psb_mmu_free_pagedir(dev_priv->pf_pd);
+ dev_priv->pf_pd = NULL;
+ }
+ if (dev_priv->mmu) {
+ struct psb_gtt *pg = &dev_priv->gtt;
+
+ down_read(&pg->sem);
+ psb_mmu_remove_pfn_sequence(
+ psb_mmu_get_default_pd
+ (dev_priv->mmu),
+ pg->mmu_gatt_start,
+ dev_priv->vram_stolen_size >> PAGE_SHIFT);
+ up_read(&pg->sem);
+ psb_mmu_driver_takedown(dev_priv->mmu);
+ dev_priv->mmu = NULL;
+ }
+ psb_gtt_takedown(dev);
+ if (dev_priv->scratch_page) {
+ set_pages_wb(dev_priv->scratch_page, 1);
+ __free_page(dev_priv->scratch_page);
+ dev_priv->scratch_page = NULL;
+ }
+ if (dev_priv->vdc_reg) {
+ iounmap(dev_priv->vdc_reg);
+ dev_priv->vdc_reg = NULL;
+ }
+ if (dev_priv->sgx_reg) {
+ iounmap(dev_priv->sgx_reg);
+ dev_priv->sgx_reg = NULL;
+ }
+ if (dev_priv->aux_reg) {
+ iounmap(dev_priv->aux_reg);
+ dev_priv->aux_reg = NULL;
+ }
+ if (dev_priv->aux_pdev)
+ pci_dev_put(dev_priv->aux_pdev);
+
+ /* Destroy VBT data */
+ psb_intel_destroy_bios(dev);
+
+ kfree(dev_priv);
+ dev->dev_private = NULL;
+ }
+ gma_power_uninit(dev);
+ return 0;
+}
+
+static int psb_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct drm_psb_private *dev_priv;
+ unsigned long resource_start, resource_len;
+ unsigned long irqflags;
+ int ret = -ENOMEM;
+ struct drm_connector *connector;
+ struct gma_encoder *gma_encoder;
+ struct psb_gtt *pg;
+
+ /* allocating and initializing driver private data */
+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+ if (dev_priv == NULL)
+ return -ENOMEM;
+
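+	/* flags carries the chip ops pointer stashed in the PCI id table's
+	   driver_data */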
+ dev_priv->ops = (struct psb_ops *)flags;
+ dev_priv->dev = dev;
+ dev->dev_private = (void *) dev_priv;
+
+ pg = &dev_priv->gtt;
+
+ pci_set_master(dev->pdev);
+
+ dev_priv->num_pipe = dev_priv->ops->pipes;
+
+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
+
+ dev_priv->vdc_reg =
+ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
+ if (!dev_priv->vdc_reg)
+ goto out_err;
+
+ dev_priv->sgx_reg = ioremap(resource_start + dev_priv->ops->sgx_offset,
+ PSB_SGX_SIZE);
+ if (!dev_priv->sgx_reg)
+ goto out_err;
+
+ if (IS_MRST(dev)) {
+ dev_priv->aux_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(3, 0));
+
+ if (dev_priv->aux_pdev) {
+ resource_start = pci_resource_start(dev_priv->aux_pdev,
+ PSB_AUX_RESOURCE);
+ resource_len = pci_resource_len(dev_priv->aux_pdev,
+ PSB_AUX_RESOURCE);
+ dev_priv->aux_reg = ioremap_nocache(resource_start,
+ resource_len);
+ if (!dev_priv->aux_reg)
+ goto out_err;
+
+ DRM_DEBUG_KMS("Found aux vdc");
+ } else {
+ /* Couldn't find the aux vdc so map to primary vdc */
+ dev_priv->aux_reg = dev_priv->vdc_reg;
+ DRM_DEBUG_KMS("Couldn't find aux pci device");
+ }
+ dev_priv->gmbus_reg = dev_priv->aux_reg;
+ } else {
+ dev_priv->gmbus_reg = dev_priv->vdc_reg;
+ }
+
+ psb_intel_opregion_setup(dev);
+
+ ret = dev_priv->ops->chip_setup(dev);
+ if (ret)
+ goto out_err;
+
+ /* Init OSPM support */
+ gma_power_init(dev);
+
+ ret = -ENOMEM;
+
+ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
+ if (!dev_priv->scratch_page)
+ goto out_err;
+
+ set_pages_uc(dev_priv->scratch_page, 1);
+
+ ret = psb_gtt_init(dev, 0);
+ if (ret)
+ goto out_err;
+
+ dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
+ if (!dev_priv->mmu)
+ goto out_err;
+
+ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
+ if (!dev_priv->pf_pd)
+ goto out_err;
+
+ ret = psb_do_init(dev);
+ if (ret)
+ return ret;
+
+ /* Add stolen memory to SGX MMU */
+ down_read(&pg->sem);
+ ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
+ dev_priv->stolen_base >> PAGE_SHIFT,
+ pg->gatt_start,
+ pg->stolen_size >> PAGE_SHIFT, 0);
+ up_read(&pg->sem);
+
+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
+
+ PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
+ PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
+
+ acpi_video_register();
+
+ /* Setup vertical blanking handling */
+ ret = drm_vblank_init(dev, dev_priv->num_pipe);
+ if (ret)
+ goto out_err;
+
+ /*
+ * Install interrupt handlers prior to powering off SGX or else we will
+ * crash.
+ */
+ dev_priv->vdc_irq_mask = 0;
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+ dev_priv->pipestat[2] = 0;
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+ drm_irq_install(dev, dev->pdev->irq);
+
+ dev->vblank_disable_allowed = true;
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+ dev->driver->get_vblank_counter = psb_get_vblank_counter;
+
+ psb_modeset_init(dev);
+ psb_fbdev_init(dev);
+ drm_kms_helper_poll_init(dev);
+
+ /* Only add backlight support if we have LVDS output */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ gma_encoder = gma_attached_encoder(connector);
+
+ switch (gma_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ case INTEL_OUTPUT_MIPI:
+ ret = gma_backlight_init(dev);
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+ psb_intel_opregion_enable_asle(dev);
+#if 0
+ /* Enable runtime pm at last */
+ pm_runtime_enable(&dev->pdev->dev);
+ pm_runtime_set_active(&dev->pdev->dev);
+#endif
+ /* Intel drm driver load is done, continue doing pvr load */
+ return 0;
+out_err:
+ psb_driver_unload(dev);
+ return ret;
+}
+
+static int psb_driver_device_is_agp(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void get_brightness(struct backlight_device *bd)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ if (bd) {
+ bd->props.brightness = bd->ops->get_brightness(bd);
+ backlight_update_status(bd);
+ }
+#endif
+}
+
+static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ static unsigned int runtime_allowed;
+
+ if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
+ runtime_allowed++;
+ pm_runtime_allow(&dev->pdev->dev);
+ dev_priv->rpm_enabled = 1;
+ }
+ return drm_ioctl(filp, cmd, arg);
+ /* FIXME: do we need to wrap the other side of this */
+}
+
+/*
+ * When a client dies:
+ * - Check for and clean up flipped page state
+ */
+static void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
+{
+}
+
+static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void psb_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+static const struct dev_pm_ops psb_pm_ops = {
+ .resume = gma_power_resume,
+ .suspend = gma_power_suspend,
+ .thaw = gma_power_thaw,
+ .freeze = gma_power_freeze,
+ .restore = gma_power_restore,
+ .runtime_suspend = psb_runtime_suspend,
+ .runtime_resume = psb_runtime_resume,
+ .runtime_idle = psb_runtime_idle,
+};
+
+static const struct vm_operations_struct psb_gem_vm_ops = {
+ .fault = psb_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct file_operations psb_gem_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = psb_unlocked_ioctl,
+ .mmap = drm_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+			   DRIVER_MODESET | DRIVER_GEM,
+ .load = psb_driver_load,
+ .unload = psb_driver_unload,
+ .lastclose = psb_driver_lastclose,
+ .preclose = psb_driver_preclose,
+
+ .num_ioctls = ARRAY_SIZE(psb_ioctls),
+ .device_is_agp = psb_driver_device_is_agp,
+ .irq_preinstall = psb_irq_preinstall,
+ .irq_postinstall = psb_irq_postinstall,
+ .irq_uninstall = psb_irq_uninstall,
+ .irq_handler = psb_irq_handler,
+ .enable_vblank = psb_enable_vblank,
+ .disable_vblank = psb_disable_vblank,
+ .get_vblank_counter = psb_get_vblank_counter,
+
+ .gem_free_object = psb_gem_free_object,
+ .gem_vm_ops = &psb_gem_vm_ops,
+
+ .dumb_create = psb_gem_dumb_create,
+ .dumb_map_offset = psb_gem_dumb_map_gtt,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .ioctls = psb_ioctls,
+ .fops = &psb_gem_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL
+};
+
+static struct pci_driver psb_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = psb_pci_probe,
+ .remove = psb_pci_remove,
+ .driver.pm = &psb_pm_ops,
+};
+
+static int __init psb_init(void)
+{
+ return drm_pci_init(&driver, &psb_pci_driver);
+}
+
+static void __exit psb_exit(void)
+{
+ drm_pci_exit(&driver, &psb_pci_driver);
+}
+
+late_initcall(psb_init);
+module_exit(psb_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE(DRIVER_LICENSE);
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
new file mode 100644
index 00000000000..55ebe2bd88d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -0,0 +1,922 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_DRV_H_
+#define _PSB_DRV_H_
+
+#include <linux/kref.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_global.h>
+#include <drm/gma_drm.h>
+#include "psb_reg.h"
+#include "psb_intel_drv.h"
+#include "gma_display.h"
+#include "intel_bios.h"
+#include "gtt.h"
+#include "power.h"
+#include "opregion.h"
+#include "oaktrail.h"
+#include "mmu.h"
+
+#define DRIVER_AUTHOR "Alan Cox <alan@linux.intel.com> and others"
+#define DRIVER_LICENSE "GPL"
+
+#define DRIVER_NAME "gma500"
+#define DRIVER_DESC "DRM driver for the Intel GMA500, GMA600, GMA3600, GMA3650"
+#define DRIVER_DATE "20140314"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+/* Append new drm mode definition here, align with libdrm definition */
+#define DRM_MODE_SCALE_NO_SCALE 2
+
+enum {
+ CHIP_PSB_8108 = 0, /* Poulsbo */
+ CHIP_PSB_8109 = 1, /* Poulsbo */
+ CHIP_MRST_4100 = 2, /* Moorestown/Oaktrail */
+ CHIP_MFLD_0130 = 3, /* Medfield */
+};
+
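+/* Identify the chip family from the PCI device ID */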
+#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
+#define IS_MRST(dev) (((dev)->pdev->device & 0xfff0) == 0x4100)
+#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
+#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
+
+/* Hardware offsets */
+#define PSB_VDC_OFFSET 0x00000000
+#define PSB_VDC_SIZE 0x000080000
+#define MRST_MMIO_SIZE 0x0000C0000
+#define MDFLD_MMIO_SIZE 0x000100000
+#define PSB_SGX_SIZE 0x8000
+#define PSB_SGX_OFFSET 0x00040000
+#define MRST_SGX_OFFSET 0x00080000
+
+/* PCI resource identifiers */
+#define PSB_MMIO_RESOURCE 0
+#define PSB_AUX_RESOURCE 0
+#define PSB_GATT_RESOURCE 2
+#define PSB_GTT_RESOURCE 3
+
+/* PCI configuration */
+#define PSB_GMCH_CTRL 0x52
+#define PSB_BSM 0x5C
+#define _PSB_GMCH_ENABLED 0x4
+#define PSB_PGETBL_CTL 0x2020
+#define _PSB_PGETBL_ENABLED 0x00000001
+#define PSB_SGX_2D_SLAVE_PORT 0x4000
+
+/* TODO: To get rid of */
+#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
+#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
+
+/* SGX side MMU definitions (these can probably go) */
+
+/* Flags for external memory type field */
+#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
+#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
+#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
+
+/* PTE's and PDE's */
+#define PSB_PDE_MASK 0x003FFFFF
+#define PSB_PDE_SHIFT 22
+#define PSB_PTE_SHIFT 12
+
+/* Cache control */
+#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
+#define PSB_PTE_WO 0x0002 /* Write only */
+#define PSB_PTE_RO 0x0004 /* Read only */
+#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
+
+/* VDC registers and bits */
+#define PSB_MSVDX_CLOCKGATING 0x2064
+#define PSB_TOPAZ_CLOCKGATING 0x2068
+#define PSB_HWSTAM 0x2098
+#define PSB_INSTPM 0x20C0
+#define PSB_INT_IDENTITY_R 0x20A4
+#define _PSB_IRQ_ASLE (1<<0)
+#define _MDFLD_PIPEC_EVENT_FLAG (1<<2)
+#define _MDFLD_PIPEC_VBLANK_FLAG (1<<3)
+#define _PSB_DPST_PIPEB_FLAG (1<<4)
+#define _MDFLD_PIPEB_EVENT_FLAG (1<<4)
+#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
+#define _PSB_DPST_PIPEA_FLAG (1<<6)
+#define _PSB_PIPEA_EVENT_FLAG (1<<6)
+#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
+#define _MDFLD_MIPIA_FLAG (1<<16)
+#define _MDFLD_MIPIC_FLAG (1<<17)
+#define _PSB_IRQ_DISP_HOTSYNC (1<<17)
+#define _PSB_IRQ_SGX_FLAG (1<<18)
+#define _PSB_IRQ_MSVDX_FLAG (1<<19)
+#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
+
+#define _PSB_PIPE_EVENT_FLAG (_PSB_VSYNC_PIPEA_FLAG | \
+ _PSB_VSYNC_PIPEB_FLAG)
+
+/* This flag includes all the display IRQ bits except the vblank irqs. */
+#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | \
+ _MDFLD_PIPEB_EVENT_FLAG | \
+ _PSB_PIPEA_EVENT_FLAG | \
+ _PSB_VSYNC_PIPEA_FLAG | \
+ _MDFLD_MIPIA_FLAG | \
+ _MDFLD_MIPIC_FLAG)
+#define PSB_INT_MASK_R 0x20A8
+#define PSB_INT_ENABLE_R 0x20A0
+
+#define _PSB_MMU_ER_MASK 0x0001FF00
+#define _PSB_MMU_ER_HOST (1 << 16)
+#define GPIOA 0x5010
+#define GPIOB 0x5014
+#define GPIOC 0x5018
+#define GPIOD 0x501c
+#define GPIOE 0x5020
+#define GPIOF 0x5024
+#define GPIOG 0x5028
+#define GPIOH 0x502c
+#define GPIO_CLOCK_DIR_MASK (1 << 0)
+#define GPIO_CLOCK_DIR_IN (0 << 1)
+#define GPIO_CLOCK_DIR_OUT (1 << 1)
+#define GPIO_CLOCK_VAL_MASK (1 << 2)
+#define GPIO_CLOCK_VAL_OUT (1 << 3)
+#define GPIO_CLOCK_VAL_IN (1 << 4)
+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
+#define GPIO_DATA_DIR_MASK (1 << 8)
+#define GPIO_DATA_DIR_IN (0 << 9)
+#define GPIO_DATA_DIR_OUT (1 << 9)
+#define GPIO_DATA_VAL_MASK (1 << 10)
+#define GPIO_DATA_VAL_OUT (1 << 11)
+#define GPIO_DATA_VAL_IN (1 << 12)
+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+
+#define VCLK_DIVISOR_VGA0 0x6000
+#define VCLK_DIVISOR_VGA1 0x6004
+#define VCLK_POST_DIV 0x6010
+
+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
+#define PSB_COMM_USER_IRQ (1024 >> 2)
+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
+#define PSB_COMM_FW (2048 >> 2)
+
+#define PSB_UIRQ_VISTEST 1
+#define PSB_UIRQ_OOM_REPLY 2
+#define PSB_UIRQ_FIRE_TA_REPLY 3
+#define PSB_UIRQ_FIRE_RASTER_REPLY 4
+
+#define PSB_2D_SIZE (256*1024*1024)
+#define PSB_MAX_RELOC_PAGES 1024
+
+#define PSB_LOW_REG_OFFS 0x0204
+#define PSB_HIGH_REG_OFFS 0x0600
+
+#define PSB_NUM_VBLANKS 2
+#define PSB_WATCHDOG_DELAY (HZ * 2)
+#define PSB_LID_DELAY (HZ / 10)
+
+#define MDFLD_PNW_B0 0x04
+#define MDFLD_PNW_C0 0x08
+
+#define MDFLD_DSR_2D_3D_0 (1 << 0)
+#define MDFLD_DSR_2D_3D_2 (1 << 1)
+#define MDFLD_DSR_CURSOR_0 (1 << 2)
+#define MDFLD_DSR_CURSOR_2 (1 << 3)
+#define MDFLD_DSR_OVERLAY_0 (1 << 4)
+#define MDFLD_DSR_OVERLAY_2 (1 << 5)
+#define MDFLD_DSR_MIPI_CONTROL (1 << 6)
+#define MDFLD_DSR_DAMAGE_MASK_0 ((1 << 0) | (1 << 2) | (1 << 4))
+#define MDFLD_DSR_DAMAGE_MASK_2 ((1 << 1) | (1 << 3) | (1 << 5))
+#define MDFLD_DSR_2D_3D (MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
+
+#define MDFLD_DSR_RR 45
+#define MDFLD_DPU_ENABLE (1 << 31)
+#define MDFLD_DSR_FULLSCREEN (1 << 30)
+#define MDFLD_DSR_DELAY (HZ / MDFLD_DSR_RR)
+
+#define PSB_PWR_STATE_ON 1
+#define PSB_PWR_STATE_OFF 2
+
+#define PSB_PMPOLICY_NOPM 0
+#define PSB_PMPOLICY_CLOCKGATING 1
+#define PSB_PMPOLICY_POWERDOWN 2
+
+#define PSB_PMSTATE_POWERUP 0
+#define PSB_PMSTATE_CLOCKGATED 1
+#define PSB_PMSTATE_POWERDOWN 2
+#define PSB_PCIx_MSI_ADDR_LOC 0x94
+#define PSB_PCIx_MSI_DATA_LOC 0x98
+
+/* Medfield crystal settings */
+#define KSEL_CRYSTAL_19 1
+#define KSEL_BYPASS_19 5
+#define KSEL_BYPASS_25 6
+#define KSEL_BYPASS_83_100 7
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct psb_intel_opregion {
+ struct opregion_header *header;
+ struct opregion_acpi *acpi;
+ struct opregion_swsci *swsci;
+ struct opregion_asle *asle;
+ void *vbt;
+ u32 __iomem *lid_state;
+ struct work_struct asle_work;
+};
+
+struct sdvo_device_mapping {
+ u8 initialized;
+ u8 dvo_port;
+ u8 slave_addr;
+ u8 dvo_wiring;
+ u8 i2c_pin;
+ u8 i2c_speed;
+ u8 ddc_pin;
+};
+
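+/* State for one GMBUS (hardware i2c) bus */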
+struct intel_gmbus {
+ struct i2c_adapter adapter;
+ struct i2c_adapter *force_bit;
+ u32 reg0;
+};
+
+/* Register offset maps */
+struct psb_offset {
+ u32 fp0;
+ u32 fp1;
+ u32 cntr;
+ u32 conf;
+ u32 src;
+ u32 dpll;
+ u32 dpll_md;
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 surf;
+ u32 addr;
+ u32 base;
+ u32 status;
+ u32 linoff;
+ u32 tileoff;
+ u32 palette;
+};
+
+/*
+ * Register save state. This is used to hold the context when the
+ * device is powered off. In the case of Oaktrail this can (but does not
+ * yet) include screen blank. Operations occurring during the save
+ * update the register cache instead.
+ */
+
+/* Common status for pipes */
+struct psb_pipe {
+ u32 fp0;
+ u32 fp1;
+ u32 cntr;
+ u32 conf;
+ u32 src;
+ u32 dpll;
+ u32 dpll_md;
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 base;
+ u32 surf;
+ u32 addr;
+ u32 status;
+ u32 linoff;
+ u32 tileoff;
+ u32 palette[256];
+};
+
+struct psb_state {
+ uint32_t saveVCLK_DIVISOR_VGA0;
+ uint32_t saveVCLK_DIVISOR_VGA1;
+ uint32_t saveVCLK_POST_DIV;
+ uint32_t saveVGACNTRL;
+ uint32_t saveADPA;
+ uint32_t saveLVDS;
+ uint32_t saveDVOA;
+ uint32_t saveDVOB;
+ uint32_t saveDVOC;
+ uint32_t savePP_ON;
+ uint32_t savePP_OFF;
+ uint32_t savePP_CONTROL;
+ uint32_t savePP_CYCLE;
+ uint32_t savePFIT_CONTROL;
+ uint32_t saveCLOCKGATING;
+ uint32_t saveDSPARB;
+ uint32_t savePFIT_AUTO_RATIOS;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t savePP_ON_DELAYS;
+ uint32_t savePP_OFF_DELAYS;
+ uint32_t savePP_DIVISOR;
+ uint32_t saveBCLRPAT_A;
+ uint32_t saveBCLRPAT_B;
+ uint32_t savePERF_MODE;
+ uint32_t saveDSPFW1;
+ uint32_t saveDSPFW2;
+ uint32_t saveDSPFW3;
+ uint32_t saveDSPFW4;
+ uint32_t saveDSPFW5;
+ uint32_t saveDSPFW6;
+ uint32_t saveCHICKENBIT;
+ uint32_t saveDSPACURSOR_CTRL;
+ uint32_t saveDSPBCURSOR_CTRL;
+ uint32_t saveDSPACURSOR_BASE;
+ uint32_t saveDSPBCURSOR_BASE;
+ uint32_t saveDSPACURSOR_POS;
+ uint32_t saveDSPBCURSOR_POS;
+ uint32_t saveOV_OVADD;
+ uint32_t saveOV_OGAMC0;
+ uint32_t saveOV_OGAMC1;
+ uint32_t saveOV_OGAMC2;
+ uint32_t saveOV_OGAMC3;
+ uint32_t saveOV_OGAMC4;
+ uint32_t saveOV_OGAMC5;
+ uint32_t saveOVC_OVADD;
+ uint32_t saveOVC_OGAMC0;
+ uint32_t saveOVC_OGAMC1;
+ uint32_t saveOVC_OGAMC2;
+ uint32_t saveOVC_OGAMC3;
+ uint32_t saveOVC_OGAMC4;
+ uint32_t saveOVC_OGAMC5;
+
+ /* DPST register save */
+ uint32_t saveHISTOGRAM_INT_CONTROL_REG;
+ uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
+ uint32_t savePWM_CONTROL_LOGIC;
+};
+
+struct medfield_state {
+ uint32_t saveMIPI;
+ uint32_t saveMIPI_C;
+
+ uint32_t savePFIT_CONTROL;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t saveHDMIPHYMISCCTL;
+ uint32_t saveHDMIB_CONTROL;
+};
+
+struct cdv_state {
+ uint32_t saveDSPCLK_GATE_D;
+ uint32_t saveRAMCLK_GATE_D;
+ uint32_t saveDSPARB;
+ uint32_t saveDSPFW[6];
+ uint32_t saveADPA;
+ uint32_t savePP_CONTROL;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t saveLVDS;
+ uint32_t savePFIT_CONTROL;
+ uint32_t savePP_ON_DELAYS;
+ uint32_t savePP_OFF_DELAYS;
+ uint32_t savePP_CYCLE;
+ uint32_t saveVGACNTRL;
+ uint32_t saveIER;
+ uint32_t saveIMR;
+ u8 saveLBB;
+};
+
+struct psb_save_area {
+ struct psb_pipe pipe[3];
+ uint32_t saveBSM;
+ uint32_t saveVBT;
+ union {
+ struct psb_state psb;
+ struct medfield_state mdfld;
+ struct cdv_state cdv;
+ };
+ uint32_t saveBLC_PWM_CTL2;
+ uint32_t saveBLC_PWM_CTL;
+};
+
+struct psb_ops;
+
+#define PSB_NUM_PIPE 3
+
+struct drm_psb_private {
+ struct drm_device *dev;
+ struct pci_dev *aux_pdev; /* Currently only used by mrst */
+ const struct psb_ops *ops;
+ const struct psb_offset *regmap;
+
+ struct child_device_config *child_dev;
+ int child_dev_num;
+
+ struct psb_gtt gtt;
+
+ /* GTT Memory manager */
+ struct psb_gtt_mm *gtt_mm;
+ struct page *scratch_page;
+ u32 __iomem *gtt_map;
+ uint32_t stolen_base;
+ u8 __iomem *vram_addr;
+ unsigned long vram_stolen_size;
+ int gtt_initialized;
+ u16 gmch_ctrl; /* Saved GTT setup */
+ u32 pge_ctl;
+
+ struct mutex gtt_mutex;
+ struct resource *gtt_mem; /* Our PCI resource */
+
+ struct psb_mmu_driver *mmu;
+ struct psb_mmu_pd *pf_pd;
+
+ /* Register base */
+ uint8_t __iomem *sgx_reg;
+ uint8_t __iomem *vdc_reg;
+	uint8_t __iomem *aux_reg; /* Auxiliary vdc pipe regs */
+ uint32_t gatt_free_offset;
+
+ /* Fencing / irq */
+ uint32_t vdc_irq_mask;
+ uint32_t pipestat[PSB_NUM_PIPE];
+
+ spinlock_t irqmask_lock;
+
+ /* Power */
+ bool suspended;
+ bool display_power;
+ int display_count;
+
+ /* Modesetting */
+ struct psb_intel_mode_device mode_dev;
+ bool modeset; /* true if we have done the mode_device setup */
+
+ struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
+ struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
+ uint32_t num_pipe;
+
+ /* OSPM info (Power management base) (TODO: can go ?) */
+ uint32_t ospm_base;
+
+ /* Sizes info */
+ u32 fuse_reg_value;
+ u32 video_device_fuse;
+
+ /* PCI revision ID for B0:D2:F0 */
+ uint8_t platform_rev_id;
+
+ /* gmbus */
+ struct intel_gmbus *gmbus;
+ uint8_t __iomem *gmbus_reg;
+
+ /* Used by SDVO */
+ int crt_ddc_pin;
+ /* FIXME: The mappings should be parsed from bios but for now we can
+ pretend there are no mappings available */
+ struct sdvo_device_mapping sdvo_mappings[2];
+ u32 hotplug_supported_mask;
+ struct drm_property *broadcast_rgb_property;
+ struct drm_property *force_audio_property;
+
+ /* LVDS info */
+ int backlight_duty_cycle; /* restore backlight to this value */
+ bool panel_wants_dither;
+ struct drm_display_mode *panel_fixed_mode;
+ struct drm_display_mode *lfp_lvds_vbt_mode;
+ struct drm_display_mode *sdvo_lvds_vbt_mode;
+
+ struct bdb_lvds_backlight *lvds_bl; /* LVDS backlight info from VBT */
+ struct psb_intel_i2c_chan *lvds_i2c_bus; /* FIXME: Remove this? */
+
+ /* Feature bits from the VBIOS */
+ unsigned int int_tv_support:1;
+ unsigned int lvds_dither:1;
+ unsigned int lvds_vbt:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ int lvds_ssc_freq;
+ bool is_lvds_on;
+ bool is_mipi_on;
+ u32 mipi_ctrl_display;
+
+ unsigned int core_freq;
+ uint32_t iLVDS_enable;
+
+ /* Runtime PM state */
+ int rpm_enabled;
+
+ /* MID specific */
+ bool has_gct;
+ struct oaktrail_gct_data gct_data;
+
+ /* Oaktrail HDMI state */
+ struct oaktrail_hdmi_dev *hdmi_priv;
+
+ /* Register state */
+ struct psb_save_area regs;
+
+ /* MSI reg save */
+ uint32_t msi_addr;
+ uint32_t msi_data;
+
+ /* Hotplug handling */
+ struct work_struct hotplug_work;
+
+ /* LID-Switch */
+ spinlock_t lid_lock;
+ struct timer_list lid_timer;
+ struct psb_intel_opregion opregion;
+ u32 lid_last_state;
+
+ /* Watchdog */
+ uint32_t apm_reg;
+ uint16_t apm_base;
+
+ /*
+ * Used for modifying backlight from
+ * xrandr -- consider removing and using HAL instead
+ */
+ struct backlight_device *backlight_device;
+ struct drm_property *backlight_property;
+ bool backlight_enabled;
+ int backlight_level;
+ uint32_t blc_adj1;
+ uint32_t blc_adj2;
+
+ void *fbdev;
+
+ /* 2D acceleration */
+ spinlock_t lock_2d;
+
+ /* Panel brightness */
+ int brightness;
+ int brightness_adjusted;
+
+ bool dsr_enable;
+ u32 dsr_fb_update;
+ bool dpi_panel_on[3];
+ void *dsi_configs[2];
+ u32 bpp;
+ u32 bpp2;
+
+ u32 pipeconf[3];
+ u32 dspcntr[3];
+
+ int mdfld_panel_id;
+
+ bool dplla_96mhz; /* DPLL data from the VBT */
+
+ struct {
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+
+ bool initialized;
+ bool support;
+ int bpp;
+ struct edp_power_seq pps;
+ } edp;
+ uint8_t panel_type;
+};
+
+
+/* Operations for each board type */
+struct psb_ops {
+ const char *name;
+ unsigned int accel_2d:1;
+ int pipes; /* Number of output pipes */
+ int crtcs; /* Number of CRTCs */
+ int sgx_offset; /* Base offset of SGX device */
+ int hdmi_mask; /* Mask of HDMI CRTCs */
+ int lvds_mask; /* Mask of LVDS CRTCs */
+ int sdvo_mask; /* Mask of SDVO CRTCs */
+	int cursor_needs_phys;  /* If the cursor base reg needs a physical address */
+
+ /* Sub functions */
+ struct drm_crtc_helper_funcs const *crtc_helper;
+ struct drm_crtc_funcs const *crtc_funcs;
+ const struct gma_clock_funcs *clock_funcs;
+
+ /* Setup hooks */
+ int (*chip_setup)(struct drm_device *dev);
+ void (*chip_teardown)(struct drm_device *dev);
+	/* Optional helper called after modeset */
+ void (*errata)(struct drm_device *dev);
+
+ /* Display management hooks */
+ int (*output_init)(struct drm_device *dev);
+ int (*hotplug)(struct drm_device *dev);
+ void (*hotplug_enable)(struct drm_device *dev, bool on);
+ /* Power management hooks */
+ void (*init_pm)(struct drm_device *dev);
+ int (*save_regs)(struct drm_device *dev);
+ int (*restore_regs)(struct drm_device *dev);
+ int (*power_up)(struct drm_device *dev);
+ int (*power_down)(struct drm_device *dev);
+ void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc);
+ void (*disable_sr)(struct drm_device *dev);
+
+ void (*lvds_bl_power)(struct drm_device *dev, bool on);
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ /* Backlight */
+ int (*backlight_init)(struct drm_device *dev);
+#endif
+ int i2c_bus; /* I2C bus identifier for Moorestown */
+};
+
+
+
+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
+extern int drm_pick_crtcs(struct drm_device *dev);
+
+static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
+{
+ return (struct drm_psb_private *) dev->dev_private;
+}
+
+/* psb_irq.c */
+extern irqreturn_t psb_irq_handler(int irq, void *arg);
+extern int psb_irq_enable_dpst(struct drm_device *dev);
+extern int psb_irq_disable_dpst(struct drm_device *dev);
+extern void psb_irq_preinstall(struct drm_device *dev);
+extern int psb_irq_postinstall(struct drm_device *dev);
+extern void psb_irq_uninstall(struct drm_device *dev);
+extern void psb_irq_turn_on_dpst(struct drm_device *dev);
+extern void psb_irq_turn_off_dpst(struct drm_device *dev);
+
+extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
+extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+extern int psb_enable_vblank(struct drm_device *dev, int crtc);
+extern void psb_disable_vblank(struct drm_device *dev, int crtc);
+void
+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+
+void
+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+
+extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
+
+/* framebuffer.c */
+extern int psbfb_probed(struct drm_device *dev);
+extern int psbfb_remove(struct drm_device *dev,
+ struct drm_framebuffer *fb);
+/* accel_2d.c */
+extern void psbfb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *region);
+extern int psbfb_sync(struct fb_info *info);
+extern void psb_spank(struct drm_psb_private *dev_priv);
+
+/* psb_reset.c */
+extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
+extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
+
+/* modesetting */
+extern void psb_modeset_init(struct drm_device *dev);
+extern void psb_modeset_cleanup(struct drm_device *dev);
+extern int psb_fbdev_init(struct drm_device *dev);
+
+/* backlight.c */
+int gma_backlight_init(struct drm_device *dev);
+void gma_backlight_exit(struct drm_device *dev);
+void gma_backlight_disable(struct drm_device *dev);
+void gma_backlight_enable(struct drm_device *dev);
+void gma_backlight_set(struct drm_device *dev, int v);
+
+/* oaktrail_crtc.c */
+extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs;
+
+/* oaktrail_lvds.c */
+extern void oaktrail_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+
+/* psb_intel_display.c */
+extern const struct drm_crtc_helper_funcs psb_intel_helper_funcs;
+extern const struct drm_crtc_funcs psb_intel_crtc_funcs;
+
+/* psb_intel_lvds.c */
+extern const struct drm_connector_helper_funcs
+ psb_intel_lvds_connector_helper_funcs;
+extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
+
+/* gem.c */
+extern void psb_gem_free_object(struct drm_gem_object *obj);
+extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
+ struct drm_file *file);
+extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern int psb_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+/* psb_device.c */
+extern const struct psb_ops psb_chip_ops;
+
+/* oaktrail_device.c */
+extern const struct psb_ops oaktrail_chip_ops;
+
+/* mdfld_device.c */
+extern const struct psb_ops mdfld_chip_ops;
+
+/* cdv_device.c */
+extern const struct psb_ops cdv_chip_ops;
+
+/* Debug print bit settings */
+#define PSB_D_GENERAL (1 << 0)
+#define PSB_D_INIT (1 << 1)
+#define PSB_D_IRQ (1 << 2)
+#define PSB_D_ENTRY (1 << 3)
+/* debug getting the H/V BP/FP count */
+#define PSB_D_HV (1 << 4)
+#define PSB_D_DBI_BF (1 << 5)
+#define PSB_D_PM (1 << 6)
+#define PSB_D_RENDER (1 << 7)
+#define PSB_D_REG (1 << 8)
+#define PSB_D_MSVDX (1 << 9)
+#define PSB_D_TOPAZ (1 << 10)
+
+extern int drm_idle_check_interval;
+
+/* Utilities */
+static inline u32 MRST_MSG_READ32(uint port, uint offset)
+{
+ int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
+ uint32_t ret_val = 0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_read_config_dword(pci_root, 0xD4, &ret_val);
+ pci_dev_put(pci_root);
+ return ret_val;
+}
+static inline void MRST_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+ int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD4, value);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_dev_put(pci_root);
+}
+static inline u32 MDFLD_MSG_READ32(uint port, uint offset)
+{
+ int mcr = (0x10<<24) | (port << 16) | (offset << 8);
+ uint32_t ret_val = 0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_read_config_dword(pci_root, 0xD4, &ret_val);
+ pci_dev_put(pci_root);
+ return ret_val;
+}
+static inline void MDFLD_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+ int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD4, value);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_dev_put(pci_root);
+}
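+
+/*
+ * The four helpers above follow the message bus access pattern of the
+ * MID north complex: a command word (opcode, port, register offset and
+ * byte enables) is written to PCI config offset 0xD0 of device 0:0.0,
+ * and the data is exchanged through offset 0xD4. Usage sketch (the
+ * port/offset values here are illustrative only):
+ *
+ *	u32 val = MRST_MSG_READ32(0x04, 0x30);
+ *	MRST_MSG_WRITE32(0x04, 0x30, val | 1);
+ */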
+
+static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ return ioread32(dev_priv->vdc_reg + reg);
+}
+
+static inline uint32_t REGISTER_READ_AUX(struct drm_device *dev, uint32_t reg)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ return ioread32(dev_priv->aux_reg + reg);
+}
+
+#define REG_READ(reg) REGISTER_READ(dev, (reg))
+#define REG_READ_AUX(reg) REGISTER_READ_AUX(dev, (reg))
+
+/* Useful for post reads */
+static inline uint32_t REGISTER_READ_WITH_AUX(struct drm_device *dev,
+ uint32_t reg, int aux)
+{
+ uint32_t val;
+
+ if (aux)
+ val = REG_READ_AUX(reg);
+ else
+ val = REG_READ(reg);
+
+ return val;
+}
+
+#define REG_READ_WITH_AUX(reg, aux) REGISTER_READ_WITH_AUX(dev, (reg), (aux))
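+
+/*
+ * Usage sketch: REG_READ_WITH_AUX(reg, 1) reads 'reg' from the auxiliary
+ * VDC block, REG_READ_WITH_AUX(reg, 0) from the primary one.
+ */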
+
+static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
+ uint32_t val)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ iowrite32((val), dev_priv->vdc_reg + (reg));
+}
+
+static inline void REGISTER_WRITE_AUX(struct drm_device *dev, uint32_t reg,
+ uint32_t val)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ iowrite32((val), dev_priv->aux_reg + (reg));
+}
+
+#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
+#define REG_WRITE_AUX(reg, val) REGISTER_WRITE_AUX(dev, (reg), (val))
+
+static inline void REGISTER_WRITE_WITH_AUX(struct drm_device *dev, uint32_t reg,
+ uint32_t val, int aux)
+{
+ if (aux)
+ REG_WRITE_AUX(reg, val);
+ else
+ REG_WRITE(reg, val);
+}
+
+#define REG_WRITE_WITH_AUX(reg, val, aux) REGISTER_WRITE_WITH_AUX(dev, (reg), (val), (aux))
+
+static inline void REGISTER_WRITE16(struct drm_device *dev,
+ uint32_t reg, uint32_t val)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ iowrite16((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE16(reg, val) REGISTER_WRITE16(dev, (reg), (val))
+
+static inline void REGISTER_WRITE8(struct drm_device *dev,
+ uint32_t reg, uint32_t val)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ iowrite8((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val))
+
+#define PSB_WVDC32(_val, _offs) iowrite32(_val, dev_priv->vdc_reg + (_offs))
+#define PSB_RVDC32(_offs) ioread32(dev_priv->vdc_reg + (_offs))
+
+/* #define TRAP_SGX_PM_FAULT 1 */
+#ifdef TRAP_SGX_PM_FAULT
+#define PSB_RSGX32(_offs) \
+({ \
+ if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \
+ printk(KERN_ERR \
+ "access sgx when it's off!! (READ) %s, %d\n", \
+ __FILE__, __LINE__); \
+		mdelay(1000); \
+ } \
+ ioread32(dev_priv->sgx_reg + (_offs)); \
+})
+#else
+#define PSB_RSGX32(_offs) ioread32(dev_priv->sgx_reg + (_offs))
+#endif
+#define PSB_WSGX32(_val, _offs) iowrite32(_val, dev_priv->sgx_reg + (_offs))
+
+#define MSVDX_REG_DUMP 0
+
+#define PSB_WMSVDX32(_val, _offs) iowrite32(_val, dev_priv->msvdx_reg + (_offs))
+#define PSB_RMSVDX32(_offs) ioread32(dev_priv->msvdx_reg + (_offs))
+
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
new file mode 100644
index 00000000000..87b50ba64ed
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -0,0 +1,584 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "gma_display.h"
+#include "power.h"
+
+#define INTEL_LIMIT_I9XX_SDVO_DAC 0
+#define INTEL_LIMIT_I9XX_LVDS 1
+
+static const struct gma_limit_t psb_intel_limits[] = {
+ { /* INTEL_LIMIT_I9XX_SDVO_DAC */
+ .dot = {.min = 20000, .max = 400000},
+ .vco = {.min = 1400000, .max = 2800000},
+ .n = {.min = 1, .max = 6},
+ .m = {.min = 70, .max = 120},
+ .m1 = {.min = 8, .max = 18},
+ .m2 = {.min = 3, .max = 7},
+ .p = {.min = 5, .max = 80},
+ .p1 = {.min = 1, .max = 8},
+ .p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 5},
+ .find_pll = gma_find_best_pll,
+ },
+ { /* INTEL_LIMIT_I9XX_LVDS */
+ .dot = {.min = 20000, .max = 400000},
+ .vco = {.min = 1400000, .max = 2800000},
+ .n = {.min = 1, .max = 6},
+ .m = {.min = 70, .max = 120},
+ .m1 = {.min = 8, .max = 18},
+ .m2 = {.min = 3, .max = 7},
+ .p = {.min = 7, .max = 98},
+ .p1 = {.min = 1, .max = 8},
+	 /* The single-channel range is 25-112MHz, and dual-channel
+	  * is 80-224MHz. Prefer single channel as much as possible.
+ */
+ .p2 = {.dot_limit = 112000, .p2_slow = 14, .p2_fast = 7},
+ .find_pll = gma_find_best_pll,
+ },
+};
+
+static const struct gma_limit_t *psb_intel_limit(struct drm_crtc *crtc,
+ int refclk)
+{
+ const struct gma_limit_t *limit;
+
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
+ else
+ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+ return limit;
+}
+
+static void psb_intel_clock(int refclk, struct gma_clock_t *clock)
+{
+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / (clock->n + 2);
+ clock->dot = clock->vco / clock->p;
+}
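+
+/*
+ * Worked example, with values chosen to satisfy the limits above (for
+ * illustration only): refclk = 96000 kHz, m1 = 12, m2 = 5, n = 2,
+ * p1 = 2, p2 = 5 gives m = 5 * 14 + 7 = 77, p = 10,
+ * vco = 96000 * 77 / 4 = 1848000 kHz and dot = 184800 kHz.
+ */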
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+ u32 pfit_control;
+
+ pfit_control = REG_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+ /* Must be on PIPE 1 for PSB */
+ return 1;
+}
+
+static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ int pipe = gma_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ int refclk;
+ struct gma_clock_t clock;
+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+ bool ok, is_sdvo = false;
+ bool is_lvds = false, is_tv = false;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+ const struct gma_limit_t *limit;
+
+ /* No scan out no play */
+ if (crtc->primary->fb == NULL) {
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ return 0;
+ }
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+
+ if (!connector->encoder
+ || connector->encoder->crtc != crtc)
+ continue;
+
+ switch (gma_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ is_sdvo = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ }
+ }
+
+ refclk = 96000;
+
+ limit = gma_crtc->clock_funcs->limit(crtc, refclk);
+
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
+ &clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d",
+ adjusted_mode->clock, clock.dot);
+ return 0;
+ }
+
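+	/*
+	 * Pack the divisors into the FP register layout: N at bit 16,
+	 * M1 at bit 8 and M2 at bit 0 (the same FP_*_DIV fields that
+	 * psb_intel_crtc_clock_get() decodes).
+	 */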
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+
+ dpll = DPLL_VGA_MODE_DIS;
+ if (is_lvds) {
+ dpll |= DPLLB_MODE_LVDS;
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ } else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+ if (is_sdvo) {
+ int sdvo_pixel_multiply =
+ adjusted_mode->clock / mode->clock;
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |=
+ (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+
+	/* Program the P1 post divider one-hot: p1 sets bit (p1 - 1)
+	 * of the DPLL field starting at bit 16 */
+ dpll |= (1 << (clock.p1 - 1)) << 16;
+ switch (clock.p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+
+ if (is_tv) {
+ /* XXX: just matching BIOS for now */
+/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ }
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(map->conf);
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (pipe == 0)
+ dspcntr |= DISPPLANE_SEL_PIPE_A;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+ pipeconf |= PIPEACONF_ENABLE;
+ dpll |= DPLL_VCO_ENABLE;
+
+
+ /* Disable the panel fitter if it was on our pipe */
+ if (psb_intel_panel_fitter_pipe(dev) == pipe)
+ REG_WRITE(PFIT_CONTROL, 0);
+
+ drm_mode_debug_printmodeline(mode);
+
+ if (dpll & DPLL_VCO_ENABLE) {
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
+ udelay(150);
+ }
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (is_lvds) {
+ u32 lvds = REG_READ(LVDS);
+
+ lvds &= ~LVDS_PIPEB_SELECT;
+ if (pipe == 1)
+ lvds |= LVDS_PIPEB_SELECT;
+
+ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ /* Set the B0-B3 data pairs corresponding to
+ * whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+ if (clock.p2 == 7)
+ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more
+ * thoroughly into how panels behave in the two modes.
+ */
+
+ REG_WRITE(LVDS, lvds);
+ REG_READ(LVDS);
+ }
+
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+
+ /* write it again -- the BIOS does, after all */
+ REG_WRITE(map->dpll, dpll);
+
+ REG_READ(map->dpll);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ REG_WRITE(map->size,
+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(map->pos, 0);
+ REG_WRITE(map->src,
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ REG_WRITE(map->conf, pipeconf);
+ REG_READ(map->conf);
+
+ gma_wait_for_vblank(dev);
+
+ REG_WRITE(map->cntr, dspcntr);
+
+ /* Flush the plane changes */
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+
+ gma_wait_for_vblank(dev);
+
+ return 0;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int psb_intel_crtc_clock_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int pipe = gma_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ u32 dpll;
+ u32 fp;
+ struct gma_clock_t clock;
+ bool is_lvds;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+
+ if (gma_power_begin(dev, false)) {
+ dpll = REG_READ(map->dpll);
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = REG_READ(map->fp0);
+ else
+ fp = REG_READ(map->fp1);
+ is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
+ gma_power_end(dev);
+ } else {
+ dpll = p->dpll;
+
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = p->fp0;
+ else
+ fp = p->fp1;
+
+ is_lvds = (pipe == 1) && (dev_priv->regs.psb.saveLVDS &
+ LVDS_PORT_EN);
+ }
+
+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+
+ if (is_lvds) {
+ clock.p1 =
+ ffs((dpll &
+ DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+ clock.p2 = 14;
+
+ if ((dpll & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ /* XXX: might not be 66MHz */
+ psb_intel_clock(66000, &clock);
+ } else
+ psb_intel_clock(48000, &clock);
+ } else {
+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
+ clock.p1 = 2;
+ else {
+ clock.p1 =
+ ((dpll &
+ DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+ }
+ if (dpll & PLL_P2_DIVIDE_BY_4)
+ clock.p2 = 4;
+ else
+ clock.p2 = 2;
+
+ psb_intel_clock(48000, &clock);
+ }
+
+ /* XXX: It would be nice to validate the clocks, but we can't reuse
+ * i830PllIsValid() because it relies on the xf86_config connector
+ * configuration being accurate, which it isn't necessarily.
+ */
+
+ return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
+ struct drm_display_mode *mode;
+ int htot;
+ int hsync;
+ int vtot;
+ int vsync;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+
+ if (gma_power_begin(dev, false)) {
+ htot = REG_READ(map->htotal);
+ hsync = REG_READ(map->hsync);
+ vtot = REG_READ(map->vtotal);
+ vsync = REG_READ(map->vsync);
+ gma_power_end(dev);
+ } else {
+ htot = p->htotal;
+ hsync = p->hsync;
+ vtot = p->vtotal;
+ vsync = p->vsync;
+ }
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
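+	/*
+	 * The timing registers hold each field as (value - 1) in the low
+	 * and high 16-bit halves, so add one back when decoding; e.g. a
+	 * low half of 0x031f decodes to an hdisplay of 800.
+	 */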
+ mode->clock = psb_intel_crtc_clock_get(dev, crtc);
+ mode->hdisplay = (htot & 0xffff) + 1;
+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+ mode->hsync_start = (hsync & 0xffff) + 1;
+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+ mode->vdisplay = (vtot & 0xffff) + 1;
+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+ mode->vsync_start = (vsync & 0xffff) + 1;
+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ return mode;
+}
+
+const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
+ .dpms = gma_crtc_dpms,
+ .mode_fixup = gma_crtc_mode_fixup,
+ .mode_set = psb_intel_crtc_mode_set,
+ .mode_set_base = gma_pipe_set_base,
+ .prepare = gma_crtc_prepare,
+ .commit = gma_crtc_commit,
+ .disable = gma_crtc_disable,
+};
+
+const struct drm_crtc_funcs psb_intel_crtc_funcs = {
+ .save = gma_crtc_save,
+ .restore = gma_crtc_restore,
+ .cursor_set = gma_crtc_cursor_set,
+ .cursor_move = gma_crtc_cursor_move,
+ .gamma_set = gma_crtc_gamma_set,
+ .set_config = gma_crtc_set_config,
+ .destroy = gma_crtc_destroy,
+};
+
+const struct gma_clock_funcs psb_clock_funcs = {
+ .clock = psb_intel_clock,
+ .limit = psb_intel_limit,
+ .pll_is_valid = gma_pll_is_valid,
+};
+
+/*
+ * Set the default values of the cursor control and base registers
+ * to zero. This is a workaround for a h/w defect on Oaktrail.
+ */
+static void psb_intel_cursor_init(struct drm_device *dev,
+ struct gma_crtc *gma_crtc)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
+ u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
+ struct gtt_range *cursor_gt;
+
+ if (dev_priv->ops->cursor_needs_phys) {
+ /* Allocate 4 pages of stolen mem for a hardware cursor. That
+ * is enough for the 64 x 64 ARGB cursors we support.
+ */
+ cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1,
+ PAGE_SIZE);
+ if (!cursor_gt) {
+ gma_crtc->cursor_gt = NULL;
+ goto out;
+ }
+ gma_crtc->cursor_gt = cursor_gt;
+ gma_crtc->cursor_addr = dev_priv->stolen_base +
+ cursor_gt->offset;
+ } else {
+ gma_crtc->cursor_gt = NULL;
+ }
+
+out:
+ REG_WRITE(control[gma_crtc->pipe], 0);
+ REG_WRITE(base[gma_crtc->pipe], 0);
+}
+
+void psb_intel_crtc_init(struct drm_device *dev, int pipe,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc;
+ int i;
+ uint16_t *r_base, *g_base, *b_base;
+
+	/* We allocate an extra array of drm_connector pointers
+	 * for fbdev after the crtc */
+ gma_crtc = kzalloc(sizeof(struct gma_crtc) +
+ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ GFP_KERNEL);
+ if (gma_crtc == NULL)
+ return;
+
+ gma_crtc->crtc_state =
+ kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
+ if (!gma_crtc->crtc_state) {
+		dev_err(dev->dev, "CRTC state error: no memory\n");
+ kfree(gma_crtc);
+ return;
+ }
+
+ /* Set the CRTC operations from the chip specific data */
+ drm_crtc_init(dev, &gma_crtc->base, dev_priv->ops->crtc_funcs);
+
+ /* Set the CRTC clock functions from chip specific data */
+ gma_crtc->clock_funcs = dev_priv->ops->clock_funcs;
+
+ drm_mode_crtc_set_gamma_size(&gma_crtc->base, 256);
+ gma_crtc->pipe = pipe;
+ gma_crtc->plane = pipe;
+
+ r_base = gma_crtc->base.gamma_store;
+ g_base = r_base + 256;
+ b_base = g_base + 256;
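+	/*
+	 * Program an identity gamma ramp; the DRM gamma store is 16 bits
+	 * per channel, so each 8-bit index is shifted into the high byte
+	 * (i << 8).
+	 */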
+ for (i = 0; i < 256; i++) {
+ gma_crtc->lut_r[i] = i;
+ gma_crtc->lut_g[i] = i;
+ gma_crtc->lut_b[i] = i;
+ r_base[i] = i << 8;
+ g_base[i] = i << 8;
+ b_base[i] = i << 8;
+
+ gma_crtc->lut_adj[i] = 0;
+ }
+
+ gma_crtc->mode_dev = mode_dev;
+ gma_crtc->cursor_addr = 0;
+
+ drm_crtc_helper_add(&gma_crtc->base,
+ dev_priv->ops->crtc_helper);
+
+	/* Set up the array of drm_connector pointers */
+ gma_crtc->mode_set.crtc = &gma_crtc->base;
+ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+ dev_priv->plane_to_crtc_mapping[gma_crtc->plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[gma_crtc->plane] = &gma_crtc->base;
+ dev_priv->pipe_to_crtc_mapping[gma_crtc->pipe] = &gma_crtc->base;
+ gma_crtc->mode_set.connectors = (struct drm_connector **)(gma_crtc + 1);
+ gma_crtc->mode_set.num_connectors = 0;
+ psb_intel_cursor_init(dev, gma_crtc);
+
+ /* Set to true so that the pipe is forced off on initial config. */
+ gma_crtc->active = true;
+}
+
+struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
+{
+ struct drm_crtc *crtc = NULL;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ if (gma_crtc->pipe == pipe)
+ break;
+ }
+ return crtc;
+}
+
+int gma_connector_clones(struct drm_device *dev, int type_mask)
+{
+ int index_mask = 0;
+ struct drm_connector *connector;
+ int entry = 0;
+
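+	/*
+	 * Walk the connector list in order, setting bit N of the result
+	 * when the Nth connector's encoder type appears in type_mask.
+	 */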
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ if (type_mask & (1 << gma_encoder->type))
+ index_mask |= (1 << entry);
+ entry++;
+ }
+ return index_mask;
+}
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
new file mode 100644
index 00000000000..336bd3aa1a0
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2009-2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __INTEL_DRV_H__
+#define __INTEL_DRV_H__
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/gpio.h>
+#include "gma_display.h"
+
+/*
+ * Display related stuff
+ */
+
+/* maximum connectors per crtc in the mode set */
+#define INTELFB_CONN_LIMIT 4
+
+/* Intel Pipe Clone Bit */
+#define INTEL_HDMIB_CLONE_BIT 1
+#define INTEL_HDMIC_CLONE_BIT 2
+#define INTEL_HDMID_CLONE_BIT 3
+#define INTEL_HDMIE_CLONE_BIT 4
+#define INTEL_HDMIF_CLONE_BIT 5
+#define INTEL_SDVO_NON_TV_CLONE_BIT 6
+#define INTEL_SDVO_TV_CLONE_BIT 7
+#define INTEL_SDVO_LVDS_CLONE_BIT 8
+#define INTEL_ANALOG_CLONE_BIT 9
+#define INTEL_TV_CLONE_BIT 10
+#define INTEL_DP_B_CLONE_BIT 11
+#define INTEL_DP_C_CLONE_BIT 12
+#define INTEL_DP_D_CLONE_BIT 13
+#define INTEL_LVDS_CLONE_BIT 14
+#define INTEL_DVO_TMDS_CLONE_BIT 15
+#define INTEL_DVO_LVDS_CLONE_BIT 16
+#define INTEL_EDP_CLONE_BIT 17
+
+/* These are outputs from the chip - integrated only;
+ * external chips are via DVO or SDVO output */
+#define INTEL_OUTPUT_UNUSED 0
+#define INTEL_OUTPUT_ANALOG 1
+#define INTEL_OUTPUT_DVO 2
+#define INTEL_OUTPUT_SDVO 3
+#define INTEL_OUTPUT_LVDS 4
+#define INTEL_OUTPUT_TVOUT 5
+#define INTEL_OUTPUT_HDMI 6
+#define INTEL_OUTPUT_MIPI 7
+#define INTEL_OUTPUT_MIPI2 8
+#define INTEL_OUTPUT_DISPLAYPORT 9
+#define INTEL_OUTPUT_EDP 10
+
+#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
+#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+
+static inline void
+psb_intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
+ int multiplier)
+{
+ mode->clock *= multiplier;
+ mode->private_flags |= multiplier;
+}
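+
+/*
+ * Sketch of the encoding above: the multiplier lands in the low nibble
+ * of private_flags (shift 0), so a doubled SDVO dot clock is stored as
+ * the value 2 and recovered by the helper below.
+ */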
+
+static inline int
+psb_intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
+{
+ return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK)
+ >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
+}
+
+
+/*
+ * Holds information usually kept in the device driver's private data,
+ * since it needs to be shared across the private data of multiple drivers.
+ */
+struct psb_intel_mode_device {
+
+ /*
+ * Abstracted memory manager operations
+ */
+ size_t(*bo_offset) (struct drm_device *dev, void *bo);
+
+ /*
+ * LVDS info
+ */
+ int backlight_duty_cycle; /* restore backlight to this value */
+ bool panel_wants_dither;
+ struct drm_display_mode *panel_fixed_mode;
+ struct drm_display_mode *panel_fixed_mode2;
+ struct drm_display_mode *vbt_mode; /* if any */
+
+ uint32_t saveBLC_PWM_CTL;
+};
+
+struct psb_intel_i2c_chan {
+ /* for getting at dev. private (mmio etc.) */
+ struct drm_device *drm_dev;
+ u32 reg; /* GPIO reg */
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data algo;
+ u8 slave_addr;
+};
+
+struct gma_encoder {
+ struct drm_encoder base;
+ int type;
+ bool needs_tv_clock;
+ void (*hot_plug)(struct gma_encoder *);
+ int crtc_mask;
+ int clone_mask;
+ u32 ddi_select; /* Channel info */
+#define DDI0_SELECT 0x01
+#define DDI1_SELECT 0x02
+#define DP_MASK 0x8000
+#define DDI_MASK 0x03
+ void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */
+
+	/* FIXME: Either make SDVO and LVDS store their i2c here or give CDV
+	   its own set of output privates */
+ struct psb_intel_i2c_chan *i2c_bus;
+ struct psb_intel_i2c_chan *ddc_bus;
+};
+
+struct gma_connector {
+ struct drm_connector base;
+ struct gma_encoder *encoder;
+};
+
+struct psb_intel_crtc_state {
+ uint32_t saveDSPCNTR;
+ uint32_t savePIPECONF;
+ uint32_t savePIPESRC;
+ uint32_t saveDPLL;
+ uint32_t saveFP0;
+ uint32_t saveFP1;
+ uint32_t saveHTOTAL;
+ uint32_t saveHBLANK;
+ uint32_t saveHSYNC;
+ uint32_t saveVTOTAL;
+ uint32_t saveVBLANK;
+ uint32_t saveVSYNC;
+ uint32_t saveDSPSTRIDE;
+ uint32_t saveDSPSIZE;
+ uint32_t saveDSPPOS;
+ uint32_t saveDSPBASE;
+ uint32_t savePalette[256];
+};
+
+struct gma_crtc {
+ struct drm_crtc base;
+ int pipe;
+ int plane;
+ uint32_t cursor_addr;
+ struct gtt_range *cursor_gt;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ u8 lut_adj[256];
+ struct psb_intel_framebuffer *fbdev_fb;
+ /* a mode_set for fbdev users on this crtc */
+ struct drm_mode_set mode_set;
+
+ /* GEM object that holds our cursor */
+ struct drm_gem_object *cursor_obj;
+
+ struct drm_display_mode saved_mode;
+ struct drm_display_mode saved_adjusted_mode;
+
+ struct psb_intel_mode_device *mode_dev;
+	/* CRTC mode setting flags */
+ /*crtc mode setting flags*/
+ u32 mode_flags;
+
+ bool active;
+
+	/* Saved CRTC HW state */
+ struct psb_intel_crtc_state *crtc_state;
+
+ const struct gma_clock_funcs *clock_funcs;
+};
+
+#define to_gma_crtc(x) \
+ container_of(x, struct gma_crtc, base)
+#define to_gma_connector(x) \
+ container_of(x, struct gma_connector, base)
+#define to_gma_encoder(x) \
+ container_of(x, struct gma_encoder, base)
+#define to_psb_intel_framebuffer(x) \
+ container_of(x, struct psb_intel_framebuffer, base)
+
+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
+ const u32 reg, const char *name);
+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
+int psb_intel_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter);
+
+extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
+ struct psb_intel_mode_device *mode_dev);
+extern void psb_intel_crt_init(struct drm_device *dev);
+extern bool psb_intel_sdvo_init(struct drm_device *dev, int output_device);
+extern void psb_intel_dvo_init(struct drm_device *dev);
+extern void psb_intel_tv_init(struct drm_device *dev);
+extern void psb_intel_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
+extern void oaktrail_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev);
+extern void oaktrail_dsi_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void mid_dsi_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev, int dsi_num);
+
+extern struct drm_encoder *gma_best_encoder(struct drm_connector *connector);
+extern void gma_connector_attach_encoder(struct gma_connector *connector,
+ struct gma_encoder *encoder);
+
+static inline struct gma_encoder *gma_attached_encoder(
+ struct drm_connector *connector)
+{
+ return to_gma_connector(connector)->encoder;
+}
+
+extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc);
+extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
+ int pipe);
+extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
+ int sdvoB);
+extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
+extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
+ int enable);
+extern int intelfb_probe(struct drm_device *dev);
+extern int intelfb_remove(struct drm_device *dev,
+ struct drm_framebuffer *fb);
+extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+extern int psb_intel_lvds_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value);
+extern void psb_intel_lvds_destroy(struct drm_connector *connector);
+extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
+
+/* intel_gmbus.c */
+extern void gma_intel_i2c_reset(struct drm_device *dev);
+extern int gma_intel_setup_gmbus(struct drm_device *dev);
+extern void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+extern void gma_intel_teardown_gmbus(struct drm_device *dev);
+
+/* DP support */
+extern void cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg);
+extern void cdv_intel_dp_set_m_n(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+extern void psb_intel_attach_force_audio_property(struct drm_connector *connector);
+extern void psb_intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+extern int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val);
+extern int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val);
+extern void cdv_sb_reset(struct drm_device *dev);
+
+extern void cdv_intel_attach_force_audio_property(struct drm_connector *connector);
+extern void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
new file mode 100644
index 00000000000..d7778d0472c
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -0,0 +1,850 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+/*
+ * LVDS I2C backlight control macros
+ */
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK 0xFF
+#define BLC_I2C_TYPE 0x01
+#define BLC_PWM_TYPT 0x02
+
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+
+#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
+#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
+#define PSB_BLC_PWM_PRECISION_FACTOR (10)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+
+struct psb_intel_lvds_priv {
+ /*
+	 * Saved LVDS output states
+ */
+ uint32_t savePP_ON;
+ uint32_t savePP_OFF;
+ uint32_t saveLVDS;
+ uint32_t savePP_CONTROL;
+ uint32_t savePP_CYCLE;
+ uint32_t savePFIT_CONTROL;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t saveBLC_PWM_CTL;
+
+ struct psb_intel_i2c_chan *i2c_bus;
+ struct psb_intel_i2c_chan *ddc_bus;
+};
+
+
+/*
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 ret;
+
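+	/*
+	 * The modulation frequency lives in the top bits of BLC_PWM_CTL
+	 * and is doubled below to give the 16-bit range duty cycles are
+	 * programmed against; e.g. a raw field of 0x1000 yields a maximum
+	 * of 0x2000.
+	 */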
+ if (gma_power_begin(dev, false)) {
+ ret = REG_READ(BLC_PWM_CTL);
+ gma_power_end(dev);
+ } else /* Powered off, use the saved value */
+ ret = dev_priv->regs.saveBLC_PWM_CTL;
+
+	/* The top 15 bits hold the modulation frequency */
+ ret = (ret & BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT;
+
+	ret *= 2;	/* Return a 16-bit range as needed for setting */
+ if (ret == 0)
+ dev_err(dev->dev, "BL bug: Reg %08x save %08X\n",
+ REG_READ(BLC_PWM_CTL), dev_priv->regs.saveBLC_PWM_CTL);
+ return ret;
+}
+
+/*
+ * Set LVDS backlight level by I2C command
+ *
+ * FIXME: at some point we need to both track this for PM and also
+ * disable runtime pm on MRST if the brightness is nil (i.e. blanked)
+ */
+static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
+ unsigned int level)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private;
+
+ struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+ u8 out_buf[2];
+ unsigned int blc_i2c_brightness;
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = lvds_i2c_bus->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ }
+ };
+
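+	/*
+	 * Scale 0..BRIGHTNESS_MAX_LEVEL into the 0..BRIGHTNESS_MASK I2C
+	 * range; e.g. level 50 maps to 255 * 50 / 100 = 127.
+	 */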
+ blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
+ BRIGHTNESS_MASK /
+ BRIGHTNESS_MAX_LEVEL);
+
+ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+ blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
+
+ out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
+ out_buf[1] = (u8)blc_i2c_brightness;
+
+ if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
+		dev_dbg(dev->dev, "I2C set brightness: (command, value) = (%d, %d)\n",
+ dev_priv->lvds_bl->brightnesscmd,
+ blc_i2c_brightness);
+ return 0;
+ }
+
+ dev_err(dev->dev, "I2C transfer error\n");
+ return -1;
+}
+
+
+static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private;
+
+ u32 max_pwm_blc;
+ u32 blc_pwm_duty_cycle;
+
+ max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
+
+	/* BLC_PWM_CTL should have been initialized during backlight device init */
+ BUG_ON(max_pwm_blc == 0);
+
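+	/*
+	 * Scale the requested level into the PWM range; e.g. with
+	 * max_pwm_blc == 0x2000 and level == 50 the duty cycle becomes
+	 * 0x1000 before the polarity bit is cleared.
+	 */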
+ blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
+
+ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
+
+ blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+ REG_WRITE(BLC_PWM_CTL,
+ (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+ (blc_pwm_duty_cycle));
+
+ dev_info(dev->dev, "Backlight lvds set brightness %08x\n",
+ (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+ (blc_pwm_duty_cycle));
+
+ return 0;
+}
+
+/*
+ * Set LVDS backlight level either by I2C or PWM
+ */
+void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ dev_dbg(dev->dev, "backlight level is %d\n", level);
+
+ if (!dev_priv->lvds_bl) {
+ dev_err(dev->dev, "NO LVDS backlight info\n");
+ return;
+ }
+
+ if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
+ psb_lvds_i2c_set_brightness(dev, level);
+ else
+ psb_lvds_pwm_set_brightness(dev, level);
+}
+
+/*
+ * Sets the backlight level.
+ *
+ * level: backlight level, from 0 to psb_intel_lvds_get_max_backlight().
+ */
+static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 blc_pwm_ctl;
+
+ if (gma_power_begin(dev, false)) {
+ blc_pwm_ctl = REG_READ(BLC_PWM_CTL);
+ blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ REG_WRITE(BLC_PWM_CTL,
+ (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+ dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+ gma_power_end(dev);
+ } else {
+ blc_pwm_ctl = dev_priv->regs.saveBLC_PWM_CTL &
+ ~BACKLIGHT_DUTY_CYCLE_MASK;
+ dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+ }
+}
+
+/*
+ * Sets the power state for the panel.
+ */
+static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ u32 pp_status;
+
+ if (!gma_power_begin(dev, true)) {
+ dev_err(dev->dev, "set power, chip off!\n");
+ return;
+ }
+
+ if (on) {
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+ POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & PP_ON) == 0);
+
+ psb_intel_lvds_set_backlight(dev,
+ mode_dev->backlight_duty_cycle);
+ } else {
+ psb_intel_lvds_set_backlight(dev, 0);
+
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+ ~POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while (pp_status & PP_ON);
+ }
+
+ gma_power_end(dev);
+}
+
+static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+
+ if (mode == DRM_MODE_DPMS_ON)
+ psb_intel_lvds_set_power(dev, true);
+ else
+ psb_intel_lvds_set_power(dev, false);
+
+ /* XXX: We never power down the LVDS pairs. */
+}
+
+static void psb_intel_lvds_save(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv =
+ (struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
+
+ lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
+ lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
+ lvds_priv->saveLVDS = REG_READ(LVDS);
+ lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
+ lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
+ /*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/
+ lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+ lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
+ lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
+
+	/* TODO: move backlight_duty_cycle to psb_intel_lvds_priv */
+ dev_priv->backlight_duty_cycle = (dev_priv->regs.saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+
+ /*
+ * If the light is off at server startup,
+ * just make it full brightness
+ */
+ if (dev_priv->backlight_duty_cycle == 0)
+ dev_priv->backlight_duty_cycle =
+ psb_intel_lvds_get_max_backlight(dev);
+
+ dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+ lvds_priv->savePP_ON,
+ lvds_priv->savePP_OFF,
+ lvds_priv->saveLVDS,
+ lvds_priv->savePP_CONTROL,
+ lvds_priv->savePP_CYCLE,
+ lvds_priv->saveBLC_PWM_CTL);
+}
+
+static void psb_intel_lvds_restore(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ u32 pp_status;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv =
+ (struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
+
+ dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+ lvds_priv->savePP_ON,
+ lvds_priv->savePP_OFF,
+ lvds_priv->saveLVDS,
+ lvds_priv->savePP_CONTROL,
+ lvds_priv->savePP_CYCLE,
+ lvds_priv->saveBLC_PWM_CTL);
+
+ REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL);
+ REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL);
+ REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS);
+ REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON);
+ REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF);
+ /*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/
+ REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE);
+ REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL);
+ REG_WRITE(LVDS, lvds_priv->saveLVDS);
+
+ if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) {
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+ POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & PP_ON) == 0);
+ } else {
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+ ~POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while (pp_status & PP_ON);
+ }
+}
+
+int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct drm_display_mode *fixed_mode =
+ dev_priv->mode_dev.panel_fixed_mode;
+
+ if (gma_encoder->type == INTEL_OUTPUT_MIPI2)
+ fixed_mode = dev_priv->mode_dev.panel_fixed_mode2;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ if (fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+ return MODE_OK;
+}
+
+bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
+ struct drm_encoder *tmp_encoder;
+ struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
+
+ if (gma_encoder->type == INTEL_OUTPUT_MIPI2)
+ panel_fixed_mode = mode_dev->panel_fixed_mode2;
+
+	/* PSB requires that LVDS be on pipe B; MRST has only one pipe anyway */
+ if (!IS_MRST(dev) && gma_crtc->pipe == 0) {
+ printk(KERN_ERR "Can't support LVDS on pipe A\n");
+ return false;
+ }
+ if (IS_MRST(dev) && gma_crtc->pipe != 0) {
+ printk(KERN_ERR "Must use PIPE A\n");
+ return false;
+ }
+ /* Should never happen!! */
+ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
+ head) {
+ if (tmp_encoder != encoder
+ && tmp_encoder->crtc == encoder->crtc) {
+ printk(KERN_ERR "Can't enable LVDS and another "
+ "encoder on the same pipe\n");
+ return false;
+ }
+ }
+
+ /*
+	 * If we have timings from the BIOS for the panel, put them into
+	 * the adjusted mode. The CRTC will be set up for this mode,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+ if (panel_fixed_mode != NULL) {
+ adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
+ adjusted_mode->htotal = panel_fixed_mode->htotal;
+ adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
+ adjusted_mode->vtotal = panel_fixed_mode->vtotal;
+ adjusted_mode->clock = panel_fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode,
+ CRTC_INTERLACE_HALVE_V);
+ }
+
+ /*
+ * XXX: It would be nice to support lower refresh rates on the
+ * panels to reduce power consumption, and perhaps match the
+ * user's requested refresh rate.
+ */
+
+ return true;
+}
+
+static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+
+ psb_intel_lvds_set_power(dev, false);
+
+ gma_power_end(dev);
+}
+
+static void psb_intel_lvds_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (mode_dev->backlight_duty_cycle == 0)
+ mode_dev->backlight_duty_cycle =
+ psb_intel_lvds_get_max_backlight(dev);
+
+ psb_intel_lvds_set_power(dev, true);
+}
+
+static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pfit_control;
+
+ /*
+ * The LVDS pin pair will already have been turned on in the
+ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
+ * settings.
+ */
+
+ /*
+ * Enable automatic panel scaling so that non-native modes fill the
+ * screen. Should be enabled before the pipe is enabled, according to
+ * register description and PRM.
+ */
+ if (mode->hdisplay != adjusted_mode->hdisplay ||
+ mode->vdisplay != adjusted_mode->vdisplay)
+ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ else
+ pfit_control = 0;
+
+ if (dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+ REG_WRITE(PFIT_CONTROL, pfit_control);
+}
+
+/*
+ * Detect the LVDS connection.
+ *
+ * This always returns connector_status_connected; the connector should
+ * only have been set up if an LVDS panel was actually present.
+ */
+static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
+ *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+/*
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int psb_intel_lvds_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
+ int ret = 0;
+
+ if (!IS_MRST(dev))
+ ret = psb_intel_ddc_get_modes(connector, &lvds_priv->i2c_bus->adapter);
+
+ if (ret)
+ return ret;
+
+	/* Didn't get an EDID, so
+	 * set wide sync ranges so we get all modes
+	 * handed to valid_mode for checking
+ */
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+
+ if (mode_dev->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode =
+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * psb_intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+void psb_intel_lvds_destroy(struct drm_connector *connector)
+{
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
+
+ if (lvds_priv->ddc_bus)
+ psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+int psb_intel_lvds_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct drm_encoder *encoder = connector->encoder;
+
+ if (!encoder)
+ return -1;
+
+ if (!strcmp(property->name, "scaling mode")) {
+ struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
+ uint64_t curval;
+
+ if (!crtc)
+ goto set_prop_error;
+
+ switch (value) {
+ case DRM_MODE_SCALE_FULLSCREEN:
+ break;
+ case DRM_MODE_SCALE_NO_SCALE:
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ goto set_prop_error;
+ }
+
+ if (drm_object_property_get_value(&connector->base,
+ property,
+ &curval))
+ goto set_prop_error;
+
+ if (curval == value)
+ goto set_prop_done;
+
+ if (drm_object_property_set_value(&connector->base,
+ property,
+ value))
+ goto set_prop_error;
+
+ if (crtc->saved_mode.hdisplay != 0 &&
+ crtc->saved_mode.vdisplay != 0) {
+ if (!drm_crtc_helper_set_mode(encoder->crtc,
+ &crtc->saved_mode,
+ encoder->crtc->x,
+ encoder->crtc->y,
+ encoder->crtc->primary->fb))
+ goto set_prop_error;
+ }
+ } else if (!strcmp(property->name, "backlight")) {
+ if (drm_object_property_set_value(&connector->base,
+ property,
+ value))
+ goto set_prop_error;
+ else
+ gma_backlight_set(encoder->dev, value);
+ } else if (!strcmp(property->name, "DPMS")) {
+ struct drm_encoder_helper_funcs *hfuncs
+ = encoder->helper_private;
+ hfuncs->dpms(encoder, value);
+ }
+
+set_prop_done:
+ return 0;
+set_prop_error:
+ return -1;
+}
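
For illustration, the "scaling mode" property handled above is reachable from
userspace through the generic DRM property interface. A minimal sketch using
libdrm, assuming fd, connector_id and prop_id were obtained from prior
drmModeGetConnector()/drmModeGetProperty() enumeration:

	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	/* Hypothetical helper: ask the driver to scale preserving aspect. */
	static int set_scaling_aspect(int fd, uint32_t connector_id,
				      uint32_t prop_id)
	{
		return drmModeObjectSetProperty(fd, connector_id,
						DRM_MODE_OBJECT_CONNECTOR,
						prop_id, DRM_MODE_SCALE_ASPECT);
	}
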
+
+static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
+ .dpms = psb_intel_lvds_encoder_dpms,
+ .mode_fixup = psb_intel_lvds_mode_fixup,
+ .prepare = psb_intel_lvds_prepare,
+ .mode_set = psb_intel_lvds_mode_set,
+ .commit = psb_intel_lvds_commit,
+};
+
+const struct drm_connector_helper_funcs
+ psb_intel_lvds_connector_helper_funcs = {
+ .get_modes = psb_intel_lvds_get_modes,
+ .mode_valid = psb_intel_lvds_mode_valid,
+ .best_encoder = gma_best_encoder,
+};
+
+const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = psb_intel_lvds_save,
+ .restore = psb_intel_lvds_restore,
+ .detect = psb_intel_lvds_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = psb_intel_lvds_set_property,
+ .destroy = psb_intel_lvds_destroy,
+};
+
+
+static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
+ .destroy = psb_intel_lvds_enc_destroy,
+};
+
+
+
+/**
+ * psb_intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ * @mode_dev: mode device holding the panel state and fixed mode
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void psb_intel_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
+ struct psb_intel_lvds_priv *lvds_priv;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+	struct drm_display_mode *scan;
+ struct drm_crtc *crtc;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 lvds;
+ int pipe;
+
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+ if (!gma_encoder) {
+ dev_err(dev->dev, "gma_encoder allocation error\n");
+ return;
+ }
+
+ gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+ if (!gma_connector) {
+ dev_err(dev->dev, "gma_connector allocation error\n");
+ goto failed_encoder;
+ }
+
+ lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
+ if (!lvds_priv) {
+ dev_err(dev->dev, "LVDS private allocation error\n");
+ goto failed_connector;
+ }
+
+ gma_encoder->dev_priv = lvds_priv;
+
+ connector = &gma_connector->base;
+ encoder = &gma_encoder->base;
+ drm_connector_init(dev, connector,
+ &psb_intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ drm_encoder_init(dev, encoder,
+ &psb_intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS);
+
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
+ gma_encoder->type = INTEL_OUTPUT_LVDS;
+
+ drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
+ drm_connector_helper_add(connector,
+ &psb_intel_lvds_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+	/* Attach connector properties */
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ drm_object_attach_property(&connector->base,
+ dev_priv->backlight_property,
+ BRIGHTNESS_MAX_LEVEL);
+
+	/*
+	 * Set up the I2C bus.
+	 * FIXME: destroy the i2c_bus on exit.
+	 */
+ lvds_priv->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
+ if (!lvds_priv->i2c_bus) {
+ dev_printk(KERN_ERR,
+ &dev->pdev->dev, "I2C bus registration failed.\n");
+ goto failed_blc_i2c;
+ }
+ lvds_priv->i2c_bus->slave_addr = 0x2C;
+ dev_priv->lvds_i2c_bus = lvds_priv->i2c_bus;
+
+ /*
+ * LVDS discovery:
+ * 1) check for EDID on DDC
+ * 2) check for VBT data
+ * 3) check to see if LVDS is already on
+ * if none of the above, no panel
+ * 4) make sure lid is open
+ * if closed, act like it's not there for now
+ */
+
+ /* Set up the DDC bus. */
+ lvds_priv->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
+ if (!lvds_priv->ddc_bus) {
+		dev_printk(KERN_ERR, &dev->pdev->dev,
+			   "DDC bus registration failed.\n");
+ goto failed_ddc;
+ }
+
+ /*
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+ mutex_lock(&dev->mode_config.mutex);
+ psb_intel_ddc_get_modes(connector, &lvds_priv->ddc_bus->adapter);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, scan);
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+
+ /* Failed to get EDID, what about VBT? do we need this? */
+ if (mode_dev->vbt_mode)
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, mode_dev->vbt_mode);
+
+ if (!mode_dev->panel_fixed_mode)
+ if (dev_priv->lfp_lvds_vbt_mode)
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev,
+ dev_priv->lfp_lvds_vbt_mode);
+
+ /*
+ * If we didn't get EDID, try checking if the panel is already turned
+ * on. If so, assume that whatever is currently programmed is the
+ * correct mode.
+ */
+ lvds = REG_READ(LVDS);
+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+ crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
+
+ if (crtc && (lvds & LVDS_PORT_EN)) {
+ mode_dev->panel_fixed_mode =
+ psb_intel_crtc_mode_get(dev, crtc);
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+
+ /* If we still don't have a mode after all that, give up. */
+ if (!mode_dev->panel_fixed_mode) {
+		dev_err(dev->dev, "Found no modes on the LVDS, ignoring it\n");
+ goto failed_find;
+ }
+
+ /*
+ * Blacklist machines with BIOSes that list an LVDS panel without
+ * actually having one.
+ */
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed_find:
+ mutex_unlock(&dev->mode_config.mutex);
+ if (lvds_priv->ddc_bus)
+ psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+failed_ddc:
+ if (lvds_priv->i2c_bus)
+ psb_intel_i2c_destroy(lvds_priv->i2c_bus);
+failed_blc_i2c:
+ drm_encoder_cleanup(encoder);
+ drm_connector_cleanup(connector);
+failed_connector:
+ kfree(gma_connector);
+failed_encoder:
+ kfree(gma_encoder);
+}
+
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
new file mode 100644
index 00000000000..4fca0d6feeb
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <drm/drmP.h>
+#include "psb_intel_drv.h"
+
+/**
+ * psb_intel_ddc_probe - probe for a DDC-capable display
+ * @adapter: I2C adapter to probe
+ *
+ * Perform a minimal write/read transaction at the EDID address (0x50) to
+ * determine whether a DDC-capable display answers on this bus.
+ */
+bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
+{
+ u8 out_buf[] = { 0x0, 0x0 };
+ u8 buf[2];
+ int ret;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = 0x50,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = 0x50,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = buf,
+ }
+ };
+
+ ret = i2c_transfer(adapter, msgs, 2);
+ if (ret == 2)
+ return true;
+
+ return false;
+}
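
A typical caller gates the slower EDID fetch on this probe; a hedged sketch,
with ddc_bus standing in for whatever i2c channel the connector owns:

	/* Hypothetical usage: only pull modes if something ACKs at 0x50. */
	if (psb_intel_ddc_probe(&ddc_bus->adapter))
		ret = psb_intel_ddc_get_modes(connector, &ddc_bus->adapter);
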
+
+/**
+ * psb_intel_ddc_get_modes - get modelist from monitor
+ * @connector: DRM connector device to use
+ * @adapter: I2C adapter to read the EDID from
+ *
+ * Fetch the EDID information from @connector using the DDC bus.
+ */
+int psb_intel_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid;
+ int ret = 0;
+
+ edid = drm_get_edid(connector, adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+ return ret;
+}
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
new file mode 100644
index 00000000000..0be30e4d146
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -0,0 +1,1545 @@
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __PSB_INTEL_REG_H__
+#define __PSB_INTEL_REG_H__
+
+/*
+ * GPIO regs
+ */
+#define GPIOA 0x5010
+#define GPIOB 0x5014
+#define GPIOC 0x5018
+#define GPIOD 0x501c
+#define GPIOE 0x5020
+#define GPIOF 0x5024
+#define GPIOG 0x5028
+#define GPIOH 0x502c
+# define GPIO_CLOCK_DIR_MASK (1 << 0)
+# define GPIO_CLOCK_DIR_IN (0 << 1)
+# define GPIO_CLOCK_DIR_OUT (1 << 1)
+# define GPIO_CLOCK_VAL_MASK (1 << 2)
+# define GPIO_CLOCK_VAL_OUT (1 << 3)
+# define GPIO_CLOCK_VAL_IN (1 << 4)
+# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
+# define GPIO_DATA_DIR_MASK (1 << 8)
+# define GPIO_DATA_DIR_IN (0 << 9)
+# define GPIO_DATA_DIR_OUT (1 << 9)
+# define GPIO_DATA_VAL_MASK (1 << 10)
+# define GPIO_DATA_VAL_OUT (1 << 11)
+# define GPIO_DATA_VAL_IN (1 << 12)
+# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+
+#define GMBUS0 0x5100 /* clock/port select */
+#define GMBUS_RATE_100KHZ (0<<8)
+#define GMBUS_RATE_50KHZ (1<<8)
+#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_PORT_DISABLED 0
+#define GMBUS_PORT_SSC 1
+#define GMBUS_PORT_VGADDC 2
+#define GMBUS_PORT_PANEL 3
+#define GMBUS_PORT_DPC 4 /* HDMIC */
+#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
+ /* 6 reserved */
+#define GMBUS_PORT_DPD 7 /* HDMID */
+#define GMBUS_NUM_PORTS 8
+#define GMBUS1 0x5104 /* command/status */
+#define GMBUS_SW_CLR_INT (1<<31)
+#define GMBUS_SW_RDY (1<<30)
+#define GMBUS_ENT (1<<29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0<<25)
+#define GMBUS_CYCLE_WAIT (1<<25)
+#define GMBUS_CYCLE_INDEX (2<<25)
+#define GMBUS_CYCLE_STOP (4<<25)
+#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_SLAVE_INDEX_SHIFT 8
+#define GMBUS_SLAVE_ADDR_SHIFT 1
+#define GMBUS_SLAVE_READ (1<<0)
+#define GMBUS_SLAVE_WRITE (0<<0)
+#define GMBUS2 0x5108 /* status */
+#define GMBUS_INUSE (1<<15)
+#define GMBUS_HW_WAIT_PHASE (1<<14)
+#define GMBUS_STALL_TIMEOUT (1<<13)
+#define GMBUS_INT (1<<12)
+#define GMBUS_HW_RDY (1<<11)
+#define GMBUS_SATOER (1<<10)
+#define GMBUS_ACTIVE (1<<9)
+#define GMBUS3 0x510c /* data buffer bytes 3-0 */
+#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */
+#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define GMBUS_NAK_EN (1<<3)
+#define GMBUS_IDLE_EN (1<<2)
+#define GMBUS_HW_WAIT_EN (1<<1)
+#define GMBUS_HW_RDY_EN (1<<0)
+#define GMBUS5 0x5120 /* byte index */
+#define GMBUS_2BYTE_INDEX_EN (1<<31)
+
+#define BLC_PWM_CTL 0x61254
+#define BLC_PWM_CTL2 0x61250
+#define PWM_ENABLE (1 << 31)
+#define PWM_LEGACY_MODE (1 << 30)
+#define PWM_PIPE_B (1 << 29)
+#define BLC_PWM_CTL_C 0x62254
+#define BLC_PWM_CTL2_C 0x62250
+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+/*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+#define BLM_LEGACY_MODE (1 << 16)
+/*
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
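
The frequency and duty-cycle fields above combine into a single BLC_PWM_CTL
write. A sketch of how a driver might program a brightness level, assuming
the modulation frequency field was already set by the BIOS (the usual case on
these platforms) and REG_READ/REG_WRITE as used elsewhere in this driver:

	/* Illustrative only: scale 0..max into the duty cycle field. */
	static void sketch_set_backlight(struct drm_device *dev,
					 u32 level, u32 max)
	{
		u32 ctl = REG_READ(BLC_PWM_CTL);
		u32 freq = (ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
			   BACKLIGHT_MODULATION_FREQ_SHIFT;
		/* The full cycle length is the frequency field times two. */
		u32 duty = level * (freq * 2) / max;

		ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
		REG_WRITE(BLC_PWM_CTL,
			  ctl | (duty << BACKLIGHT_DUTY_CYCLE_SHIFT));
	}
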
+
+#define I915_GCFGC 0xf0
+#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
+#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
+#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
+#define I915_DISPLAY_CLOCK_MASK (7 << 4)
+
+#define I855_HPLLCC 0xc0
+#define I855_CLOCK_CONTROL_MASK (3 << 0)
+#define I855_CLOCK_133_200 (0 << 0)
+#define I855_CLOCK_100_200 (1 << 0)
+#define I855_CLOCK_100_133 (2 << 0)
+#define I855_CLOCK_166_250 (3 << 0)
+
+/* I830 CRTC registers */
+#define HTOTAL_A 0x60000
+#define HBLANK_A 0x60004
+#define HSYNC_A 0x60008
+#define VTOTAL_A 0x6000c
+#define VBLANK_A 0x60010
+#define VSYNC_A 0x60014
+#define PIPEASRC 0x6001c
+#define BCLRPAT_A 0x60020
+#define VSYNCSHIFT_A 0x60028
+
+#define HTOTAL_B 0x61000
+#define HBLANK_B 0x61004
+#define HSYNC_B 0x61008
+#define VTOTAL_B 0x6100c
+#define VBLANK_B 0x61010
+#define VSYNC_B 0x61014
+#define PIPEBSRC 0x6101c
+#define BCLRPAT_B 0x61020
+#define VSYNCSHIFT_B 0x61028
+
+#define HTOTAL_C 0x62000
+#define HBLANK_C 0x62004
+#define HSYNC_C 0x62008
+#define VTOTAL_C 0x6200c
+#define VBLANK_C 0x62010
+#define VSYNC_C 0x62014
+#define PIPECSRC 0x6201c
+#define BCLRPAT_C 0x62020
+#define VSYNCSHIFT_C 0x62028
+
+#define PP_STATUS 0x61200
+# define PP_ON (1 << 31)
+/*
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+#define PP_READY (1 << 30)
+#define PP_SEQUENCE_NONE (0 << 28)
+#define PP_SEQUENCE_ON (1 << 28)
+#define PP_SEQUENCE_OFF (2 << 28)
+#define PP_SEQUENCE_MASK 0x30000000
+#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
+#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
+#define PP_SEQUENCE_STATE_MASK 0x0000000f
+
+#define PP_CONTROL 0x61204
+#define POWER_TARGET_ON (1 << 0)
+#define PANEL_UNLOCK_REGS (0xabcd << 16)
+#define PANEL_UNLOCK_MASK (0xffff << 16)
+#define EDP_FORCE_VDD (1 << 3)
+#define EDP_BLC_ENABLE (1 << 2)
+#define PANEL_POWER_RESET (1 << 1)
+#define PANEL_POWER_OFF (0 << 0)
+#define PANEL_POWER_ON (1 << 0)
+
+/* Poulsbo/Oaktrail */
+#define LVDSPP_ON 0x61208
+#define LVDSPP_OFF 0x6120c
+#define PP_CYCLE 0x61210
+
+/* Cedartrail */
+#define PP_ON_DELAYS 0x61208 /* Cedartrail */
+#define PANEL_PORT_SELECT_MASK (3 << 30)
+#define PANEL_PORT_SELECT_LVDS (0 << 30)
+#define PANEL_PORT_SELECT_EDP (1 << 30)
+#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
+#define PANEL_POWER_UP_DELAY_SHIFT 16
+#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff)
+#define PANEL_LIGHT_ON_DELAY_SHIFT 0
+
+#define PP_OFF_DELAYS 0x6120c /* Cedartrail */
+#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
+#define PANEL_POWER_DOWN_DELAY_SHIFT 16
+#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
+#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
+
+#define PP_DIVISOR 0x61210 /* Cedartrail */
+#define PP_REFERENCE_DIVIDER_MASK (0xffffff00)
+#define PP_REFERENCE_DIVIDER_SHIFT 8
+#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
+#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
+
+#define PFIT_CONTROL 0x61230
+#define PFIT_ENABLE (1 << 31)
+#define PFIT_PIPE_MASK (3 << 29)
+#define PFIT_PIPE_SHIFT 29
+#define PFIT_SCALING_MODE_PILLARBOX (1 << 27)
+#define PFIT_SCALING_MODE_LETTERBOX (3 << 26)
+#define VERT_INTERP_DISABLE (0 << 10)
+#define VERT_INTERP_BILINEAR (1 << 10)
+#define VERT_INTERP_MASK (3 << 10)
+#define VERT_AUTO_SCALE (1 << 9)
+#define HORIZ_INTERP_DISABLE (0 << 6)
+#define HORIZ_INTERP_BILINEAR (1 << 6)
+#define HORIZ_INTERP_MASK (3 << 6)
+#define HORIZ_AUTO_SCALE (1 << 5)
+#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
+
+#define PFIT_PGM_RATIOS 0x61234
+#define PFIT_VERT_SCALE_MASK 0xfff00000
+#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+
+#define PFIT_AUTO_RATIOS 0x61238
+
+#define DPLL_A 0x06014
+#define DPLL_B 0x06018
+#define DPLL_VCO_ENABLE (1 << 31)
+#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_VGA_MODE_DIS (1 << 28)
+#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
+#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
+#define DPLL_MODE_MASK (3 << 26)
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
+#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000	/* i915 */
+#define DPLL_LOCK (1 << 15) /* CDV */
+
+/*
+ * The i830 generation, in DAC/serial mode, defines p1 as two plus this
+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+/*
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
+#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
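
The one-hot LVDS encoding is the part that is easy to get wrong: in LVDS mode
the field holds a single set bit whose position is the divider, not the
divider value itself. A hedged sketch of the conversion, mirroring how the
i915 driver handles the same generation:

	/* Illustrative: encode an LVDS P1 post divider of 4. */
	u32 dpll = 0;
	int p1 = 4;

	dpll |= (1 << (p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* p1 == 4 sets bit 19, which lies inside the 0x003f0000 mask. */
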
+#define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required
+ * in DVO non-gang */
+# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
+#define PLL_REF_INPUT_DREFCLK (0 << 13)
+#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
+#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO
+ * TVCLKIN */
+#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+#define PLL_REF_INPUT_MASK (3 << 13)
+#define PLL_LOAD_PULSE_PHASE_SHIFT 9
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
+
+/*
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ *
+ * DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_MULTIPLIER_MASK 0x000000ff
+#define SDVO_MULTIPLIER_SHIFT_HIRES 4
+#define SDVO_MULTIPLIER_SHIFT_VGA 0
+
+/*
+ * PLL_MD
+ */
+/* Pipe A SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_A_MD 0x0601c
+/* Pipe B SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_B_MD 0x06020
+/*
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
+ */
+#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
+#define DPLL_MD_UDI_DIVIDER_SHIFT 24
+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
+#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
+/*
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides
+ * of the link knowing how many of the bytes are fill.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.30 GHz. The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of the multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
+#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
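
To make the encoding concrete, a hedged helper that picks the smallest legal
multiplier for a dot clock (the field stores multiplier - 1, written via
DPLL_MD_UDI_MULTIPLIER_SHIFT):

	/* Illustrative: smallest multiplier that lifts the bus to >= 1 GHz
	 * (the bus clock is 10x the DPLL clock, so the DPLL must reach
	 * 100 MHz, i.e. 100000 kHz).
	 */
	static int sketch_sdvo_multiplier(int dotclock_khz)
	{
		int mult;

		for (mult = 1; mult <= 5; mult++)
			if (dotclock_khz * mult >= 100000)
				return mult;
		return 5;
	}
	/* sketch_sdvo_multiplier(65000) == 2, matching the 65 MHz example
	 * above: the DPLL runs at 130 MHz and the bus at 1.3 GHz.
	 */
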
+/*
+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This should be set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
+#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+
+#define DPLL_TEST 0x606c
+#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
+#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
+#define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
+#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
+#define DPLLB_TEST_N_BYPASS (1 << 19)
+#define DPLLB_TEST_M_BYPASS (1 << 18)
+#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
+#define DPLLA_TEST_N_BYPASS (1 << 3)
+#define DPLLA_TEST_M_BYPASS (1 << 2)
+#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
+
+#define ADPA 0x61100
+#define ADPA_DAC_ENABLE (1 << 31)
+#define ADPA_DAC_DISABLE 0
+#define ADPA_PIPE_SELECT_MASK (1 << 30)
+#define ADPA_PIPE_A_SELECT 0
+#define ADPA_PIPE_B_SELECT (1 << 30)
+#define ADPA_USE_VGA_HVPOLARITY (1 << 15)
+#define ADPA_SETS_HVPOLARITY 0
+#define ADPA_VSYNC_CNTL_DISABLE (1 << 11)
+#define ADPA_VSYNC_CNTL_ENABLE 0
+#define ADPA_HSYNC_CNTL_DISABLE (1 << 10)
+#define ADPA_HSYNC_CNTL_ENABLE 0
+#define ADPA_VSYNC_ACTIVE_HIGH (1 << 4)
+#define ADPA_VSYNC_ACTIVE_LOW 0
+#define ADPA_HSYNC_ACTIVE_HIGH (1 << 3)
+#define ADPA_HSYNC_ACTIVE_LOW 0
+
+#define FPA0 0x06040
+#define FPA1 0x06044
+#define FPB0 0x06048
+#define FPB1 0x0604c
+#define FP_N_DIV_MASK 0x003f0000
+#define FP_N_DIV_SHIFT 16
+#define FP_M1_DIV_MASK 0x00003f00
+#define FP_M1_DIV_SHIFT 8
+#define FP_M2_DIV_MASK 0x0000003f
+#define FP_M2_DIV_SHIFT 0
+
+#define PORT_HOTPLUG_EN 0x61110
+#define HDMIB_HOTPLUG_INT_EN (1 << 29)
+#define HDMIC_HOTPLUG_INT_EN (1 << 28)
+#define HDMID_HOTPLUG_INT_EN (1 << 27)
+#define SDVOB_HOTPLUG_INT_EN (1 << 26)
+#define SDVOC_HOTPLUG_INT_EN (1 << 25)
+#define TV_HOTPLUG_INT_EN (1 << 18)
+#define CRT_HOTPLUG_INT_EN (1 << 9)
+#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
+/* CDV.. */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
+#define CRT_HOTPLUG_DETECT_MASK 0x000000F8
+
+#define PORT_HOTPLUG_STAT 0x61114
+#define CRT_HOTPLUG_INT_STATUS (1 << 11)
+#define TV_HOTPLUG_INT_STATUS (1 << 10)
+#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
+#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
+#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
+#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
+
+#define SDVOB 0x61140
+#define SDVOC 0x61160
+#define SDVO_ENABLE (1 << 31)
+#define SDVO_PIPE_B_SELECT (1 << 30)
+#define SDVO_STALL_SELECT (1 << 29)
+#define SDVO_INTERRUPT_ENABLE (1 << 26)
+#define SDVO_COLOR_RANGE_16_235 (1 << 8)
+#define SDVO_AUDIO_ENABLE (1 << 6)
+
+/**
+ * 915G/GM SDVO pixel multiplier.
+ *
+ * Programmed value is multiplier - 1, up to 5x.
+ *
+ * DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT 23
+#define SDVO_PHASE_SELECT_MASK (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
+#define SDVOC_GANG_MODE (1 << 16)
+#define SDVO_BORDER_ENABLE (1 << 7)
+#define SDVOB_PCIE_CONCURRENCY (1 << 3)
+#define SDVO_DETECTED (1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
+#define SDVOC_PRESERVE_MASK (1 << 17)
+
+/*
+ * This register controls the LVDS output enable, pipe selection, and data
+ * format selection.
+ *
+ * All of the clock/data pairs are force powered down by power sequencing.
+ */
+#define LVDS 0x61180
+/*
+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define LVDS_PORT_EN (1 << 31)
+/* Selects pipe B for LVDS data. Must be set on pre-965. */
+#define LVDS_PIPEB_SELECT (1 << 30)
+
+/* Turns on border drawing to allow centered display. */
+#define LVDS_BORDER_EN (1 << 15)
+
+/*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
+#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
+#define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
+/*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define LVDS_A3_POWER_MASK (3 << 6)
+#define LVDS_A3_POWER_DOWN (0 << 6)
+#define LVDS_A3_POWER_UP (3 << 6)
+/*
+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define LVDS_CLKB_POWER_MASK (3 << 4)
+#define LVDS_CLKB_POWER_DOWN (0 << 4)
+#define LVDS_CLKB_POWER_UP (3 << 4)
+/*
+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode. The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define LVDS_B0B3_POWER_MASK (3 << 2)
+#define LVDS_B0B3_POWER_DOWN (0 << 2)
+#define LVDS_B0B3_POWER_UP (3 << 2)
+
+#define PIPEACONF 0x70008
+#define PIPEACONF_ENABLE (1 << 31)
+#define PIPEACONF_DISABLE 0
+#define PIPEACONF_DOUBLE_WIDE (1 << 30)
+#define PIPECONF_ACTIVE (1 << 30)
+#define PIPECONF_DSIPLL_LOCK (1 << 29)
+#define PIPEACONF_SINGLE_WIDE 0
+#define PIPEACONF_PIPE_UNLOCKED 0
+#define PIPEACONF_DSR (1 << 26)
+#define PIPEACONF_PIPE_LOCKED (1 << 25)
+#define PIPEACONF_PALETTE 0
+#define PIPECONF_FORCE_BORDER (1 << 25)
+#define PIPEACONF_GAMMA (1 << 24)
+#define PIPECONF_PROGRESSIVE (0 << 21)
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
+#define PIPECONF_PLANE_OFF (1 << 19)
+#define PIPECONF_CURSOR_OFF (1 << 18)
+
+#define PIPEBCONF 0x71008
+#define PIPEBCONF_ENABLE (1 << 31)
+#define PIPEBCONF_DISABLE 0
+#define PIPEBCONF_DOUBLE_WIDE (1 << 30)
+#define PIPEBCONF_DISABLE 0
+#define PIPEBCONF_GAMMA (1 << 24)
+#define PIPEBCONF_PALETTE 0
+
+#define PIPECCONF 0x72008
+
+#define PIPEBGCMAXRED 0x71010
+#define PIPEBGCMAXGREEN 0x71014
+#define PIPEBGCMAXBLUE 0x71018
+
+#define PIPEASTAT 0x70024
+#define PIPEBSTAT 0x71024
+#define PIPECSTAT 0x72024
+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2)
+#define PIPE_VBLANK_CLEAR (1 << 1)
+#define PIPE_VBLANK_STATUS (1 << 1)
+#define PIPE_TE_STATUS (1UL << 6)
+#define PIPE_DPST_EVENT_STATUS (1UL << 7)
+#define PIPE_VSYNC_CLEAR (1UL << 9)
+#define PIPE_VSYNC_STATUS (1UL << 9)
+#define PIPE_HDMI_AUDIO_UNDERRUN_STATUS (1UL << 10)
+#define PIPE_HDMI_AUDIO_BUFFER_DONE_STATUS (1UL << 11)
+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18)
+#define PIPE_TE_ENABLE (1UL << 22)
+#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
+#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
+#define PIPE_VSYNC_ENABL (1UL << 25)
+#define PIPE_HDMI_AUDIO_UNDERRUN (1UL << 26)
+#define PIPE_HDMI_AUDIO_BUFFER_DONE (1UL << 27)
+#define PIPE_FIFO_UNDERRUN (1UL << 31)
+#define PIPE_HDMI_AUDIO_INT_MASK (PIPE_HDMI_AUDIO_UNDERRUN | \
+ PIPE_HDMI_AUDIO_BUFFER_DONE)
+#define PIPE_EVENT_MASK ((1 << 29) | (1 << 28) | (1 << 27) | (1 << 26) | \
+			 (1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) | \
+			 (1 << 20) | (1 << 16))
+#define PIPE_VBLANK_MASK ((1 << 25) | (1 << 24) | (1 << 18) | (1 << 17))
+#define HISTOGRAM_INT_CONTROL 0x61268
+#define HISTOGRAM_BIN_DATA 0x61264
+#define HISTOGRAM_LOGIC_CONTROL 0x61260
+#define PWM_CONTROL_LOGIC 0x61250
+#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
+#define HISTOGRAM_INTERRUPT_ENABLE (1UL << 31)
+#define HISTOGRAM_LOGIC_ENABLE (1UL << 31)
+#define PWM_LOGIC_ENABLE (1UL << 31)
+#define PWM_PHASEIN_ENABLE (1UL << 25)
+#define PWM_PHASEIN_INT_ENABLE (1UL << 24)
+#define PWM_PHASEIN_VB_COUNT 0x00001f00
+#define PWM_PHASEIN_INC 0x0000001f
+#define HISTOGRAM_INT_CTRL_CLEAR (1UL << 30)
+#define DPST_YUV_LUMA_MODE 0
+
+struct dpst_ie_histogram_control {
+ union {
+ uint32_t data;
+ struct {
+ uint32_t bin_reg_index:7;
+ uint32_t reserved:4;
+ uint32_t bin_reg_func_select:1;
+ uint32_t sync_to_phase_in:1;
+ uint32_t alt_enhancement_mode:2;
+ uint32_t reserved1:1;
+ uint32_t sync_to_phase_in_count:8;
+ uint32_t histogram_mode_select:1;
+ uint32_t reserved2:4;
+ uint32_t ie_pipe_assignment:1;
+ uint32_t ie_mode_table_enabled:1;
+ uint32_t ie_histogram_enable:1;
+ };
+ };
+};
+
+struct dpst_guardband {
+ union {
+ uint32_t data;
+ struct {
+ uint32_t guardband:22;
+ uint32_t guardband_interrupt_delay:8;
+ uint32_t interrupt_status:1;
+ uint32_t interrupt_enable:1;
+ };
+ };
+};
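
These unions let DPST code move between the raw register word and its
bitfields. A hedged usage sketch (bitfield layout is compiler- and
ABI-dependent, which is why such unions stay private to one driver):

	/* Illustrative: decode the image-enhancement histogram control. */
	struct dpst_ie_histogram_control ie_ctl;

	ie_ctl.data = REG_READ(HISTOGRAM_LOGIC_CONTROL);
	if (ie_ctl.ie_histogram_enable)
		/* handle_bin() is a hypothetical consumer of the bin index */
		handle_bin(ie_ctl.bin_reg_index);
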
+
+#define PIPEAFRAMEHIGH 0x70040
+#define PIPEAFRAMEPIXEL 0x70044
+#define PIPEBFRAMEHIGH 0x71040
+#define PIPEBFRAMEPIXEL 0x71044
+#define PIPECFRAMEHIGH 0x72040
+#define PIPECFRAMEPIXEL 0x72044
+#define PIPE_FRAME_HIGH_MASK 0x0000ffff
+#define PIPE_FRAME_HIGH_SHIFT 0
+#define PIPE_FRAME_LOW_MASK 0xff000000
+#define PIPE_FRAME_LOW_SHIFT 24
+#define PIPE_PIXEL_MASK 0x00ffffff
+#define PIPE_PIXEL_SHIFT 0
+
+#define FW_BLC_SELF 0x20e0
+#define FW_BLC_SELF_EN (1<<15)
+
+#define DSPARB 0x70030
+#define DSPFW1 0x70034
+#define DSP_FIFO_SR_WM_MASK 0xFF800000
+#define DSP_FIFO_SR_WM_SHIFT 23
+#define CURSOR_B_FIFO_WM_MASK 0x003F0000
+#define CURSOR_B_FIFO_WM_SHIFT 16
+#define DSPFW2 0x70038
+#define CURSOR_A_FIFO_WM_MASK 0x3F00
+#define CURSOR_A_FIFO_WM_SHIFT 8
+#define DSP_PLANE_C_FIFO_WM_MASK 0x7F
+#define DSP_PLANE_C_FIFO_WM_SHIFT 0
+#define DSPFW3 0x7003c
+#define DSPFW4 0x70050
+#define DSPFW5 0x70054
+#define DSP_PLANE_B_FIFO_WM1_SHIFT 24
+#define DSP_PLANE_A_FIFO_WM1_SHIFT 16
+#define CURSOR_B_FIFO_WM1_SHIFT 8
+#define CURSOR_FIFO_SR_WM1_SHIFT 0
+#define DSPFW6 0x70058
+#define DSPCHICKENBIT 0x70400
+#define DSPACNTR 0x70180
+#define DSPBCNTR 0x71180
+#define DSPCCNTR 0x72180
+#define DISPLAY_PLANE_ENABLE (1 << 31)
+#define DISPLAY_PLANE_DISABLE 0
+#define DISPPLANE_GAMMA_ENABLE (1 << 30)
+#define DISPPLANE_GAMMA_DISABLE 0
+#define DISPPLANE_PIXFORMAT_MASK (0xf << 26)
+#define DISPPLANE_8BPP (0x2 << 26)
+#define DISPPLANE_15_16BPP (0x4 << 26)
+#define DISPPLANE_16BPP (0x5 << 26)
+#define DISPPLANE_32BPP_NO_ALPHA (0x6 << 26)
+#define DISPPLANE_32BPP (0x7 << 26)
+#define DISPPLANE_STEREO_ENABLE (1 << 25)
+#define DISPPLANE_STEREO_DISABLE 0
+#define DISPPLANE_SEL_PIPE_MASK (1 << 24)
+#define DISPPLANE_SEL_PIPE_POS 24
+#define DISPPLANE_SEL_PIPE_A 0
+#define DISPPLANE_SEL_PIPE_B (1 << 24)
+#define DISPPLANE_SRC_KEY_ENABLE (1 << 22)
+#define DISPPLANE_SRC_KEY_DISABLE 0
+#define DISPPLANE_LINE_DOUBLE (1 << 20)
+#define DISPPLANE_NO_LINE_DOUBLE 0
+#define DISPPLANE_STEREO_POLARITY_FIRST 0
+#define DISPPLANE_STEREO_POLARITY_SECOND (1 << 18)
+/* plane B only */
+#define DISPPLANE_ALPHA_TRANS_ENABLE (1 << 15)
+#define DISPPLANE_ALPHA_TRANS_DISABLE 0
+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
+#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
+#define DISPPLANE_BOTTOM (4)
+
+#define DSPABASE 0x70184
+#define DSPALINOFF 0x70184
+#define DSPASTRIDE 0x70188
+
+#define DSPBBASE 0x71184
+#define DSPBLINOFF 0x71184
+#define DSPBADDR DSPBBASE
+#define DSPBSTRIDE 0x71188
+
+#define DSPCBASE 0x72184
+#define DSPCLINOFF 0x72184
+#define DSPCSTRIDE 0x72188
+
+#define DSPAKEYVAL 0x70194
+#define DSPAKEYMASK 0x70198
+
+#define DSPAPOS 0x7018C /* reserved */
+#define DSPASIZE 0x70190
+#define DSPBPOS 0x7118C
+#define DSPBSIZE 0x71190
+#define DSPCPOS 0x7218C
+#define DSPCSIZE 0x72190
+
+#define DSPASURF 0x7019C
+#define DSPATILEOFF 0x701A4
+
+#define DSPBSURF 0x7119C
+#define DSPBTILEOFF 0x711A4
+
+#define DSPCSURF 0x7219C
+#define DSPCTILEOFF 0x721A4
+#define DSPCKEYMAXVAL 0x721A0
+#define DSPCKEYMINVAL 0x72194
+#define DSPCKEYMSK 0x72198
+
+#define VGACNTRL 0x71400
+#define VGA_DISP_DISABLE (1 << 31)
+#define VGA_2X_MODE (1 << 30)
+#define VGA_PIPE_B_SELECT (1 << 29)
+
+/*
+ * Overlay registers
+ */
+#define OV_C_OFFSET 0x08000
+#define OV_OVADD 0x30000
+#define OV_DOVASTA 0x30008
+# define OV_PIPE_SELECT ((1 << 6)|(1 << 7))
+# define OV_PIPE_SELECT_POS 6
+# define OV_PIPE_A 0
+# define OV_PIPE_C 1
+#define OV_OGAMC5 0x30010
+#define OV_OGAMC4 0x30014
+#define OV_OGAMC3 0x30018
+#define OV_OGAMC2 0x3001C
+#define OV_OGAMC1 0x30020
+#define OV_OGAMC0 0x30024
+#define OVC_OVADD 0x38000
+#define OVC_DOVCSTA 0x38008
+#define OVC_OGAMC5 0x38010
+#define OVC_OGAMC4 0x38014
+#define OVC_OGAMC3 0x38018
+#define OVC_OGAMC2 0x3801C
+#define OVC_OGAMC1 0x38020
+#define OVC_OGAMC0 0x38024
+
+/*
+ * Some BIOS scratch area registers. The 845 (and 830?) store the amount
+ * of video memory available to the BIOS in SWF1.
+ */
+#define SWF0 0x71410
+#define SWF1 0x71414
+#define SWF2 0x71418
+#define SWF3 0x7141c
+#define SWF4 0x71420
+#define SWF5 0x71424
+#define SWF6 0x71428
+
+/*
+ * 855 scratch registers.
+ */
+#define SWF00 0x70410
+#define SWF01 0x70414
+#define SWF02 0x70418
+#define SWF03 0x7041c
+#define SWF04 0x70420
+#define SWF05 0x70424
+#define SWF06 0x70428
+
+#define SWF10 SWF0
+#define SWF11 SWF1
+#define SWF12 SWF2
+#define SWF13 SWF3
+#define SWF14 SWF4
+#define SWF15 SWF5
+#define SWF16 SWF6
+
+#define SWF30 0x72414
+#define SWF31 0x72418
+#define SWF32 0x7241c
+
+
+/*
+ * Palette registers
+ */
+#define PALETTE_A 0x0a000
+#define PALETTE_B 0x0a800
+#define PALETTE_C 0x0ac00
+
+/* Cursor A & B regs */
+#define CURACNTR 0x70080
+#define CURSOR_MODE_DISABLE 0x00
+#define CURSOR_MODE_64_32B_AX 0x07
+#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define MCURSOR_GAMMA_ENABLE (1 << 26)
+#define CURABASE 0x70084
+#define CURAPOS 0x70088
+#define CURSOR_POS_MASK 0x007FF
+#define CURSOR_POS_SIGN 0x8000
+#define CURSOR_X_SHIFT 0
+#define CURSOR_Y_SHIFT 16
+#define CURBCNTR 0x700c0
+#define CURBBASE 0x700c4
+#define CURBPOS 0x700c8
+#define CURCCNTR 0x700e0
+#define CURCBASE 0x700e4
+#define CURCPOS 0x700e8
+
+/*
+ * Interrupt Registers
+ */
+#define IER 0x020a0
+#define IIR 0x020a4
+#define IMR 0x020a8
+#define ISR 0x020ac
+
+/*
+ * MOORESTOWN delta registers
+ */
+#define MRST_DPLL_A 0x0f014
+#define MDFLD_DPLL_B 0x0f018
+#define MDFLD_INPUT_REF_SEL (1 << 14)
+#define MDFLD_VCO_SEL (1 << 16)
+#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
+#define MDFLD_PLL_LATCHEN (1 << 28)
+#define MDFLD_PWR_GATE_EN (1 << 30)
+#define MDFLD_P1_MASK (0x1FF << 17)
+#define MRST_FPA0 0x0f040
+#define MRST_FPA1 0x0f044
+#define MDFLD_DPLL_DIV0 0x0f048
+#define MDFLD_DPLL_DIV1 0x0f04c
+#define MRST_PERF_MODE 0x020f4
+
+/*
+ * MEDFIELD HDMI registers
+ */
+#define HDMIPHYMISCCTL 0x61134
+#define HDMI_PHY_POWER_DOWN 0x7f
+#define HDMIB_CONTROL 0x61140
+#define HDMIB_PORT_EN (1 << 31)
+#define HDMIB_PIPE_B_SELECT (1 << 30)
+#define HDMIB_NULL_PACKET (1 << 9)
+#define HDMIB_HDCP_PORT (1 << 5)
+
+/* #define LVDS 0x61180 */
+#define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25)
+#define MRST_PANEL_24_DOT_1_FORMAT (1 << 24)
+#define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6)
+
+#define MIPI 0x61190
+#define MIPI_C 0x62190
+#define MIPI_PORT_EN (1 << 31)
+#define SEL_FLOPPED_HSTX (1 << 23)
+#define PASS_FROM_SPHY_TO_AFE (1 << 16)
+/* Turns on border drawing to allow centered display. */
+#define MIPI_BORDER_EN (1 << 15)
+#define MIPIA_3LANE_MIPIC_1LANE 0x1
+#define MIPIA_2LANE_MIPIC_2LANE 0x2
+#define TE_TRIGGER_DSI_PROTOCOL (1 << 2)
+#define TE_TRIGGER_GPIO_PIN (1 << 3)
+#define MIPI_TE_COUNT 0x61194
+
+/* #define PP_CONTROL 0x61204 */
+#define POWER_DOWN_ON_RESET (1 << 1)
+
+/* #define PFIT_CONTROL 0x61230 */
+#define PFIT_PIPE_SELECT (3 << 29)
+#define PFIT_PIPE_SELECT_SHIFT (29)
+
+/* #define BLC_PWM_CTL 0x61254 */
+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
+
+/* #define PIPEACONF 0x70008 */
+#define PIPEACONF_PIPE_STATE (1 << 30)
+/* #define DSPACNTR 0x70180 */
+
+#define MRST_DSPABASE 0x7019c
+#define MRST_DSPBBASE 0x7119c
+#define MDFLD_DSPCBASE 0x7219c
+
+/*
+ * Moorestown registers.
+ */
+
+/*
+ * MIPI IP registers
+ */
+#define MIPIC_REG_OFFSET 0x800
+
+#define DEVICE_READY_REG 0xb000
+#define LP_OUTPUT_HOLD (1 << 16)
+#define EXIT_ULPS_DEV_READY 0x3
+#define LP_OUTPUT_HOLD_RELEASE 0x810000
+# define ENTERING_ULPS (2 << 1)
+# define EXITING_ULPS (1 << 1)
+# define ULPS_MASK (3 << 1)
+# define BUS_POSSESSION (1 << 3)
+#define INTR_STAT_REG 0xb004
+#define RX_SOT_ERROR (1 << 0)
+#define RX_SOT_SYNC_ERROR (1 << 1)
+#define RX_ESCAPE_MODE_ENTRY_ERROR (1 << 3)
+#define RX_LP_TX_SYNC_ERROR (1 << 4)
+#define RX_HS_RECEIVE_TIMEOUT_ERROR (1 << 5)
+#define RX_FALSE_CONTROL_ERROR (1 << 6)
+#define RX_ECC_SINGLE_BIT_ERROR (1 << 7)
+#define RX_ECC_MULTI_BIT_ERROR (1 << 8)
+#define RX_CHECKSUM_ERROR (1 << 9)
+#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED (1 << 10)
+#define RX_DSI_VC_ID_INVALID (1 << 11)
+#define TX_FALSE_CONTROL_ERROR (1 << 12)
+#define TX_ECC_SINGLE_BIT_ERROR (1 << 13)
+#define TX_ECC_MULTI_BIT_ERROR (1 << 14)
+#define TX_CHECKSUM_ERROR (1 << 15)
+#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED (1 << 16)
+#define TX_DSI_VC_ID_INVALID (1 << 17)
+#define HIGH_CONTENTION (1 << 18)
+#define LOW_CONTENTION (1 << 19)
+#define DPI_FIFO_UNDER_RUN (1 << 20)
+#define HS_TX_TIMEOUT (1 << 21)
+#define LP_RX_TIMEOUT (1 << 22)
+#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
+#define ACK_WITH_NO_ERROR (1 << 24)
+#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
+#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
+#define SPL_PKT_SENT (1 << 30)
+#define INTR_EN_REG 0xb008
+#define DSI_FUNC_PRG_REG 0xb00c
+#define DPI_CHANNEL_NUMBER_POS 0x03
+#define DBI_CHANNEL_NUMBER_POS 0x05
+#define FMT_DPI_POS 0x07
+#define FMT_DBI_POS 0x0A
+#define DBI_DATA_WIDTH_POS 0x0D
+
+/* DPI PIXEL FORMATS */
+#define RGB_565_FMT 0x01 /* RGB 565 FORMAT */
+#define RGB_666_FMT 0x02 /* RGB 666 FORMAT */
+#define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED
+ * 666 FORMAT
+ */
+#define RGB_888_FMT 0x04 /* RGB 888 FORMAT */
+#define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */
+#define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */
+#define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */
+#define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */
+
+#define DBI_NOT_SUPPORTED 0x00 /* command mode
+ * is not supported
+ */
+#define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */
+#define DBI_DATA_WIDTH_9BIT 0x02 /* 9 bit data */
+#define DBI_DATA_WIDTH_8BIT 0x03 /* 8 bit data */
+#define DBI_DATA_WIDTH_OPT1 0x04 /* option 1 */
+#define DBI_DATA_WIDTH_OPT2 0x05 /* option 2 */
+
+#define HS_TX_TIMEOUT_REG 0xb010
+#define LP_RX_TIMEOUT_REG 0xb014
+#define TURN_AROUND_TIMEOUT_REG 0xb018
+#define DEVICE_RESET_REG 0xb01C
+#define DPI_RESOLUTION_REG 0xb020
+#define RES_V_POS 0x10
+#define DBI_RESOLUTION_REG 0xb024 /* Reserved for MDFLD */
+#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
+#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
+#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
+#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034
+#define VERT_SYNC_PAD_COUNT_REG 0xb038
+#define VERT_BACK_PORCH_COUNT_REG 0xb03c
+#define VERT_FRONT_PORCH_COUNT_REG 0xb040
+#define HIGH_LOW_SWITCH_COUNT_REG 0xb044
+#define DPI_CONTROL_REG 0xb048
+#define DPI_SHUT_DOWN (1 << 0)
+#define DPI_TURN_ON (1 << 1)
+#define DPI_COLOR_MODE_ON (1 << 2)
+#define DPI_COLOR_MODE_OFF (1 << 3)
+#define DPI_BACK_LIGHT_ON (1 << 4)
+#define DPI_BACK_LIGHT_OFF (1 << 5)
+#define DPI_LP (1 << 6)
+#define DPI_DATA_REG 0xb04c
+#define DPI_BACK_LIGHT_ON_DATA 0x07
+#define DPI_BACK_LIGHT_OFF_DATA 0x17
+#define INIT_COUNT_REG 0xb050
+#define MAX_RET_PAK_REG 0xb054
+#define VIDEO_FMT_REG 0xb058
+#define COMPLETE_LAST_PCKT (1 << 2)
+#define EOT_DISABLE_REG 0xb05c
+#define ENABLE_CLOCK_STOPPING (1 << 1)
+#define LP_BYTECLK_REG 0xb060
+#define LP_GEN_DATA_REG 0xb064
+#define HS_GEN_DATA_REG 0xb068
+#define LP_GEN_CTRL_REG 0xb06C
+#define HS_GEN_CTRL_REG 0xb070
+#define DCS_CHANNEL_NUMBER_POS 0x6
+#define MCS_COMMANDS_POS 0x8
+#define WORD_COUNTS_POS 0x8
+#define MCS_PARAMETER_POS 0x10
+#define GEN_FIFO_STAT_REG 0xb074
+#define HS_DATA_FIFO_FULL (1 << 0)
+#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
+#define HS_DATA_FIFO_EMPTY (1 << 2)
+#define LP_DATA_FIFO_FULL (1 << 8)
+#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
+#define LP_DATA_FIFO_EMPTY (1 << 10)
+#define HS_CTRL_FIFO_FULL (1 << 16)
+#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
+#define HS_CTRL_FIFO_EMPTY (1 << 18)
+#define LP_CTRL_FIFO_FULL (1 << 24)
+#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
+#define LP_CTRL_FIFO_EMPTY (1 << 26)
+#define DBI_FIFO_EMPTY (1 << 27)
+#define DPI_FIFO_EMPTY (1 << 28)
+#define HS_LS_DBI_ENABLE_REG 0xb078
+#define TXCLKESC_REG 0xb07c
+#define DPHY_PARAM_REG 0xb080
+#define DBI_BW_CTRL_REG 0xb084
+#define CLK_LANE_SWT_REG 0xb088
+
+/*
+ * MIPI Adapter registers
+ */
+#define MIPI_CONTROL_REG 0xb104
+#define MIPI_2X_CLOCK_BITS ((1 << 0) | (1 << 1))
+#define MIPI_DATA_ADDRESS_REG 0xb108
+#define MIPI_DATA_LENGTH_REG 0xb10C
+#define MIPI_COMMAND_ADDRESS_REG 0xb110
+#define MIPI_COMMAND_LENGTH_REG 0xb114
+#define MIPI_READ_DATA_RETURN_REG0 0xb118
+#define MIPI_READ_DATA_RETURN_REG1 0xb11C
+#define MIPI_READ_DATA_RETURN_REG2 0xb120
+#define MIPI_READ_DATA_RETURN_REG3 0xb124
+#define MIPI_READ_DATA_RETURN_REG4 0xb128
+#define MIPI_READ_DATA_RETURN_REG5 0xb12C
+#define MIPI_READ_DATA_RETURN_REG6 0xb130
+#define MIPI_READ_DATA_RETURN_REG7 0xb134
+#define MIPI_READ_DATA_VALID_REG 0xb138
+
+/* DBI COMMANDS */
+#define soft_reset 0x01
+/*
+ * The display module performs a software reset.
+ * Registers are written with their SW Reset default values.
+ */
+#define get_power_mode 0x0a
+/*
+ * The display module returns the current power mode
+ */
+#define get_address_mode 0x0b
+/*
+ * The display module returns the current status.
+ */
+#define get_pixel_format 0x0c
+/*
+ * This command gets the pixel format for the RGB image data
+ * used by the interface.
+ */
+#define get_display_mode 0x0d
+/*
+ * The display module returns the Display Image Mode status.
+ */
+#define get_signal_mode 0x0e
+/*
+ * The display module returns the Display Signal Mode.
+ */
+#define get_diagnostic_result 0x0f
+/*
+ * The display module returns the self-diagnostic results following
+ * a Sleep Out command.
+ */
+#define enter_sleep_mode 0x10
+/*
+ * This command causes the display module to enter the Sleep mode.
+ * In this mode, all unnecessary blocks inside the display module are
+ * disabled except interface communication. This is the lowest power
+ * mode the display module supports.
+ */
+#define exit_sleep_mode 0x11
+/*
+ * This command causes the display module to exit Sleep mode.
+ * All blocks inside the display module are enabled.
+ */
+#define enter_partial_mode 0x12
+/*
+ * This command causes the display module to enter the Partial Display
+ * Mode. The Partial Display Mode window is described by the
+ * set_partial_area command.
+ */
+#define enter_normal_mode 0x13
+/*
+ * This command causes the display module to enter the Normal mode.
+ * Normal mode is defined as both Partial Display mode and Scroll mode off.
+ */
+#define exit_invert_mode 0x20
+/*
+ * This command causes the display module to stop inverting the image
+ * data on the display device. The frame memory contents remain unchanged.
+ * No status bits are changed.
+ */
+#define enter_invert_mode 0x21
+/*
+ * This command causes the display module to invert the image data only on
+ * the display device. The frame memory contents remain unchanged.
+ * No status bits are changed.
+ */
+#define set_gamma_curve 0x26
+/*
+ * This command selects the desired gamma curve for the display device.
+ * Four fixed gamma curves are defined in the DCS specification.
+ */
+#define set_display_off 0x28
+/*
+ * This command causes the display module to stop displaying the image data
+ * on the display device. The frame memory contents remain unchanged.
+ * No status bits are changed.
+ */
+#define set_display_on 0x29
+/*
+ * This command causes the display module to start displaying the image data
+ * on the display device. The frame memory contents remain unchanged.
+ * No status bits are changed.
+ */
+#define set_column_address 0x2a
+/*
+ * This command defines the column extent of the frame memory accessed by
+ * the host processor with the read_memory_continue and
+ * write_memory_continue commands.
+ * No status bits are changed.
+ */
+#define set_page_addr 0x2b
+/*
+ * This command defines the page extent of the frame memory accessed by
+ * the host processor with the write_memory_continue and
+ * read_memory_continue command.
+ * No status bits are changed.
+ */
+#define write_mem_start 0x2c
+/*
+ * This command transfers image data from the host processor to the
+ * display module's frame memory starting at the pixel location specified
+ * by preceding set_column_address and set_page_address commands.
+ */
+#define set_partial_area 0x30
+/*
+ * This command defines the Partial Display mode's display area.
+ * There are two parameters associated with this command, the first
+ * defines the Start Row (SR) and the second the End Row (ER). SR and ER
+ * refer to the Frame Memory Line Pointer.
+ */
+#define set_scroll_area 0x33
+/*
+ * This command defines the display module's Vertical Scrolling Area.
+ */
+#define set_tear_off 0x34
+/*
+ * This command turns off the display module's Tearing Effect output
+ * signal on the TE signal line.
+ */
+#define set_tear_on 0x35
+/*
+ * This command turns on the display module's Tearing Effect output signal
+ * on the TE signal line.
+ */
+#define set_address_mode 0x36
+/*
+ * This command sets the data order for transfers from the host processor
+ * to the display module's frame memory, bits B[7:5] and B3, and from the
+ * display module's frame memory to the display device, bits B[2:0] and B4.
+ */
+#define set_scroll_start 0x37
+/*
+ * This command sets the start of the vertical scrolling area in the frame
+ * memory. The vertical scrolling area is fully defined when this command
+ * is used with the set_scroll_area command. The set_scroll_start command
+ * has one parameter, the Vertical Scroll Pointer (VSP). The VSP defines the
+ * line in the frame memory that is written to the display device as the
+ * first line of the vertical scroll area.
+ */
+#define exit_idle_mode 0x38
+/*
+ * This command causes the display module to exit Idle mode.
+ */
+#define enter_idle_mode 0x39
+/*
+ * This command causes the display module to enter Idle Mode.
+ * In Idle Mode, color expression is reduced. Colors are shown on the
+ * display device using the MSB of each of the R, G and B color
+ * components in the frame memory.
+ */
+#define set_pixel_format 0x3a
+/*
+ * This command sets the pixel format for the RGB image data used by the
+ * interface.
+ * Bits D[6:4] DPI Pixel Format Definition
+ * Bits D[2:0] DBI Pixel Format Definition
+ * Bits D7 and D3 are not used.
+ */
+#define DCS_PIXEL_FORMAT_3bpp 0x1
+#define DCS_PIXEL_FORMAT_8bpp 0x2
+#define DCS_PIXEL_FORMAT_12bpp 0x3
+#define DCS_PIXEL_FORMAT_16bpp 0x5
+#define DCS_PIXEL_FORMAT_18bpp 0x6
+#define DCS_PIXEL_FORMAT_24bpp 0x7
+
+#define write_mem_cont 0x3c
+
+/*
+ * This command transfers image data from the host processor to the
+ * display module's frame memory continuing from the pixel location
+ * following the previous write_memory_continue or write_memory_start
+ * command.
+ */
+#define set_tear_scanline 0x44
+/*
+ * This command turns on the display module's Tearing Effect output signal
+ * on the TE signal line when the display module reaches line N.
+ */
+#define get_scanline 0x45
+/*
+ * The display module returns the current scanline, N, used to update the
+ * display device. The total number of scanlines on a display device is
+ * defined as VSYNC + VBP + VACT + VFP. The first scanline is defined as
+ * the first line of V Sync and is denoted as Line 0.
+ * When in Sleep Mode, the value returned by get_scanline is undefined.
+ */
+
+/* MCS or Generic COMMANDS */
+/* MCS/generic data type */
+#define GEN_SHORT_WRITE_0 0x03 /* generic short write, no parameters */
+#define GEN_SHORT_WRITE_1 0x13 /* generic short write, 1 parameter */
+#define GEN_SHORT_WRITE_2 0x23 /* generic short write, 2 parameters */
+#define GEN_READ_0 0x04 /* generic read, no parameters */
+#define GEN_READ_1 0x14 /* generic read, 1 parameter */
+#define GEN_READ_2 0x24 /* generic read, 2 parameters */
+#define GEN_LONG_WRITE 0x29 /* generic long write */
+#define MCS_SHORT_WRITE_0 0x05 /* MCS short write, no parameters */
+#define MCS_SHORT_WRITE_1 0x15 /* MCS short write, 1 parameter */
+#define MCS_READ 0x06 /* MCS read, no parameters */
+#define MCS_LONG_WRITE 0x39 /* MCS long write */
+/* MCS/generic commands */
+/* TPO MCS */
+#define write_display_profile 0x50
+#define write_display_brightness 0x51
+#define write_ctrl_display 0x53
+#define write_ctrl_cabc 0x55
+ #define UI_IMAGE 0x01
+ #define STILL_IMAGE 0x02
+ #define MOVING_IMAGE 0x03
+#define write_hysteresis 0x57
+#define write_gamma_setting 0x58
+#define write_cabc_min_bright 0x5e
+#define write_kbbc_profile 0x60
+/* TMD MCS */
+#define tmd_write_display_brightness 0x8c
+
+/*
+ * This command is used to control ambient light, panel backlight
+ * brightness and gamma settings.
+ */
+#define BRIGHT_CNTL_BLOCK_ON (1 << 5)
+#define AMBIENT_LIGHT_SENSE_ON (1 << 4)
+#define DISPLAY_DIMMING_ON (1 << 3)
+#define BACKLIGHT_ON (1 << 2)
+#define DISPLAY_BRIGHTNESS_AUTO (1 << 1)
+#define GAMMA_AUTO (1 << 0)
+
+/* DCS Interface Pixel Formats */
+#define DCS_PIXEL_FORMAT_3BPP 0x1
+#define DCS_PIXEL_FORMAT_8BPP 0x2
+#define DCS_PIXEL_FORMAT_12BPP 0x3
+#define DCS_PIXEL_FORMAT_16BPP 0x5
+#define DCS_PIXEL_FORMAT_18BPP 0x6
+#define DCS_PIXEL_FORMAT_24BPP 0x7
+/* ONE PARAMETER READ DATA */
+#define addr_mode_data 0xfc
+#define diag_res_data 0x00
+#define disp_mode_data 0x23
+#define pxl_fmt_data 0x77
+#define pwr_mode_data 0x74
+#define sig_mode_data 0x00
+/* TWO PARAMETERS READ DATA */
+#define scanline_data1 0xff
+#define scanline_data2 0xff
+#define NON_BURST_MODE_SYNC_PULSE 0x01 /* Non Burst Mode
+ * with Sync Pulse
+ */
+#define NON_BURST_MODE_SYNC_EVENTS 0x02 /* Non Burst Mode
+ * with Sync events
+ */
+#define BURST_MODE 0x03 /* Burst Mode */
+#define DBI_COMMAND_BUFFER_SIZE 0x240 /* Allocate at least
+ * 0x100 bytes with 32
+ * byte alignment
+ */
+#define DBI_DATA_BUFFER_SIZE 0x120 /* Allocate at least
+ * 0x100 bytes with 32
+ * byte alignment
+ */
+#define DBI_CB_TIME_OUT 0xFFFF
+
+#define GEN_FB_TIME_OUT 2000
+
+#define SKU_83 0x01
+#define SKU_100 0x02
+#define SKU_100L 0x04
+#define SKU_BYPASS 0x08
+
+/* Some handy macros for playing with bitfields. */
+#define PSB_MASK(high, low) (((1<<((high)-(low)+1))-1)<<(low))
+#define SET_FIELD(value, field) (((value) << field ## _SHIFT) & field ## _MASK)
+#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
+
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
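
A quick illustration of the helpers, using the DSPFW1 watermark field defined
earlier in this header:

	/* GET_FIELD/SET_FIELD work with any FOO_MASK + FOO_SHIFT pair. */
	u32 dspfw1 = REG_READ(DSPFW1);
	u32 sr_wm = GET_FIELD(dspfw1, DSP_FIFO_SR_WM); /* (v & mask) >> shift */

	dspfw1 &= ~DSP_FIFO_SR_WM_MASK;
	dspfw1 |= SET_FIELD(sr_wm + 1, DSP_FIFO_SR_WM); /* (v << shift) & mask */
	REG_WRITE(DSPFW1, dspfw1);

	/* _PIPE indexes A/B pairs: _PIPE(1, _SB_M_A, _SB_M_B) == _SB_M_B */
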
+
+/* PCI config space */
+
+#define SB_PCKT 0x02100 /* cedarview */
+# define SB_OPCODE_MASK PSB_MASK(31, 16)
+# define SB_OPCODE_SHIFT 16
+# define SB_OPCODE_READ 0
+# define SB_OPCODE_WRITE 1
+# define SB_DEST_MASK PSB_MASK(15, 8)
+# define SB_DEST_SHIFT 8
+# define SB_DEST_DPLL 0x88
+# define SB_BYTE_ENABLE_MASK PSB_MASK(7, 4)
+# define SB_BYTE_ENABLE_SHIFT 4
+# define SB_BUSY (1 << 0)
+
+#define DSPCLK_GATE_D 0x6200
+# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* Fixed value on CDV */
+# define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11)
+# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6)
+# define DPUNIT_PIPEB_GATE_DISABLE (1 << 30)
+# define DPUNIT_PIPEA_GATE_DISABLE (1 << 25)
+# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24)
+# define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13)
+
+#define RAMCLK_GATE_D 0x6210
+
+/* 32-bit value read/written from the DPIO reg. */
+#define SB_DATA 0x02104 /* cedarview */
+/* 32-bit address of the DPIO reg to be read/written. */
+#define SB_ADDR 0x02108 /* cedarview */
+#define DPIO_CFG 0x02110 /* cedarview */
+# define DPIO_MODE_SELECT_1 (1 << 3)
+# define DPIO_MODE_SELECT_0 (1 << 2)
+# define DPIO_SFR_BYPASS (1 << 1)
+/* reset is active low */
+# define DPIO_CMN_RESET_N (1 << 0)
+
+/* Cedarview sideband registers */
+#define _SB_M_A 0x8008
+#define _SB_M_B 0x8028
+#define SB_M(pipe) _PIPE(pipe, _SB_M_A, _SB_M_B)
+# define SB_M_DIVIDER_MASK (0xFF << 24)
+# define SB_M_DIVIDER_SHIFT 24
+
+#define _SB_N_VCO_A 0x8014
+#define _SB_N_VCO_B 0x8034
+#define SB_N_VCO(pipe) _PIPE(pipe, _SB_N_VCO_A, _SB_N_VCO_B)
+#define SB_N_VCO_SEL_MASK PSB_MASK(31, 30)
+#define SB_N_VCO_SEL_SHIFT 30
+#define SB_N_DIVIDER_MASK PSB_MASK(29, 26)
+#define SB_N_DIVIDER_SHIFT 26
+#define SB_N_CB_TUNE_MASK PSB_MASK(25, 24)
+#define SB_N_CB_TUNE_SHIFT 24
+
+/* Bits 14:13 select the reference clock for pipe A/B */
+#define SB_REF_DPLLA 0x8010
+#define SB_REF_DPLLB 0x8030
+#define REF_CLK_MASK (0x3 << 13)
+#define REF_CLK_CORE (0 << 13)
+#define REF_CLK_DPLL (1 << 13)
+#define REF_CLK_DPLLA (2 << 13)
+/* DPLL B uses the reference clock from DPLL A when (2 << 13) is selected */
+
+#define _SB_REF_A 0x8018
+#define _SB_REF_B 0x8038
+#define SB_REF_SFR(pipe) _PIPE(pipe, _SB_REF_A, _SB_REF_B)
+
+#define _SB_P_A 0x801c
+#define _SB_P_B 0x803c
+#define SB_P(pipe) _PIPE(pipe, _SB_P_A, _SB_P_B)
+#define SB_P2_DIVIDER_MASK PSB_MASK(31, 30)
+#define SB_P2_DIVIDER_SHIFT 30
+#define SB_P2_10 0 /* HDMI, DP, DAC */
+#define SB_P2_5 1 /* DAC */
+#define SB_P2_14 2 /* LVDS single */
+#define SB_P2_7 3 /* LVDS double */
+#define SB_P1_DIVIDER_MASK PSB_MASK(15, 12)
+#define SB_P1_DIVIDER_SHIFT 12
+
+#define PSB_LANE0 0x120
+#define PSB_LANE1 0x220
+#define PSB_LANE2 0x2320
+#define PSB_LANE3 0x2420
+
+#define LANE_PLL_MASK (0x7 << 20)
+#define LANE_PLL_ENABLE (0x3 << 20)
+#define LANE_PLL_PIPE(p) (((p) == 0) ? (1 << 21) : (0 << 21))
+
+#define DP_B 0x64100
+#define DP_C 0x64200
+
+#define DP_PORT_EN (1 << 31)
+#define DP_PIPEB_SELECT (1 << 30)
+#define DP_PIPE_MASK (1 << 30)
+
+/* Link training mode - select a suitable mode for each stage */
+#define DP_LINK_TRAIN_PAT_1 (0 << 28)
+#define DP_LINK_TRAIN_PAT_2 (1 << 28)
+#define DP_LINK_TRAIN_PAT_IDLE (2 << 28)
+#define DP_LINK_TRAIN_OFF (3 << 28)
+#define DP_LINK_TRAIN_MASK (3 << 28)
+#define DP_LINK_TRAIN_SHIFT 28
+
+/* Signal voltages. These are mostly controlled by the other end */
+#define DP_VOLTAGE_0_4 (0 << 25)
+#define DP_VOLTAGE_0_6 (1 << 25)
+#define DP_VOLTAGE_0_8 (2 << 25)
+#define DP_VOLTAGE_1_2 (3 << 25)
+#define DP_VOLTAGE_MASK (7 << 25)
+#define DP_VOLTAGE_SHIFT 25
+
+/* Signal pre-emphasis levels; as with the voltages, the other end tells us
+ * what it wants
+ */
+#define DP_PRE_EMPHASIS_0 (0 << 22)
+#define DP_PRE_EMPHASIS_3_5 (1 << 22)
+#define DP_PRE_EMPHASIS_6 (2 << 22)
+#define DP_PRE_EMPHASIS_9_5 (3 << 22)
+#define DP_PRE_EMPHASIS_MASK (7 << 22)
+#define DP_PRE_EMPHASIS_SHIFT 22
+
+/* How many wires to use. I guess 3 was too hard */
+#define DP_PORT_WIDTH_1 (0 << 19)
+#define DP_PORT_WIDTH_2 (1 << 19)
+#define DP_PORT_WIDTH_4 (3 << 19)
+#define DP_PORT_WIDTH_MASK (7 << 19)
+
+/* Mystic DPCD version 1.1 special mode */
+#define DP_ENHANCED_FRAMING (1 << 18)
+
+/** locked once port is enabled */
+#define DP_PORT_REVERSAL (1 << 15)
+
+/** sends the clock on lane 15 of the PEG for debug */
+#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
+
+#define DP_SCRAMBLING_DISABLE (1 << 12)
+#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
+
+/** limit RGB values to avoid confusing TVs */
+#define DP_COLOR_RANGE_16_235 (1 << 8)
+
+/** Turn on the audio link */
+#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
+
+/** vs and hs sync polarity */
+#define DP_SYNC_VS_HIGH (1 << 4)
+#define DP_SYNC_HS_HIGH (1 << 3)
+
+/** A fantasy */
+#define DP_DETECTED (1 << 2)
+
+/** The aux channel provides a way to talk to the
+ * signal sink for DDC etc. Max packet size supported
+ * is 20 bytes in each direction, hence the 5 fixed
+ * data registers
+ */
+#define DPB_AUX_CH_CTL 0x64110
+#define DPB_AUX_CH_DATA1 0x64114
+#define DPB_AUX_CH_DATA2 0x64118
+#define DPB_AUX_CH_DATA3 0x6411c
+#define DPB_AUX_CH_DATA4 0x64120
+#define DPB_AUX_CH_DATA5 0x64124
+
+#define DPC_AUX_CH_CTL 0x64210
+#define DPC_AUX_CH_DATA1 0x64214
+#define DPC_AUX_CH_DATA2 0x64218
+#define DPC_AUX_CH_DATA3 0x6421c
+#define DPC_AUX_CH_DATA4 0x64220
+#define DPC_AUX_CH_DATA5 0x64224
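
The five 32-bit data registers hold one 20-byte AUX message. A hedged sketch
of loading an outgoing message, assuming the buffer is padded to a 4-byte
multiple (a real implementation handles the trailing partial word):

	/* Illustrative: pack an AUX message, MSB first per register. */
	static void sketch_aux_load(struct drm_device *dev,
				    const u8 *msg, int len)
	{
		int i;

		for (i = 0; i < len; i += 4)	/* len <= 20 */
			REG_WRITE(DPB_AUX_CH_DATA1 + i,
				  (msg[i] << 24) | (msg[i + 1] << 16) |
				  (msg[i + 2] << 8) | msg[i + 3]);
	}
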
+
+#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
+#define DP_AUX_CH_CTL_DONE (1 << 30)
+#define DP_AUX_CH_CTL_INTERRUPT (1 << 29)
+#define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28)
+#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26)
+#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
+#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
+#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20
+#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16)
+#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16
+#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15)
+#define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14)
+#define DP_AUX_CH_CTL_SYNC_TEST (1 << 13)
+#define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12)
+#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
+#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
+#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
+
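Putting the control-register fields together, a minimal sketch of composing a send command (illustration only, not part of the patch; the precharge and 2x bit-clock values below are placeholders, since the real divider depends on the core display clock):

/* Sketch: assemble an AUX control word for a short write. */
static u32 example_aux_ctl(int send_bytes)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |		/* status bit, written to clear */
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |	/* status bit, written to clear */
	       DP_AUX_CH_CTL_RECEIVE_ERROR |	/* status bit, written to clear */
	       DP_AUX_CH_CTL_TIME_OUT_400us |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |	/* placeholder */
	       (100 << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);	/* placeholder */
}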
+/*
+ * Computing GMCH M and N values for the Display Port link
+ *
+ * GMCH M/N = (dot clock * bytes per pixel) / (ls_clk * # of lanes)
+ *
+ * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
+ *
+ * The GMCH value is used internally
+ *
+ * bytes_per_pixel is the number of bytes coming out of the plane,
+ * which is after the LUTs, so we want the bytes for our color format.
+ * For our current usage, this is always 3, one byte for R, G and B.
+ */
+
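As a worked example of the formula (a sketch under the stated assumptions, not part of the patch): a 1920x1080@60 mode with a 148.5 MHz dot clock, 3 bytes per pixel, and 4 lanes at the 2.7 GHz rate (270 MHz link symbol clock) gives:

/* Sketch: data M/N for 1920x1080@60, 24bpp, 4 lanes at 2.7 GHz. */
static void example_gmch_data_m_n(u32 *m, u32 *n)
{
	u32 dot_clock_khz = 148500;
	u32 bytes_per_pixel = 3;	/* always 3 here, per the comment */
	u32 ls_clk_khz = 270000;
	u32 lane_count = 4;

	*m = dot_clock_khz * bytes_per_pixel;	/* 445500 */
	*n = ls_clk_khz * lane_count;		/* 1080000 */
	/* M/N ~= 0.41; both values fit the 24-bit register fields */
}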
+#define _PIPEA_GMCH_DATA_M 0x70050
+#define _PIPEB_GMCH_DATA_M 0x71050
+
+/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
+#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25)
+#define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25
+
+#define PIPE_GMCH_DATA_M_MASK (0xffffff)
+
+#define _PIPEA_GMCH_DATA_N 0x70054
+#define _PIPEB_GMCH_DATA_N 0x71054
+#define PIPE_GMCH_DATA_N_MASK (0xffffff)
+
+/*
+ * Computing Link M and N values for the Display Port link
+ *
+ * Link M / N = pixel_clock / ls_clk
+ *
+ * (the DP spec calls pixel_clock the 'strm_clk')
+ *
+ * The Link value is transmitted in the Main Stream
+ * Attributes and VB-ID.
+ */
+
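For the same illustrative mode, and reading ls_clk as the 270 MHz link symbol clock of the 2.7 GHz rate, Link M/N = 148500 / 270000 ≈ 0.55, which is the ratio the hardware then carries in the Main Stream Attributes.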
+#define _PIPEA_DP_LINK_M 0x70060
+#define _PIPEB_DP_LINK_M 0x71060
+#define PIPEA_DP_LINK_M_MASK (0xffffff)
+
+#define _PIPEA_DP_LINK_N 0x70064
+#define _PIPEB_DP_LINK_N 0x71064
+#define PIPEA_DP_LINK_N_MASK (0xffffff)
+
+#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
+#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
+#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
+#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
+
+#define PIPE_BPC_MASK (7 << 5)
+#define PIPE_8BPC (0 << 5)
+#define PIPE_10BPC (1 << 5)
+#define PIPE_6BPC (2 << 5)
+
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
new file mode 100644
index 00000000000..deeb0829b12
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -0,0 +1,2643 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "psb_intel_drv.h"
+#include <drm/gma_drm.h>
+#include "psb_drv.h"
+#include "psb_intel_sdvo_regs.h"
+#include "psb_intel_reg.h"
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+ SDVO_TV_MASK)
+
+#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
+#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
+#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+
+
+static const char *tv_format_names[] = {
+	"NTSC_M",   "NTSC_J",   "NTSC_443",
+	"PAL_B",    "PAL_D",    "PAL_G",
+	"PAL_H",    "PAL_I",    "PAL_M",
+	"PAL_N",    "PAL_NC",   "PAL_60",
+	"SECAM_B",  "SECAM_D",  "SECAM_G",
+	"SECAM_K",  "SECAM_K1", "SECAM_L",
+	"SECAM_60"
+};
+
+#define TV_FORMAT_NUM	ARRAY_SIZE(tv_format_names)
+
+struct psb_intel_sdvo {
+ struct gma_encoder base;
+
+ struct i2c_adapter *i2c;
+ u8 slave_addr;
+
+ struct i2c_adapter ddc;
+
+ /* Register for the SDVO device: SDVOB or SDVOC */
+ int sdvo_reg;
+
+ /* Active outputs controlled by this SDVO output */
+ uint16_t controlled_output;
+
+ /*
+ * Capabilities of the SDVO device returned by
+ * i830_sdvo_get_capabilities()
+ */
+ struct psb_intel_sdvo_caps caps;
+
+ /* Pixel clock limitations reported by the SDVO device, in kHz */
+ int pixel_clock_min, pixel_clock_max;
+
+	/*
+	 * For a multifunction SDVO device, this tracks
+	 * the currently attached outputs.
+	 */
+ uint16_t attached_output;
+
+ /**
+	 * This is used to select the color range of RGB outputs in HDMI mode.
+ * It is only valid when using TMDS encoding and 8 bit per color mode.
+ */
+ uint32_t color_range;
+
+ /**
+ * This is set if we're going to treat the device as TV-out.
+ *
+ * While we have these nice friendly flags for output types that ought
+ * to decide this for us, the S-Video output on our HDMI+S-Video card
+ * shows up as RGB1 (VGA).
+ */
+ bool is_tv;
+
+	/* Index of the currently selected TV format */
+ int tv_format_index;
+
+ /**
+ * This is set if we treat the device as HDMI, instead of DVI.
+ */
+ bool is_hdmi;
+ bool has_hdmi_monitor;
+ bool has_hdmi_audio;
+
+ /**
+ * This is set if we detect output of sdvo device as LVDS and
+ * have a valid fixed mode to use with the panel.
+ */
+ bool is_lvds;
+
+	/**
+	 * This is the SDVO LVDS fixed panel mode pointer
+	 */
+ struct drm_display_mode *sdvo_lvds_fixed_mode;
+
+ /* DDC bus used by this SDVO encoder */
+ uint8_t ddc_bus;
+
+ /* Input timings for adjusted_mode */
+ struct psb_intel_sdvo_dtd input_dtd;
+
+ /* Saved SDVO output states */
+ uint32_t saveSDVO; /* Can be SDVOB or SDVOC depending on sdvo_reg */
+};
+
+struct psb_intel_sdvo_connector {
+ struct gma_connector base;
+
+ /* Mark the type of connector */
+ uint16_t output_flag;
+
+ int force_audio;
+
+	/* This contains all currently supported TV formats */
+ u8 tv_format_supported[TV_FORMAT_NUM];
+ int format_supported_num;
+ struct drm_property *tv_format;
+
+ /* add the property for the SDVO-TV */
+ struct drm_property *left;
+ struct drm_property *right;
+ struct drm_property *top;
+ struct drm_property *bottom;
+ struct drm_property *hpos;
+ struct drm_property *vpos;
+ struct drm_property *contrast;
+ struct drm_property *saturation;
+ struct drm_property *hue;
+ struct drm_property *sharpness;
+ struct drm_property *flicker_filter;
+ struct drm_property *flicker_filter_adaptive;
+ struct drm_property *flicker_filter_2d;
+ struct drm_property *tv_chroma_filter;
+ struct drm_property *tv_luma_filter;
+ struct drm_property *dot_crawl;
+
+ /* add the property for the SDVO-TV/LVDS */
+ struct drm_property *brightness;
+
+ /* Add variable to record current setting for the above property */
+ u32 left_margin, right_margin, top_margin, bottom_margin;
+
+	/* Ranges of the margin properties */
+ u32 max_hscan, max_vscan;
+ u32 max_hpos, cur_hpos;
+ u32 max_vpos, cur_vpos;
+ u32 cur_brightness, max_brightness;
+ u32 cur_contrast, max_contrast;
+ u32 cur_saturation, max_saturation;
+ u32 cur_hue, max_hue;
+ u32 cur_sharpness, max_sharpness;
+ u32 cur_flicker_filter, max_flicker_filter;
+ u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive;
+ u32 cur_flicker_filter_2d, max_flicker_filter_2d;
+ u32 cur_tv_chroma_filter, max_tv_chroma_filter;
+ u32 cur_tv_luma_filter, max_tv_luma_filter;
+ u32 cur_dot_crawl, max_dot_crawl;
+};
+
+static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct psb_intel_sdvo, base.base);
+}
+
+static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+{
+ return container_of(gma_attached_encoder(connector),
+ struct psb_intel_sdvo, base);
+}
+
+static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
+{
+ return container_of(to_gma_connector(connector), struct psb_intel_sdvo_connector, base);
+}
+
+static bool
+psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags);
+static bool
+psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+ int type);
+static bool
+psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector);
+
+/**
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u32 val)
+{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ u32 bval = val, cval = val;
+ int i, j;
+ int need_aux = IS_MRST(dev) ? 1 : 0;
+
+ for (j = 0; j <= need_aux; j++) {
+ if (psb_intel_sdvo->sdvo_reg == SDVOB)
+ cval = REG_READ_WITH_AUX(SDVOC, j);
+ else
+ bval = REG_READ_WITH_AUX(SDVOB, j);
+
+ /*
+ * Write the registers twice for luck. Sometimes,
+ * writing them only once doesn't appear to 'stick'.
+ * The BIOS does this too. Yay, magic
+ */
+ for (i = 0; i < 2; i++) {
+ REG_WRITE_WITH_AUX(SDVOB, bval, j);
+ REG_READ_WITH_AUX(SDVOB, j);
+ REG_WRITE_WITH_AUX(SDVOC, cval, j);
+ REG_READ_WITH_AUX(SDVOC, j);
+ }
+ }
+}
+
+static bool psb_intel_sdvo_read_byte(struct psb_intel_sdvo *psb_intel_sdvo, u8 addr, u8 *ch)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = psb_intel_sdvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &addr,
+ },
+ {
+ .addr = psb_intel_sdvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = ch,
+ }
+ };
+ int ret;
+
+	ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, 2);
+	if (ret == 2)
+		return true;
+
+ DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
+ return false;
+}
+
+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+/** Mapping of command numbers to names, for debug output */
+static const struct _sdvo_cmd_name {
+ u8 cmd;
+ const char *name;
+} sdvo_cmd_names[] = {
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
+ /* Add the op code for SDVO enhancements */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
+ /* HDMI op code */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+};
+
+#define IS_SDVOB(reg) ((reg) == SDVOB)
+#define SDVO_NAME(sdvo) (IS_SDVOB((sdvo)->sdvo_reg) ? "SDVOB" : "SDVOC")
+
+static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+ const void *args, int args_len)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s: W: %02X ",
+ SDVO_NAME(psb_intel_sdvo), cmd);
+ for (i = 0; i < args_len; i++)
+ DRM_DEBUG_KMS("%02X ", ((u8 *)args)[i]);
+ for (; i < 8; i++)
+ DRM_DEBUG_KMS(" ");
+ for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+ if (cmd == sdvo_cmd_names[i].cmd) {
+ DRM_DEBUG_KMS("(%s)", sdvo_cmd_names[i].name);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(sdvo_cmd_names))
+ DRM_DEBUG_KMS("(%02X)", cmd);
+ DRM_DEBUG_KMS("\n");
+}
+
+static const char *cmd_status_names[] = {
+ "Power on",
+ "Success",
+ "Not supported",
+ "Invalid arg",
+ "Pending",
+ "Target not specified",
+ "Scaling not supported"
+};
+
+static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+ const void *args, int args_len)
+{
+	u8 buf[args_len * 2 + 2], status;
+ struct i2c_msg msgs[args_len + 3];
+ int i, ret;
+
+ psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
+
+ for (i = 0; i < args_len; i++) {
+ msgs[i].addr = psb_intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+		msgs[i].buf = buf + 2 * i;
+ buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+ buf[2*i + 1] = ((u8*)args)[i];
+ }
+ msgs[i].addr = psb_intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2*i;
+ buf[2*i + 0] = SDVO_I2C_OPCODE;
+ buf[2*i + 1] = cmd;
+
+ /* the following two are to read the response */
+ status = SDVO_I2C_CMD_STATUS;
+ msgs[i+1].addr = psb_intel_sdvo->slave_addr;
+ msgs[i+1].flags = 0;
+ msgs[i+1].len = 1;
+ msgs[i+1].buf = &status;
+
+ msgs[i+2].addr = psb_intel_sdvo->slave_addr;
+ msgs[i+2].flags = I2C_M_RD;
+ msgs[i+2].len = 1;
+ msgs[i+2].buf = &status;
+
+ ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, i+3);
+ if (ret < 0) {
+		DRM_DEBUG_KMS("I2C transfer returned %d\n", ret);
+ return false;
+ }
+ if (ret != i+3) {
+ /* failure in I2C transfer */
+		DRM_DEBUG_KMS("I2C transfer returned %d/%d\n", ret, i + 3);
+ return false;
+ }
+
+ return true;
+}
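For reference, a two-argument command therefore goes out as five I2C messages: register writes of {SDVO_I2C_ARG_0, args[0]}, {SDVO_I2C_ARG_0 - 1, args[1]} and {SDVO_I2C_OPCODE, cmd}, then a one-byte write selecting SDVO_I2C_CMD_STATUS and a one-byte read of it; psb_intel_sdvo_read_response() afterwards polls that same status register and fetches any return bytes.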
+
+static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
+ void *response, int response_len)
+{
+ u8 retry = 5;
+ u8 status;
+ int i;
+
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(psb_intel_sdvo));
+
+ /*
+ * The documentation states that all commands will be
+ * processed within 15µs, and that we need only poll
+ * the status byte a maximum of 3 times in order for the
+ * command to be complete.
+ *
+ * Check 5 times in case the hardware failed to read the docs.
+ */
+ if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+
+ while ((status == SDVO_CMD_STATUS_PENDING ||
+ status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) {
+ udelay(15);
+ if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+ }
+
+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+ DRM_DEBUG_KMS("(%s)", cmd_status_names[status]);
+ else
+ DRM_DEBUG_KMS("(??? %d)", status);
+
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ goto log_fail;
+
+ /* Read the command response */
+ for (i = 0; i < response_len; i++) {
+ if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]))
+ goto log_fail;
+ DRM_DEBUG_KMS(" %02X", ((u8 *)response)[i]);
+ }
+ DRM_DEBUG_KMS("\n");
+ return true;
+
+log_fail:
+ DRM_DEBUG_KMS("... failed\n");
+ return false;
+}
+
+static int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+{
+ if (mode->clock >= 100000)
+ return 1;
+ else if (mode->clock >= 50000)
+ return 2;
+ else
+ return 4;
+}
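The thresholds exist to keep the SDVO bus clock in its working range (nominally 100-200 MHz; stated here as background, not something this patch spells out): a 25.175 MHz dot clock, for example, returns 4 and puts the bus at roughly 100.7 MHz, while clocks of 100 MHz and up need no multiplication.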
+
+static bool psb_intel_sdvo_set_control_bus_switch(struct psb_intel_sdvo *psb_intel_sdvo,
+ u8 ddc_bus)
+{
+ /* This must be the immediately preceding write before the i2c xfer */
+ return psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ &ddc_bus, 1);
+}
+
+static bool psb_intel_sdvo_set_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, const void *data, int len)
+{
+ if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, data, len))
+ return false;
+
+ return psb_intel_sdvo_read_response(psb_intel_sdvo, NULL, 0);
+}
+
+static bool
+psb_intel_sdvo_get_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, void *value, int len)
+{
+ if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, NULL, 0))
+ return false;
+
+ return psb_intel_sdvo_read_response(psb_intel_sdvo, value, len);
+}
+
+static bool psb_intel_sdvo_set_target_input(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ struct psb_intel_sdvo_set_target_input_args targets = {0};
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_TARGET_INPUT,
+ &targets, sizeof(targets));
+}
+
+/**
+ * Return whether each input is trained.
+ *
+ * This function is making an assumption about the layout of the response,
+ * which should be checked against the docs.
+ */
+static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_sdvo *psb_intel_sdvo, bool *input_1, bool *input_2)
+{
+ struct psb_intel_sdvo_get_trained_inputs_response response;
+
+ BUILD_BUG_ON(sizeof(response) != 1);
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+ &response, sizeof(response)))
+ return false;
+
+ *input_1 = response.input0_trained;
+ *input_2 = response.input1_trained;
+ return true;
+}
+
+static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_sdvo *psb_intel_sdvo,
+ u16 outputs)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_ACTIVE_OUTPUTS,
+ &outputs, sizeof(outputs));
+}
+
+static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_sdvo *psb_intel_sdvo,
+ int mode)
+{
+ u8 state = SDVO_ENCODER_STATE_ON;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ state = SDVO_ENCODER_STATE_ON;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ state = SDVO_ENCODER_STATE_STANDBY;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ state = SDVO_ENCODER_STATE_SUSPEND;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ state = SDVO_ENCODER_STATE_OFF;
+ break;
+ }
+
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
+}
+
+static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_sdvo *psb_intel_sdvo,
+ int *clock_min,
+ int *clock_max)
+{
+ struct psb_intel_sdvo_pixel_clock_range clocks;
+
+ BUILD_BUG_ON(sizeof(clocks) != 4);
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+ &clocks, sizeof(clocks)))
+ return false;
+
+ /* Convert the values from units of 10 kHz to kHz. */
+ *clock_min = clocks.min * 10;
+ *clock_max = clocks.max * 10;
+ return true;
+}
+
+static bool psb_intel_sdvo_set_target_output(struct psb_intel_sdvo *psb_intel_sdvo,
+ u16 outputs)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_TARGET_OUTPUT,
+ &outputs, sizeof(outputs));
+}
+
+static bool psb_intel_sdvo_set_timing(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+ struct psb_intel_sdvo_dtd *dtd)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+ psb_intel_sdvo_set_value(psb_intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool psb_intel_sdvo_set_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_dtd *dtd)
+{
+ return psb_intel_sdvo_set_timing(psb_intel_sdvo,
+ SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool psb_intel_sdvo_set_output_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_dtd *dtd)
+{
+ return psb_intel_sdvo_set_timing(psb_intel_sdvo,
+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+static bool
+psb_intel_sdvo_create_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+ uint16_t clock,
+ uint16_t width,
+ uint16_t height)
+{
+ struct psb_intel_sdvo_preferred_input_timing_args args;
+
+ memset(&args, 0, sizeof(args));
+ args.clock = clock;
+ args.width = width;
+ args.height = height;
+ args.interlace = 0;
+
+ if (psb_intel_sdvo->is_lvds &&
+ (psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
+ psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
+ args.scaled = 1;
+
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+ &args, sizeof(args));
+}
+
+static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_dtd *dtd)
+{
+ BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+ BUILD_BUG_ON(sizeof(dtd->part2) != 8);
+ return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+ &dtd->part1, sizeof(dtd->part1)) &&
+ psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+ &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_sdvo *psb_intel_sdvo, u8 val)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+}
+
+static void psb_intel_sdvo_get_dtd_from_mode(struct psb_intel_sdvo_dtd *dtd,
+ const struct drm_display_mode *mode)
+{
+ uint16_t width, height;
+ uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+ uint16_t h_sync_offset, v_sync_offset;
+
+ width = mode->crtc_hdisplay;
+ height = mode->crtc_vdisplay;
+
+ /* do some mode translations */
+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+
+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+
+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+
+ dtd->part1.clock = mode->clock / 10;
+ dtd->part1.h_active = width & 0xff;
+ dtd->part1.h_blank = h_blank_len & 0xff;
+ dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
+ ((h_blank_len >> 8) & 0xf);
+ dtd->part1.v_active = height & 0xff;
+ dtd->part1.v_blank = v_blank_len & 0xff;
+ dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
+ ((v_blank_len >> 8) & 0xf);
+
+ dtd->part2.h_sync_off = h_sync_offset & 0xff;
+ dtd->part2.h_sync_width = h_sync_len & 0xff;
+ dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+ (v_sync_len & 0xf);
+ dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+ ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
+ ((v_sync_len & 0x30) >> 4);
+
+ dtd->part2.dtd_flags = 0x18;
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dtd->part2.dtd_flags |= 0x2;
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dtd->part2.dtd_flags |= 0x4;
+
+ dtd->part2.sdvo_flags = 0;
+ dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
+ dtd->part2.reserved = 0;
+}
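A worked example of the packing: a 1920-pixel-wide mode is 0x780, so h_active takes the low byte 0x80 and the upper nibble of h_high takes 0x7; the lower nibble of h_high likewise carries bits 8-11 of the horizontal blanking length.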
+
+static void psb_intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+ const struct psb_intel_sdvo_dtd *dtd)
+{
+ mode->hdisplay = dtd->part1.h_active;
+ mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+ mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
+ mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+ mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
+ mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+ mode->htotal = mode->hdisplay + dtd->part1.h_blank;
+ mode->htotal += (dtd->part1.h_high & 0xf) << 8;
+
+ mode->vdisplay = dtd->part1.v_active;
+ mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+ mode->vsync_start = mode->vdisplay;
+ mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+ mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+ mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+ mode->vsync_end = mode->vsync_start +
+ (dtd->part2.v_sync_off_width & 0xf);
+ mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+ mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
+ mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+
+ mode->clock = dtd->part1.clock * 10;
+
+ mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ if (dtd->part2.dtd_flags & 0x2)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ if (dtd->part2.dtd_flags & 0x4)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+}
+
+static bool psb_intel_sdvo_check_supp_encode(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ struct psb_intel_sdvo_encode encode;
+
+ BUILD_BUG_ON(sizeof(encode) != 2);
+ return psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_SUPP_ENCODE,
+ &encode, sizeof(encode));
+}
+
+static bool psb_intel_sdvo_set_encode(struct psb_intel_sdvo *psb_intel_sdvo,
+ uint8_t mode)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
+}
+
+static bool psb_intel_sdvo_set_colorimetry(struct psb_intel_sdvo *psb_intel_sdvo,
+ uint8_t mode)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+}
+
+#if 0
+static void psb_intel_sdvo_dump_hdmi_buf(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ int i, j;
+ uint8_t set_buf_index[2];
+ uint8_t av_split;
+ uint8_t buf_size;
+ uint8_t buf[48];
+ uint8_t *pos;
+
+	psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HBUF_AV_SPLIT,
+				 &av_split, 1);
+
+	for (i = 0; i <= av_split; i++) {
+		set_buf_index[0] = i;
+		set_buf_index[1] = 0;
+		psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+					 SDVO_CMD_SET_HBUF_INDEX,
+					 set_buf_index, 2);
+		psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+					 SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+		psb_intel_sdvo_read_response(psb_intel_sdvo, &buf_size, 1);
+
+		pos = buf;
+		for (j = 0; j <= buf_size; j += 8) {
+			psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+						 SDVO_CMD_GET_HBUF_DATA,
+						 NULL, 0);
+			psb_intel_sdvo_read_response(psb_intel_sdvo, pos, 8);
+			pos += 8;
+		}
+	}
+ }
+}
+#endif
+
+static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ DRM_INFO("HDMI is not supported yet");
+
+ return false;
+#if 0
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+ uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
+ uint8_t set_buf_index[2] = { 1, 0 };
+ uint64_t *data = (uint64_t *)&avi_if;
+ unsigned i;
+
+ intel_dip_infoframe_csum(&avi_if);
+
+ if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
+
+ for (i = 0; i < sizeof(avi_if); i += 8) {
+ if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_HBUF_DATA,
+ data, 8))
+ return false;
+ data++;
+ }
+
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_HBUF_TXRATE,
+ &tx_rate, 1);
+#endif
+}
+
+static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ struct psb_intel_sdvo_tv_format format;
+ uint32_t format_map;
+
+ format_map = 1 << psb_intel_sdvo->tv_format_index;
+ memset(&format, 0, sizeof(format));
+ memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
+
+ BUILD_BUG_ON(sizeof(format) != 6);
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_TV_FORMAT,
+ &format, sizeof(format));
+}
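As a concrete illustration: tv_format_index 3 selects PAL_B in tv_format_names, so format_map becomes 1 << 3 = 0x08, and on a little-endian machine the memcpy() places that bit in the first of the six format bytes, with the remainder zeroed by the memset().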
+
+static bool
+psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo,
+ const struct drm_display_mode *mode)
+{
+ struct psb_intel_sdvo_dtd output_dtd;
+
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+ psb_intel_sdvo->attached_output))
+ return false;
+
+ psb_intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ if (!psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &output_dtd))
+ return false;
+
+ return true;
+}
+
+static bool
+psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* Reset the input timing to the screen. Assume always input 0. */
+ if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+ return false;
+
+ if (!psb_intel_sdvo_create_preferred_input_timing(psb_intel_sdvo,
+ mode->clock / 10,
+ mode->hdisplay,
+ mode->vdisplay))
+ return false;
+
+ if (!psb_intel_sdvo_get_preferred_input_timing(psb_intel_sdvo,
+ &psb_intel_sdvo->input_dtd))
+ return false;
+
+ psb_intel_sdvo_get_mode_from_dtd(adjusted_mode, &psb_intel_sdvo->input_dtd);
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ return true;
+}
+
+static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+ int multiplier;
+
+ /* We need to construct preferred input timings based on our
+ * output timings. To do that, we have to set the output
+ * timings, even though this isn't really the right place in
+ * the sequence to do it. Oh well.
+ */
+ if (psb_intel_sdvo->is_tv) {
+ if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo, mode))
+ return false;
+
+ (void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
+ mode,
+ adjusted_mode);
+ } else if (psb_intel_sdvo->is_lvds) {
+ if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo,
+ psb_intel_sdvo->sdvo_lvds_fixed_mode))
+ return false;
+
+ (void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
+ mode,
+ adjusted_mode);
+ }
+
+ /* Make the CRTC code factor in the SDVO pixel multiplier. The
+ * SDVO device will factor out the multiplier during mode_set.
+ */
+ multiplier = psb_intel_sdvo_get_pixel_multiplier(adjusted_mode);
+ psb_intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+
+ return true;
+}
+
+static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+ u32 sdvox;
+ struct psb_intel_sdvo_in_out_map in_out;
+ struct psb_intel_sdvo_dtd input_dtd;
+ int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
+ int rate;
+ int need_aux = IS_MRST(dev) ? 1 : 0;
+
+ if (!mode)
+ return;
+
+ /* First, set the input mapping for the first input to our controlled
+ * output. This is only correct if we're a single-input device, in
+ * which case the first input is the output from the appropriate SDVO
+ * channel on the motherboard. In a two-input device, the first input
+ * will be SDVOB and the second SDVOC.
+ */
+ in_out.in0 = psb_intel_sdvo->attached_output;
+ in_out.in1 = 0;
+
+ psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_IN_OUT_MAP,
+ &in_out, sizeof(in_out));
+
+ /* Set the output timings to the screen */
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+ psb_intel_sdvo->attached_output))
+ return;
+
+ /* We have tried to get input timing in mode_fixup, and filled into
+ * adjusted_mode.
+ */
+ if (psb_intel_sdvo->is_tv || psb_intel_sdvo->is_lvds) {
+ input_dtd = psb_intel_sdvo->input_dtd;
+ } else {
+ /* Set the output timing to the screen */
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+ psb_intel_sdvo->attached_output))
+ return;
+
+ psb_intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ (void) psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &input_dtd);
+ }
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+ return;
+
+ if (psb_intel_sdvo->has_hdmi_monitor) {
+ psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_HDMI);
+ psb_intel_sdvo_set_colorimetry(psb_intel_sdvo,
+ SDVO_COLORIMETRY_RGB256);
+ psb_intel_sdvo_set_avi_infoframe(psb_intel_sdvo);
+	} else {
+		psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_DVI);
+	}
+
+ if (psb_intel_sdvo->is_tv &&
+ !psb_intel_sdvo_set_tv_format(psb_intel_sdvo))
+ return;
+
+ (void) psb_intel_sdvo_set_input_timing(psb_intel_sdvo, &input_dtd);
+
+ switch (pixel_multiplier) {
+ default:
+ case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+ case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+ case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+ }
+ if (!psb_intel_sdvo_set_clock_rate_mult(psb_intel_sdvo, rate))
+ return;
+
+ /* Set the SDVO control regs. */
+ if (need_aux)
+ sdvox = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
+ else
+ sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
+
+ switch (psb_intel_sdvo->sdvo_reg) {
+ case SDVOB:
+ sdvox &= SDVOB_PRESERVE_MASK;
+ break;
+ case SDVOC:
+ sdvox &= SDVOC_PRESERVE_MASK;
+ break;
+ }
+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+
+ if (gma_crtc->pipe == 1)
+ sdvox |= SDVO_PIPE_B_SELECT;
+ if (psb_intel_sdvo->has_hdmi_audio)
+ sdvox |= SDVO_AUDIO_ENABLE;
+
+ /* FIXME: Check if this is needed for PSB
+ sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+ */
+
+ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
+ sdvox |= SDVO_STALL_SELECT;
+ psb_intel_sdvo_write_sdvox(psb_intel_sdvo, sdvox);
+}
+
+static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+ u32 temp;
+ int i;
+ int need_aux = IS_MRST(dev) ? 1 : 0;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ DRM_DEBUG("DPMS_ON");
+ break;
+ case DRM_MODE_DPMS_OFF:
+ DRM_DEBUG("DPMS_OFF");
+ break;
+ default:
+ DRM_DEBUG("DPMS: %d", mode);
+ }
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, 0);
+ if (0)
+ psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
+
+ if (mode == DRM_MODE_DPMS_OFF) {
+ if (need_aux)
+ temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
+ else
+ temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+
+ if ((temp & SDVO_ENABLE) != 0) {
+ psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
+ }
+ }
+ } else {
+ bool input1, input2;
+ u8 status;
+
+ if (need_aux)
+ temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
+ else
+ temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+
+ if ((temp & SDVO_ENABLE) == 0)
+ psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
+
+ for (i = 0; i < 2; i++)
+ gma_wait_for_vblank(dev);
+
+ status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2);
+		/* Warn if the device reported failure to sync.
+		 * A lot of SDVO devices fail to notify of sync, but
+		 * given that the status is a success, we can assume
+		 * the link came up.
+		 */
+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+ DRM_DEBUG_KMS("First %s output reported failure to "
+ "sync\n", SDVO_NAME(psb_intel_sdvo));
+ }
+
+ if (0)
+ psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
+ psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, psb_intel_sdvo->attached_output);
+ }
+}
+
+static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (psb_intel_sdvo->pixel_clock_min > mode->clock)
+ return MODE_CLOCK_LOW;
+
+ if (psb_intel_sdvo->pixel_clock_max < mode->clock)
+ return MODE_CLOCK_HIGH;
+
+ if (psb_intel_sdvo->is_lvds) {
+ if (mode->hdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ return MODE_OK;
+}
+
+static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdvo, struct psb_intel_sdvo_caps *caps)
+{
+ BUILD_BUG_ON(sizeof(*caps) != 8);
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_DEVICE_CAPS,
+ caps, sizeof(*caps)))
+ return false;
+
+ DRM_DEBUG_KMS("SDVO capabilities:\n"
+ " vendor_id: %d\n"
+ " device_id: %d\n"
+ " device_rev_id: %d\n"
+ " sdvo_version_major: %d\n"
+ " sdvo_version_minor: %d\n"
+ " sdvo_inputs_mask: %d\n"
+ " smooth_scaling: %d\n"
+ " sharp_scaling: %d\n"
+ " up_scaling: %d\n"
+ " down_scaling: %d\n"
+ " stall_support: %d\n"
+ " output_flags: %d\n",
+ caps->vendor_id,
+ caps->device_id,
+ caps->device_rev_id,
+ caps->sdvo_version_major,
+ caps->sdvo_version_minor,
+ caps->sdvo_inputs_mask,
+ caps->smooth_scaling,
+ caps->sharp_scaling,
+ caps->up_scaling,
+ caps->down_scaling,
+ caps->stall_support,
+ caps->output_flags);
+
+ return true;
+}
+
+/* No use! */
+#if 0
+struct drm_connector* psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
+{
+ struct drm_connector *connector = NULL;
+ struct psb_intel_sdvo *iout = NULL;
+ struct psb_intel_sdvo *sdvo;
+
+ /* find the sdvo connector */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ iout = to_psb_intel_sdvo(connector);
+
+ if (iout->type != INTEL_OUTPUT_SDVO)
+ continue;
+
+ sdvo = iout->dev_priv;
+
+ if (sdvo->sdvo_reg == SDVOB && sdvoB)
+ return connector;
+
+ if (sdvo->sdvo_reg == SDVOC && !sdvoB)
+ return connector;
+
+ }
+
+ return NULL;
+}
+
+int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
+{
+ u8 response[2];
+ u8 status;
+ struct psb_intel_sdvo *psb_intel_sdvo;
+ DRM_DEBUG_KMS("\n");
+
+ if (!connector)
+ return 0;
+
+ psb_intel_sdvo = to_psb_intel_sdvo(connector);
+
+ return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+ &response, 2) && response[0];
+}
+
+void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
+{
+ u8 response[2];
+ u8 status;
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(connector);
+
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+
+ if (on) {
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ } else {
+ response[0] = 0;
+ response[1] = 0;
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ }
+
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+}
+#endif
+
+static bool
+psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+	/* Is there more than one type of output? */
+	int caps = psb_intel_sdvo->caps.output_flags & 0xf;
+
+	/* Clearing the lowest set bit leaves a non-zero value only
+	 * when at least two output bits are set. */
+	return caps & (caps - 1);
+}
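Checking the idiom with small values: caps = 0b0101 (two output types) gives 0b0101 & 0b0100 = 0b0100, non-zero; caps = 0b0100 (one type) gives 0b0100 & 0b0011 = 0.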
+
+static struct edid *
+psb_intel_sdvo_get_edid(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ return drm_get_edid(connector, &sdvo->ddc);
+}
+
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+
+ return drm_get_edid(connector,
+ &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+}
+
+static enum drm_connector_status
+psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ enum drm_connector_status status;
+ struct edid *edid;
+
+ edid = psb_intel_sdvo_get_edid(connector);
+
+ if (edid == NULL && psb_intel_sdvo_multifunc_encoder(psb_intel_sdvo)) {
+ u8 ddc, saved_ddc = psb_intel_sdvo->ddc_bus;
+
+		/*
+		 * Don't pass 1 as the DDC bus switch argument when
+		 * fetching the EDID: bus 1 is used for the SDVO SPD ROM.
+		 */
+ for (ddc = psb_intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+ psb_intel_sdvo->ddc_bus = ddc;
+ edid = psb_intel_sdvo_get_edid(connector);
+ if (edid)
+ break;
+ }
+ /*
+ * If we found the EDID on the other bus,
+ * assume that is the correct DDC bus.
+ */
+ if (edid == NULL)
+ psb_intel_sdvo->ddc_bus = saved_ddc;
+ }
+
+	/*
+	 * When there is no EDID and no monitor is connected to the VGA
+	 * port, try the CRT DDC to read the EDID for the DVI connector.
+	 */
+ if (edid == NULL)
+ edid = psb_intel_sdvo_get_analog_edid(connector);
+
+ status = connector_status_unknown;
+ if (edid != NULL) {
+ /* DDC bus is shared, match EDID to connector type */
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ if (psb_intel_sdvo->is_hdmi) {
+ psb_intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ psb_intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ }
+ } else
+ status = connector_status_disconnected;
+ kfree(edid);
+ }
+
+ if (status == connector_status_connected) {
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ if (psb_intel_sdvo_connector->force_audio)
+ psb_intel_sdvo->has_hdmi_audio = psb_intel_sdvo_connector->force_audio > 0;
+ }
+
+ return status;
+}
+
+static enum drm_connector_status
+psb_intel_sdvo_detect(struct drm_connector *connector, bool force)
+{
+ uint16_t response;
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ enum drm_connector_status ret;
+
+ if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+ return connector_status_unknown;
+
+ /* add 30ms delay when the output type might be TV */
+ if (psb_intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
+ mdelay(30);
+
+ if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2))
+ return connector_status_unknown;
+
+ DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+ response & 0xff, response >> 8,
+ psb_intel_sdvo_connector->output_flag);
+
+ if (response == 0)
+ return connector_status_disconnected;
+
+ psb_intel_sdvo->attached_output = response;
+
+ psb_intel_sdvo->has_hdmi_monitor = false;
+ psb_intel_sdvo->has_hdmi_audio = false;
+
+ if ((psb_intel_sdvo_connector->output_flag & response) == 0)
+ ret = connector_status_disconnected;
+ else if (IS_TMDS(psb_intel_sdvo_connector))
+ ret = psb_intel_sdvo_hdmi_sink_detect(connector);
+ else {
+ struct edid *edid;
+
+		/* If we have an EDID, check that it matches the connector type */
+ edid = psb_intel_sdvo_get_edid(connector);
+ if (edid == NULL)
+ edid = psb_intel_sdvo_get_analog_edid(connector);
+ if (edid != NULL) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL)
+ ret = connector_status_disconnected;
+ else
+ ret = connector_status_connected;
+ kfree(edid);
+ } else
+ ret = connector_status_connected;
+ }
+
+	/* May need to update encoder flags, e.g. the TV clock for SDVO TV. */
+ if (ret == connector_status_connected) {
+ psb_intel_sdvo->is_tv = false;
+ psb_intel_sdvo->is_lvds = false;
+ psb_intel_sdvo->base.needs_tv_clock = false;
+
+ if (response & SDVO_TV_MASK) {
+ psb_intel_sdvo->is_tv = true;
+ psb_intel_sdvo->base.needs_tv_clock = true;
+ }
+ if (response & SDVO_LVDS_MASK)
+ psb_intel_sdvo->is_lvds = psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL;
+ }
+
+ return ret;
+}
+
+static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+{
+ struct edid *edid;
+
+ /* set the bus switch and get the modes */
+ edid = psb_intel_sdvo_get_edid(connector);
+
+ /*
+ * Mac mini hack. On this device, the DVI-I connector shares one DDC
+ * link between analog and digital outputs. So, if the regular SDVO
+ * DDC fails, check to see if the analog output is disconnected, in
+ * which case we'll look there for the digital DDC data.
+ */
+ if (edid == NULL)
+ edid = psb_intel_sdvo_get_analog_edid(connector);
+
+ if (edid != NULL) {
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_TMDS(psb_intel_sdvo_connector);
+
+ if (connector_is_digital == monitor_is_digital) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_add_edid_modes(connector, edid);
+ }
+
+ kfree(edid);
+ }
+}
+
+/*
+ * Set of SDVO TV modes.
+ * Note! This is in reply order (see loop in get_tv_modes).
+ * XXX: all 60Hz refresh?
+ */
+static const struct drm_display_mode sdvo_tv_modes[] = {
+ { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
+ 416, 0, 200, 201, 232, 233, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
+ 416, 0, 240, 241, 272, 273, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
+ 496, 0, 300, 301, 332, 333, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
+ 736, 0, 350, 351, 382, 383, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
+ 736, 0, 400, 401, 432, 433, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
+ 736, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
+ 800, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
+ 800, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
+ 816, 0, 350, 351, 382, 383, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
+ 816, 0, 400, 401, 432, 433, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
+ 816, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
+ 816, 0, 540, 541, 572, 573, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
+ 816, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
+ 864, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
+ 896, 0, 600, 601, 632, 633, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
+ 928, 0, 624, 625, 656, 657, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
+ 1016, 0, 766, 767, 798, 799, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
+ 1120, 0, 768, 769, 800, 801, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
+ 1376, 0, 1024, 1025, 1056, 1057, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
+static void psb_intel_sdvo_get_tv_modes(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct psb_intel_sdvo_sdtv_resolution_request tv_res;
+ uint32_t reply = 0, format_map = 0;
+ int i;
+
+ /* Read the list of supported input resolutions for the selected TV
+ * format.
+ */
+ format_map = 1 << psb_intel_sdvo->tv_format_index;
+ memcpy(&tv_res, &format_map,
+ min(sizeof(format_map), sizeof(struct psb_intel_sdvo_sdtv_resolution_request)));
+
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, psb_intel_sdvo->attached_output))
+ return;
+
+ BUILD_BUG_ON(sizeof(tv_res) != 3);
+ if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+ SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ &tv_res, sizeof(tv_res)))
+ return;
+ if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &reply, 3))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
+ if (reply & (1 << i)) {
+ struct drm_display_mode *nmode;
+ nmode = drm_mode_duplicate(connector->dev,
+ &sdvo_tv_modes[i]);
+ if (nmode)
+ drm_mode_probed_add(connector, nmode);
+ }
+}
+
+static void psb_intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_display_mode *newmode;
+
+ /*
+ * Attempt to get the mode list from DDC.
+ * Assume that the preferred modes are
+ * arranged in priority order.
+ */
+ psb_intel_ddc_get_modes(connector, psb_intel_sdvo->i2c);
+	if (!list_empty(&connector->probed_modes))
+ goto end;
+
+ /* Fetch modes from VBT */
+ if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
+ newmode = drm_mode_duplicate(connector->dev,
+ dev_priv->sdvo_lvds_vbt_mode);
+ if (newmode != NULL) {
+ /* Guarantee the mode is preferred */
+ newmode->type = (DRM_MODE_TYPE_PREFERRED |
+ DRM_MODE_TYPE_DRIVER);
+ drm_mode_probed_add(connector, newmode);
+ }
+ }
+
+end:
+ list_for_each_entry(newmode, &connector->probed_modes, head) {
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ psb_intel_sdvo->sdvo_lvds_fixed_mode =
+ drm_mode_duplicate(connector->dev, newmode);
+
+ drm_mode_set_crtcinfo(psb_intel_sdvo->sdvo_lvds_fixed_mode,
+ 0);
+
+ psb_intel_sdvo->is_lvds = true;
+ break;
+ }
+ }
+
+}
+
+static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+
+ if (IS_TV(psb_intel_sdvo_connector))
+ psb_intel_sdvo_get_tv_modes(connector);
+ else if (IS_LVDS(psb_intel_sdvo_connector))
+ psb_intel_sdvo_get_lvds_modes(connector);
+ else
+ psb_intel_sdvo_get_ddc_modes(connector);
+
+ return !list_empty(&connector->probed_modes);
+}
+
+static void
+psb_intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ struct drm_device *dev = connector->dev;
+
+ if (psb_intel_sdvo_connector->left)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->left);
+ if (psb_intel_sdvo_connector->right)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->right);
+ if (psb_intel_sdvo_connector->top)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->top);
+ if (psb_intel_sdvo_connector->bottom)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->bottom);
+ if (psb_intel_sdvo_connector->hpos)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->hpos);
+ if (psb_intel_sdvo_connector->vpos)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->vpos);
+ if (psb_intel_sdvo_connector->saturation)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->saturation);
+ if (psb_intel_sdvo_connector->contrast)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->contrast);
+ if (psb_intel_sdvo_connector->hue)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->hue);
+ if (psb_intel_sdvo_connector->sharpness)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->sharpness);
+ if (psb_intel_sdvo_connector->flicker_filter)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter);
+ if (psb_intel_sdvo_connector->flicker_filter_2d)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_2d);
+ if (psb_intel_sdvo_connector->flicker_filter_adaptive)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_adaptive);
+ if (psb_intel_sdvo_connector->tv_luma_filter)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->tv_luma_filter);
+ if (psb_intel_sdvo_connector->tv_chroma_filter)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->tv_chroma_filter);
+ if (psb_intel_sdvo_connector->dot_crawl)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->dot_crawl);
+ if (psb_intel_sdvo_connector->brightness)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->brightness);
+}
+
+static void psb_intel_sdvo_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+
+ if (psb_intel_sdvo_connector->tv_format)
+ drm_property_destroy(connector->dev,
+ psb_intel_sdvo_connector->tv_format);
+
+ psb_intel_sdvo_destroy_enhance_property(connector);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static bool psb_intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct edid *edid;
+ bool has_audio = false;
+
+ if (!psb_intel_sdvo->is_hdmi)
+ return false;
+
+	edid = psb_intel_sdvo_get_edid(connector);
+	if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+		has_audio = drm_detect_monitor_audio(edid);
+	kfree(edid);
+
+	return has_audio;
+}
+
+static int
+psb_intel_sdvo_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ uint16_t temp_value;
+ uint8_t cmd;
+ int ret;
+
+ ret = drm_object_property_set_value(&connector->base, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ int i = val;
+ bool has_audio;
+
+ if (i == psb_intel_sdvo_connector->force_audio)
+ return 0;
+
+ psb_intel_sdvo_connector->force_audio = i;
+
+ if (i == 0)
+ has_audio = psb_intel_sdvo_detect_hdmi_audio(connector);
+ else
+ has_audio = i > 0;
+
+ if (has_audio == psb_intel_sdvo->has_hdmi_audio)
+ return 0;
+
+ psb_intel_sdvo->has_hdmi_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!psb_intel_sdvo->color_range)
+ return 0;
+
+ psb_intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
+#define CHECK_PROPERTY(name, NAME) \
+ if (psb_intel_sdvo_connector->name == property) { \
+ if (psb_intel_sdvo_connector->cur_##name == temp_value) return 0; \
+ if (psb_intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
+ cmd = SDVO_CMD_SET_##NAME; \
+ psb_intel_sdvo_connector->cur_##name = temp_value; \
+ goto set_value; \
+ }
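+
+	/*
+	 * For example, CHECK_PROPERTY(hue, HUE) below expands (modulo line
+	 * breaks) to:
+	 *
+	 *	if (psb_intel_sdvo_connector->hue == property) {
+	 *		if (psb_intel_sdvo_connector->cur_hue == temp_value)
+	 *			return 0;
+	 *		if (psb_intel_sdvo_connector->max_hue < temp_value)
+	 *			return -EINVAL;
+	 *		cmd = SDVO_CMD_SET_HUE;
+	 *		psb_intel_sdvo_connector->cur_hue = temp_value;
+	 *		goto set_value;
+	 *	}
+	 */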
+
+ if (property == psb_intel_sdvo_connector->tv_format) {
+ if (val >= TV_FORMAT_NUM)
+ return -EINVAL;
+
+ if (psb_intel_sdvo->tv_format_index ==
+ psb_intel_sdvo_connector->tv_format_supported[val])
+ return 0;
+
+ psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[val];
+ goto done;
+ } else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
+ temp_value = val;
+ if (psb_intel_sdvo_connector->left == property) {
+ drm_object_property_set_value(&connector->base,
+ psb_intel_sdvo_connector->right, val);
+ if (psb_intel_sdvo_connector->left_margin == temp_value)
+ return 0;
+
+ psb_intel_sdvo_connector->left_margin = temp_value;
+ psb_intel_sdvo_connector->right_margin = temp_value;
+ temp_value = psb_intel_sdvo_connector->max_hscan -
+ psb_intel_sdvo_connector->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ goto set_value;
+ } else if (psb_intel_sdvo_connector->right == property) {
+ drm_object_property_set_value(&connector->base,
+ psb_intel_sdvo_connector->left, val);
+ if (psb_intel_sdvo_connector->right_margin == temp_value)
+ return 0;
+
+ psb_intel_sdvo_connector->left_margin = temp_value;
+ psb_intel_sdvo_connector->right_margin = temp_value;
+ temp_value = psb_intel_sdvo_connector->max_hscan -
+ psb_intel_sdvo_connector->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ goto set_value;
+ } else if (psb_intel_sdvo_connector->top == property) {
+ drm_object_property_set_value(&connector->base,
+ psb_intel_sdvo_connector->bottom, val);
+ if (psb_intel_sdvo_connector->top_margin == temp_value)
+ return 0;
+
+ psb_intel_sdvo_connector->top_margin = temp_value;
+ psb_intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = psb_intel_sdvo_connector->max_vscan -
+ psb_intel_sdvo_connector->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ goto set_value;
+ } else if (psb_intel_sdvo_connector->bottom == property) {
+ drm_object_property_set_value(&connector->base,
+ psb_intel_sdvo_connector->top, val);
+ if (psb_intel_sdvo_connector->bottom_margin == temp_value)
+ return 0;
+
+ psb_intel_sdvo_connector->top_margin = temp_value;
+ psb_intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = psb_intel_sdvo_connector->max_vscan -
+ psb_intel_sdvo_connector->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ goto set_value;
+ }
+ CHECK_PROPERTY(hpos, HPOS)
+ CHECK_PROPERTY(vpos, VPOS)
+ CHECK_PROPERTY(saturation, SATURATION)
+ CHECK_PROPERTY(contrast, CONTRAST)
+ CHECK_PROPERTY(hue, HUE)
+ CHECK_PROPERTY(brightness, BRIGHTNESS)
+ CHECK_PROPERTY(sharpness, SHARPNESS)
+ CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
+ CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
+ CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
+ CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
+ CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
+ CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
+ }
+
+ return -EINVAL; /* unknown property */
+
+set_value:
+ if (!psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &temp_value, 2))
+ return -EIO;
+
+done:
+ if (psb_intel_sdvo->base.base.crtc) {
+ struct drm_crtc *crtc = psb_intel_sdvo->base.base.crtc;
+ drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
+ crtc->y, crtc->primary->fb);
+ }
+
+ return 0;
+#undef CHECK_PROPERTY
+}
+
+static void psb_intel_sdvo_save(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(&gma_encoder->base);
+
+ sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg);
+}
+
+static void psb_intel_sdvo_restore(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_encoder *encoder = &gma_attached_encoder(connector)->base;
+ struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder);
+ struct drm_crtc *crtc = encoder->crtc;
+
+ REG_WRITE(sdvo->sdvo_reg, sdvo->saveSDVO);
+
+ /* Force a full mode set on the crtc. We're supposed to have the
+ mode_config lock already. */
+ if (connector->status == connector_status_connected)
+ drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
+ NULL);
+}
+
+static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
+ .dpms = psb_intel_sdvo_dpms,
+ .mode_fixup = psb_intel_sdvo_mode_fixup,
+ .prepare = gma_encoder_prepare,
+ .mode_set = psb_intel_sdvo_mode_set,
+ .commit = gma_encoder_commit,
+};
+
+static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = psb_intel_sdvo_save,
+ .restore = psb_intel_sdvo_restore,
+ .detect = psb_intel_sdvo_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = psb_intel_sdvo_set_property,
+ .destroy = psb_intel_sdvo_destroy,
+};
+
+static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
+ .get_modes = psb_intel_sdvo_get_modes,
+ .mode_valid = psb_intel_sdvo_mode_valid,
+ .best_encoder = gma_best_encoder,
+};
+
+static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+
+ if (psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+ drm_mode_destroy(encoder->dev,
+ psb_intel_sdvo->sdvo_lvds_fixed_mode);
+
+ i2c_del_adapter(&psb_intel_sdvo->ddc);
+ gma_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
+ .destroy = psb_intel_sdvo_enc_destroy,
+};
+
+static void
+psb_intel_sdvo_guess_ddc_bus(struct psb_intel_sdvo *sdvo)
+{
+	/* FIXME: At the moment, ddc_bus = 2 is the only thing that works.
+	 * We need to figure out whether this is true for all available
+	 * Poulsbo hardware, or whether we need to fiddle with the disabled
+	 * guessing code below. The problem might go away if we can parse
+	 * the SDVO mappings from the BIOS. */
+ sdvo->ddc_bus = 2;
+
+#if 0
+ uint16_t mask = 0;
+ unsigned int num_bits;
+
+ /* Make a mask of outputs less than or equal to our own priority in the
+ * list.
+ */
+ switch (sdvo->controlled_output) {
+ case SDVO_OUTPUT_LVDS1:
+ mask |= SDVO_OUTPUT_LVDS1;
+ case SDVO_OUTPUT_LVDS0:
+ mask |= SDVO_OUTPUT_LVDS0;
+ case SDVO_OUTPUT_TMDS1:
+ mask |= SDVO_OUTPUT_TMDS1;
+ case SDVO_OUTPUT_TMDS0:
+ mask |= SDVO_OUTPUT_TMDS0;
+ case SDVO_OUTPUT_RGB1:
+ mask |= SDVO_OUTPUT_RGB1;
+ case SDVO_OUTPUT_RGB0:
+ mask |= SDVO_OUTPUT_RGB0;
+ break;
+ }
+
+ /* Count bits to find what number we are in the priority list. */
+ mask &= sdvo->caps.output_flags;
+ num_bits = hweight16(mask);
+ /* If more than 3 outputs, default to DDC bus 3 for now. */
+ if (num_bits > 3)
+ num_bits = 3;
+
+ /* Corresponds to SDVO_CONTROL_BUS_DDCx */
+ sdvo->ddc_bus = 1 << num_bits;
+#endif
+}
+
+/**
+ * Choose the appropriate DDC bus for control bus switch command for this
+ * SDVO output based on the controlled output.
+ *
+ * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
+ * outputs, then LVDS outputs.
+ */
+static void
+psb_intel_sdvo_select_ddc_bus(struct drm_psb_private *dev_priv,
+ struct psb_intel_sdvo *sdvo, u32 reg)
+{
+ struct sdvo_device_mapping *mapping;
+
+ if (IS_SDVOB(reg))
+ mapping = &(dev_priv->sdvo_mappings[0]);
+ else
+ mapping = &(dev_priv->sdvo_mappings[1]);
+
+ if (mapping->initialized)
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+ else
+ psb_intel_sdvo_guess_ddc_bus(sdvo);
+}
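+
+/*
+ * Example: the BIOS stores the DDC pin in the high nibble of
+ * mapping->ddc_pin, so ddc_pin = 0x20 yields ddc_bus = 1 << 2, i.e.
+ * SDVO_CONTROL_BUS_DDC2.
+ */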
+
+static void
+psb_intel_sdvo_select_i2c_bus(struct drm_psb_private *dev_priv,
+ struct psb_intel_sdvo *sdvo, u32 reg)
+{
+ struct sdvo_device_mapping *mapping;
+ u8 pin, speed;
+
+ if (IS_SDVOB(reg))
+ mapping = &dev_priv->sdvo_mappings[0];
+ else
+ mapping = &dev_priv->sdvo_mappings[1];
+
+ pin = GMBUS_PORT_DPB;
+ speed = GMBUS_RATE_1MHZ >> 8;
+ if (mapping->initialized) {
+ pin = mapping->i2c_pin;
+ speed = mapping->i2c_speed;
+ }
+
+ if (pin < GMBUS_NUM_PORTS) {
+ sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+ gma_intel_gmbus_set_speed(sdvo->i2c, speed);
+ gma_intel_gmbus_force_bit(sdvo->i2c, true);
+ } else
+ sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
+}
+
+static bool
+psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+ return psb_intel_sdvo_check_supp_encode(psb_intel_sdvo);
+}
+
+static u8
+psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct sdvo_device_mapping *my_mapping, *other_mapping;
+
+ if (IS_SDVOB(sdvo_reg)) {
+ my_mapping = &dev_priv->sdvo_mappings[0];
+ other_mapping = &dev_priv->sdvo_mappings[1];
+ } else {
+ my_mapping = &dev_priv->sdvo_mappings[1];
+ other_mapping = &dev_priv->sdvo_mappings[0];
+ }
+
+ /* If the BIOS described our SDVO device, take advantage of it. */
+ if (my_mapping->slave_addr)
+ return my_mapping->slave_addr;
+
+ /* If the BIOS only described a different SDVO device, use the
+ * address that it isn't using.
+ */
+ if (other_mapping->slave_addr) {
+ if (other_mapping->slave_addr == 0x70)
+ return 0x72;
+ else
+ return 0x70;
+ }
+
+	/* No SDVO device info was found for the other DVO port either,
+	 * so fall back to the mapping assumption used before BIOS parsing.
+	 */
+ if (IS_SDVOB(sdvo_reg))
+ return 0x70;
+ else
+ return 0x72;
+}
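+
+/*
+ * Note: 0x70/0x72 are the 8-bit (write) forms of the two SDVO slave
+ * addresses; psb_intel_sdvo_init() shifts the returned value right by
+ * one bit to get the 7-bit address the I2C layer expects.
+ */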
+
+static void
+psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
+ struct psb_intel_sdvo *encoder)
+{
+ drm_connector_init(encoder->base.base.dev,
+ &connector->base.base,
+ &psb_intel_sdvo_connector_funcs,
+ connector->base.base.connector_type);
+
+ drm_connector_helper_add(&connector->base.base,
+ &psb_intel_sdvo_connector_helper_funcs);
+
+ connector->base.base.interlace_allowed = 0;
+ connector->base.base.doublescan_allowed = 0;
+ connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+
+ gma_connector_attach_encoder(&connector->base, &encoder->base);
+ drm_sysfs_connector_add(&connector->base.base);
+}
+
+static void
+psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
+{
+ /* FIXME: We don't support HDMI at the moment
+ struct drm_device *dev = connector->base.base.dev;
+
+ intel_attach_force_audio_property(&connector->base.base);
+ intel_attach_broadcast_rgb_property(&connector->base.base);
+ */
+}
+
+static bool
+psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct gma_connector *intel_connector;
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+ psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+ if (!psb_intel_sdvo_connector)
+ return false;
+
+ if (device == 0) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ } else if (device == 1) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+ }
+
+ intel_connector = &psb_intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ // connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+ if (psb_intel_sdvo_is_hdmi_connector(psb_intel_sdvo, device)) {
+ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ psb_intel_sdvo->is_hdmi = true;
+ }
+ psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT));
+
+ psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+ if (psb_intel_sdvo->is_hdmi)
+ psb_intel_sdvo_add_hdmi_properties(psb_intel_sdvo_connector);
+
+ return true;
+}
+
+static bool
+psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
+{
+ struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct gma_connector *intel_connector;
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+ psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+ if (!psb_intel_sdvo_connector)
+ return false;
+
+ intel_connector = &psb_intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+
+ psb_intel_sdvo->controlled_output |= type;
+ psb_intel_sdvo_connector->output_flag = type;
+
+ psb_intel_sdvo->is_tv = true;
+ psb_intel_sdvo->base.needs_tv_clock = true;
+ psb_intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+ psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+
+ if (!psb_intel_sdvo_tv_create_property(psb_intel_sdvo, psb_intel_sdvo_connector, type))
+ goto err;
+
+ if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ psb_intel_sdvo_destroy(connector);
+ return false;
+}
+
+static bool
+psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct gma_connector *intel_connector;
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+ psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+ if (!psb_intel_sdvo_connector)
+ return false;
+
+ intel_connector = &psb_intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+ if (device == 0) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ } else if (device == 1) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ }
+
+ psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT));
+
+ psb_intel_sdvo_connector_init(psb_intel_sdvo_connector,
+ psb_intel_sdvo);
+ return true;
+}
+
+static bool
+psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct gma_connector *intel_connector;
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+ psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+ if (!psb_intel_sdvo_connector)
+ return false;
+
+ intel_connector = &psb_intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+ if (device == 0) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ } else if (device == 1) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ }
+
+ psb_intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+
+ psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+ if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ psb_intel_sdvo_destroy(connector);
+ return false;
+}
+
+static bool
+psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags)
+{
+ psb_intel_sdvo->is_tv = false;
+ psb_intel_sdvo->base.needs_tv_clock = false;
+ psb_intel_sdvo->is_lvds = false;
+
+	/* In SDVO, an XXX1 function block may not exist unless the
+	 * corresponding XXX0 block does. */
+
+ if (flags & SDVO_OUTPUT_TMDS0)
+ if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+ if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 1))
+ return false;
+
+ /* TV has no XXX1 function block */
+ if (flags & SDVO_OUTPUT_SVID0)
+ if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_SVID0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_CVBS0)
+ if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_CVBS0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_RGB0)
+ if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+ if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 1))
+ return false;
+
+ if (flags & SDVO_OUTPUT_LVDS0)
+ if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+ if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 1))
+ return false;
+
+ if ((flags & SDVO_OUTPUT_MASK) == 0) {
+ unsigned char bytes[2];
+
+ psb_intel_sdvo->controlled_output = 0;
+ memcpy(bytes, &psb_intel_sdvo->caps.output_flags, 2);
+ DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+ SDVO_NAME(psb_intel_sdvo),
+ bytes[0], bytes[1]);
+ return false;
+ }
+ psb_intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
+
+ return true;
+}
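+
+/*
+ * Example: caps.output_flags = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0
+ * creates one DVI connector and one VGA connector, both as device 0;
+ * the device-1 paths only run when both bits of a TMDS/RGB/LVDS pair
+ * are set.
+ */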
+
+static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+ int type)
+{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ struct psb_intel_sdvo_tv_format format;
+ uint32_t format_map, i;
+
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, type))
+ return false;
+
+ BUILD_BUG_ON(sizeof(format) != 6);
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+ &format, sizeof(format)))
+ return false;
+
+ memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
+
+ if (format_map == 0)
+ return false;
+
+ psb_intel_sdvo_connector->format_supported_num = 0;
+	for (i = 0; i < TV_FORMAT_NUM; i++)
+ if (format_map & (1 << i))
+ psb_intel_sdvo_connector->tv_format_supported[psb_intel_sdvo_connector->format_supported_num++] = i;
+
+ psb_intel_sdvo_connector->tv_format =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", psb_intel_sdvo_connector->format_supported_num);
+ if (!psb_intel_sdvo_connector->tv_format)
+ return false;
+
+ for (i = 0; i < psb_intel_sdvo_connector->format_supported_num; i++)
+ drm_property_add_enum(
+ psb_intel_sdvo_connector->tv_format, i,
+ i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
+
+ psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
+ drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base,
+ psb_intel_sdvo_connector->tv_format, 0);
+ return true;
+}
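+
+/*
+ * Example: a format reply with only ntsc_m and pal_b set gives
+ * format_map = 0x9, so tv_format_supported[] becomes { 0, 3 } and the
+ * "mode" enum property is built from the two matching tv_format_names
+ * entries.
+ */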
+
+#define ENHANCEMENT(name, NAME) do { \
+ if (enhancements.name) { \
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+ !psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+ return false; \
+ psb_intel_sdvo_connector->max_##name = data_value[0]; \
+ psb_intel_sdvo_connector->cur_##name = response; \
+ psb_intel_sdvo_connector->name = \
+ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
+ if (!psb_intel_sdvo_connector->name) return false; \
+ drm_object_attach_property(&connector->base, \
+ psb_intel_sdvo_connector->name, \
+ psb_intel_sdvo_connector->cur_##name); \
+ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+ data_value[0], data_value[1], response); \
+ } \
+} while (0)
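+
+/*
+ * For example, ENHANCEMENT(hue, HUE) queries SDVO_CMD_GET_MAX_HUE and
+ * SDVO_CMD_GET_HUE, records max_hue/cur_hue, and attaches a "hue" range
+ * property (0..max) to the connector.
+ */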
+
+static bool
+psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+ struct psb_intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+	/* When horizontal overscan is supported, add the left/right property */
+ if (enhancements.overscan_h) {
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_H,
+ &data_value, 4))
+ return false;
+
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_H,
+ &response, 2))
+ return false;
+
+ psb_intel_sdvo_connector->max_hscan = data_value[0];
+ psb_intel_sdvo_connector->left_margin = data_value[0] - response;
+ psb_intel_sdvo_connector->right_margin = psb_intel_sdvo_connector->left_margin;
+ psb_intel_sdvo_connector->left =
+ drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
+ if (!psb_intel_sdvo_connector->left)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ psb_intel_sdvo_connector->left,
+ psb_intel_sdvo_connector->left_margin);
+
+ psb_intel_sdvo_connector->right =
+ drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
+ if (!psb_intel_sdvo_connector->right)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ psb_intel_sdvo_connector->right,
+ psb_intel_sdvo_connector->right_margin);
+ DRM_DEBUG_KMS("h_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+
+ if (enhancements.overscan_v) {
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_V,
+ &data_value, 4))
+ return false;
+
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_V,
+ &response, 2))
+ return false;
+
+ psb_intel_sdvo_connector->max_vscan = data_value[0];
+ psb_intel_sdvo_connector->top_margin = data_value[0] - response;
+ psb_intel_sdvo_connector->bottom_margin = psb_intel_sdvo_connector->top_margin;
+ psb_intel_sdvo_connector->top =
+ drm_property_create_range(dev, 0, "top_margin", 0, data_value[0]);
+ if (!psb_intel_sdvo_connector->top)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ psb_intel_sdvo_connector->top,
+ psb_intel_sdvo_connector->top_margin);
+
+ psb_intel_sdvo_connector->bottom =
+ drm_property_create_range(dev, 0, "bottom_margin", 0, data_value[0]);
+ if (!psb_intel_sdvo_connector->bottom)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ psb_intel_sdvo_connector->bottom,
+ psb_intel_sdvo_connector->bottom_margin);
+ DRM_DEBUG_KMS("v_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+
+ ENHANCEMENT(hpos, HPOS);
+ ENHANCEMENT(vpos, VPOS);
+ ENHANCEMENT(saturation, SATURATION);
+ ENHANCEMENT(contrast, CONTRAST);
+ ENHANCEMENT(hue, HUE);
+ ENHANCEMENT(sharpness, SHARPNESS);
+ ENHANCEMENT(brightness, BRIGHTNESS);
+ ENHANCEMENT(flicker_filter, FLICKER_FILTER);
+ ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+ ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
+ ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
+ ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
+
+ if (enhancements.dot_crawl) {
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+ return false;
+
+ psb_intel_sdvo_connector->max_dot_crawl = 1;
+ psb_intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+ psb_intel_sdvo_connector->dot_crawl =
+ drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
+ if (!psb_intel_sdvo_connector->dot_crawl)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ psb_intel_sdvo_connector->dot_crawl,
+ psb_intel_sdvo_connector->cur_dot_crawl);
+ DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+ }
+
+ return true;
+}
+
+static bool
+psb_intel_sdvo_create_enhance_property_lvds(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+ struct psb_intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+ ENHANCEMENT(brightness, BRIGHTNESS);
+
+ return true;
+}
+#undef ENHANCEMENT
+
+static bool psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector)
+{
+ union {
+ struct psb_intel_sdvo_enhancements_reply reply;
+ uint16_t response;
+ } enhancements;
+
+ BUILD_BUG_ON(sizeof(enhancements) != 2);
+
+ enhancements.response = 0;
+ psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ &enhancements, sizeof(enhancements));
+ if (enhancements.response == 0) {
+ DRM_DEBUG_KMS("No enhancement is supported\n");
+ return true;
+ }
+
+ if (IS_TV(psb_intel_sdvo_connector))
+ return psb_intel_sdvo_create_enhance_property_tv(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
+	else if (IS_LVDS(psb_intel_sdvo_connector))
+ return psb_intel_sdvo_create_enhance_property_lvds(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
+ else
+ return true;
+}
+
+static int psb_intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct psb_intel_sdvo *sdvo = adapter->algo_data;
+
+ if (!psb_intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+ return -EIO;
+
+ return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
+
+static u32 psb_intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+ struct psb_intel_sdvo *sdvo = adapter->algo_data;
+ return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+static const struct i2c_algorithm psb_intel_sdvo_ddc_proxy = {
+ .master_xfer = psb_intel_sdvo_ddc_proxy_xfer,
+ .functionality = psb_intel_sdvo_ddc_proxy_func
+};
+
+static bool
+psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
+ struct drm_device *dev)
+{
+ sdvo->ddc.owner = THIS_MODULE;
+ sdvo->ddc.class = I2C_CLASS_DDC;
+ snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+ sdvo->ddc.dev.parent = &dev->pdev->dev;
+ sdvo->ddc.algo_data = sdvo;
+ sdvo->ddc.algo = &psb_intel_sdvo_ddc_proxy;
+
+ return i2c_add_adapter(&sdvo->ddc) == 0;
+}
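+
+/*
+ * Usage sketch (hypothetical caller): reading an EDID block through the
+ * proxy adapter. The proxy's master_xfer switches the SDVO control bus
+ * to the selected DDC bus before forwarding to the real GMBUS adapter:
+ *
+ *	u8 offset = 0, block[128];
+ *	struct i2c_msg msgs[] = {
+ *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
+ *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = block },
+ *	};
+ *	if (i2c_transfer(&sdvo->ddc, msgs, ARRAY_SIZE(msgs)) == 2)
+ *		parse_edid(block);	(parse_edid() is hypothetical)
+ */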
+
+bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_encoder *gma_encoder;
+ struct psb_intel_sdvo *psb_intel_sdvo;
+ int i;
+
+ psb_intel_sdvo = kzalloc(sizeof(struct psb_intel_sdvo), GFP_KERNEL);
+ if (!psb_intel_sdvo)
+ return false;
+
+ psb_intel_sdvo->sdvo_reg = sdvo_reg;
+ psb_intel_sdvo->slave_addr = psb_intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+ psb_intel_sdvo_select_i2c_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
+ if (!psb_intel_sdvo_init_ddc_proxy(psb_intel_sdvo, dev)) {
+ kfree(psb_intel_sdvo);
+ return false;
+ }
+
+ /* encoder type will be decided later */
+ gma_encoder = &psb_intel_sdvo->base;
+ gma_encoder->type = INTEL_OUTPUT_SDVO;
+ drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
+
+ /* Read the regs to test if we can talk to the device */
+ for (i = 0; i < 0x40; i++) {
+ u8 byte;
+
+ if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, i, &byte)) {
+ DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ goto err;
+ }
+ }
+
+ if (IS_SDVOB(sdvo_reg))
+ dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
+ else
+ dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
+
+ drm_encoder_helper_add(&gma_encoder->base, &psb_intel_sdvo_helper_funcs);
+
+	/* In the default case, SDVO LVDS is false */
+ if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
+ goto err;
+
+	if (!psb_intel_sdvo_output_setup(psb_intel_sdvo,
+					 psb_intel_sdvo->caps.output_flags)) {
+ DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ goto err;
+ }
+
+ psb_intel_sdvo_select_ddc_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
+
+	/* Set the input timing to the screen. Assume that input 0 is always used. */
+ if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+ goto err;
+
+ if (!psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_sdvo,
+ &psb_intel_sdvo->pixel_clock_min,
+ &psb_intel_sdvo->pixel_clock_max))
+ goto err;
+
+ DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+ "clock range %dMHz - %dMHz, "
+ "input 1: %c, input 2: %c, "
+ "output 1: %c, output 2: %c\n",
+ SDVO_NAME(psb_intel_sdvo),
+ psb_intel_sdvo->caps.vendor_id, psb_intel_sdvo->caps.device_id,
+ psb_intel_sdvo->caps.device_rev_id,
+ psb_intel_sdvo->pixel_clock_min / 1000,
+ psb_intel_sdvo->pixel_clock_max / 1000,
+ (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+ (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+ /* check currently supported outputs */
+ psb_intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+ psb_intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+ return true;
+
+err:
+ drm_encoder_cleanup(&gma_encoder->base);
+ i2c_del_adapter(&psb_intel_sdvo->ddc);
+ kfree(psb_intel_sdvo);
+
+ return false;
+}
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h b/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h
new file mode 100644
index 00000000000..600e79744d6
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h
@@ -0,0 +1,723 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+/**
+ * @file SDVO command definitions and structures.
+ */
+
+#define SDVO_OUTPUT_FIRST (0)
+#define SDVO_OUTPUT_TMDS0 (1 << 0)
+#define SDVO_OUTPUT_RGB0 (1 << 1)
+#define SDVO_OUTPUT_CVBS0 (1 << 2)
+#define SDVO_OUTPUT_SVID0 (1 << 3)
+#define SDVO_OUTPUT_YPRPB0 (1 << 4)
+#define SDVO_OUTPUT_SCART0 (1 << 5)
+#define SDVO_OUTPUT_LVDS0 (1 << 6)
+#define SDVO_OUTPUT_TMDS1 (1 << 8)
+#define SDVO_OUTPUT_RGB1 (1 << 9)
+#define SDVO_OUTPUT_CVBS1 (1 << 10)
+#define SDVO_OUTPUT_SVID1 (1 << 11)
+#define SDVO_OUTPUT_YPRPB1 (1 << 12)
+#define SDVO_OUTPUT_SCART1 (1 << 13)
+#define SDVO_OUTPUT_LVDS1 (1 << 14)
+#define SDVO_OUTPUT_LAST (14)
+
+struct psb_intel_sdvo_caps {
+ u8 vendor_id;
+ u8 device_id;
+ u8 device_rev_id;
+ u8 sdvo_version_major;
+ u8 sdvo_version_minor;
+ unsigned int sdvo_inputs_mask:2;
+ unsigned int smooth_scaling:1;
+ unsigned int sharp_scaling:1;
+ unsigned int up_scaling:1;
+ unsigned int down_scaling:1;
+ unsigned int stall_support:1;
+ unsigned int pad:1;
+ u16 output_flags;
+} __attribute__((packed));
+
+/** This matches the EDID DTD structure, more or less */
+struct psb_intel_sdvo_dtd {
+ struct {
+ u16 clock; /**< pixel clock, in 10kHz units */
+ u8 h_active; /**< lower 8 bits (pixels) */
+ u8 h_blank; /**< lower 8 bits (pixels) */
+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
+ u8 v_active; /**< lower 8 bits (lines) */
+ u8 v_blank; /**< lower 8 bits (lines) */
+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
+ } part1;
+
+ struct {
+ u8 h_sync_off; /**< lower 8 bits, from hblank start */
+ u8 h_sync_width; /**< lower 8 bits (pixels) */
+ /** lower 4 bits each vsync offset, vsync width */
+ u8 v_sync_off_width;
+ /**
+ * 2 high bits of hsync offset, 2 high bits of hsync width,
+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+ */
+ u8 sync_off_width_high;
+ u8 dtd_flags;
+ u8 sdvo_flags;
+		/** bits 6-7 of vsync offset, at bits 6-7 of this field */
+ u8 v_sync_off_high;
+ u8 reserved;
+ } part2;
+} __attribute__((packed));
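+
+/*
+ * Illustrative decode, assuming the nibble layout used by the i915 SDVO
+ * code (high nibble of h_high carries h_active bits 8-11, low nibble
+ * h_blank bits 8-11):
+ *
+ *	width  = dtd->part1.h_active | ((dtd->part1.h_high >> 4) << 8);
+ *	hblank = dtd->part1.h_blank | ((dtd->part1.h_high & 0xf) << 8);
+ */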
+
+struct psb_intel_sdvo_pixel_clock_range {
+ u16 min; /**< pixel clock, in 10kHz units */
+ u16 max; /**< pixel clock, in 10kHz units */
+} __attribute__((packed));
+
+struct psb_intel_sdvo_preferred_input_timing_args {
+ u16 clock;
+ u16 width;
+ u16 height;
+ u8 interlace:1;
+ u8 scaled:1;
+ u8 pad:6;
+} __attribute__((packed));
+
+/* I2C registers for SDVO */
+#define SDVO_I2C_ARG_0 0x07
+#define SDVO_I2C_ARG_1 0x06
+#define SDVO_I2C_ARG_2 0x05
+#define SDVO_I2C_ARG_3 0x04
+#define SDVO_I2C_ARG_4 0x03
+#define SDVO_I2C_ARG_5 0x02
+#define SDVO_I2C_ARG_6 0x01
+#define SDVO_I2C_ARG_7 0x00
+#define SDVO_I2C_OPCODE 0x08
+#define SDVO_I2C_CMD_STATUS 0x09
+#define SDVO_I2C_RETURN_0 0x0a
+#define SDVO_I2C_RETURN_1 0x0b
+#define SDVO_I2C_RETURN_2 0x0c
+#define SDVO_I2C_RETURN_3 0x0d
+#define SDVO_I2C_RETURN_4 0x0e
+#define SDVO_I2C_RETURN_5 0x0f
+#define SDVO_I2C_RETURN_6 0x10
+#define SDVO_I2C_RETURN_7 0x11
+#define SDVO_I2C_VENDOR_BEGIN 0x20
+
+/* Status results */
+#define SDVO_CMD_STATUS_POWER_ON 0x0
+#define SDVO_CMD_STATUS_SUCCESS 0x1
+#define SDVO_CMD_STATUS_NOTSUPP 0x2
+#define SDVO_CMD_STATUS_INVALID_ARG 0x3
+#define SDVO_CMD_STATUS_PENDING 0x4
+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
+
+/* SDVO commands, argument/result registers */
+
+#define SDVO_CMD_RESET 0x01
+
+/** Returns a struct intel_sdvo_caps */
+#define SDVO_CMD_GET_DEVICE_CAPS 0x02
+
+#define SDVO_CMD_GET_FIRMWARE_REV 0x86
+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
+
+/**
+ * Reports which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
+struct psb_intel_sdvo_get_trained_inputs_response {
+ unsigned int input0_trained:1;
+ unsigned int input1_trained:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+/** Returns a struct intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
+
+/**
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
+ * on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
+
+/**
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
+struct psb_intel_sdvo_in_out_map {
+ u16 in0, in1;
+};
+
+/**
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
+
+/**
+ * Returns a struct intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
+
+/**
+ * Takes a struct intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
+struct intel_sdvo_get_interrupt_event_source_response {
+ u16 interrupt_status;
+ unsigned int ambient_light_interrupt:1;
+ unsigned int hdmi_audio_encrypt_change:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+/**
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT 0x10
+struct psb_intel_sdvo_set_target_input_args {
+ unsigned int target_1:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+
+/**
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
+# define SDVO_DTD_SDVO_FLAGS		SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
+
+/**
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
+
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
+
+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
+
+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+struct psb_intel_sdvo_tv_format {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:3;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_TV_FORMAT 0x28
+
+#define SDVO_CMD_SET_TV_FORMAT 0x29
+
+/** Returns the resolutions that can be used with the given TV format */
+#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
+struct psb_intel_sdvo_sdtv_resolution_request {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int pad:5;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_sdtv_resolution_reply {
+ unsigned int res_320x200:1;
+ unsigned int res_320x240:1;
+ unsigned int res_400x300:1;
+ unsigned int res_640x350:1;
+ unsigned int res_640x400:1;
+ unsigned int res_640x480:1;
+ unsigned int res_704x480:1;
+ unsigned int res_704x576:1;
+
+ unsigned int res_720x350:1;
+ unsigned int res_720x400:1;
+ unsigned int res_720x480:1;
+ unsigned int res_720x540:1;
+ unsigned int res_720x576:1;
+ unsigned int res_768x576:1;
+ unsigned int res_800x600:1;
+ unsigned int res_832x624:1;
+
+ unsigned int res_920x766:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x1024:1;
+ unsigned int pad:5;
+} __attribute__((packed));
+
+/* Get supported resolutions with a square pixel aspect ratio that can be
+   scaled for the requested HDTV format */
+#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
+
+struct psb_intel_sdvo_hdtv_resolution_request {
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_hdtv_resolution_reply {
+ unsigned int res_640x480:1;
+ unsigned int res_800x600:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x960:1;
+ unsigned int res_1400x1050:1;
+ unsigned int res_1600x1200:1;
+ unsigned int res_1920x1440:1;
+ unsigned int res_2048x1536:1;
+
+ unsigned int res_2560x1920:1;
+ unsigned int res_3200x2400:1;
+ unsigned int res_3840x2880:1;
+ unsigned int pad1:5;
+
+ unsigned int res_848x480:1;
+ unsigned int res_1064x600:1;
+ unsigned int res_1280x720:1;
+ unsigned int res_1360x768:1;
+ unsigned int res_1704x960:1;
+ unsigned int res_1864x1050:1;
+ unsigned int res_1920x1080:1;
+ unsigned int res_2128x1200:1;
+
+ unsigned int res_2560x1400:1;
+ unsigned int res_2728x1536:1;
+ unsigned int res_3408x1920:1;
+ unsigned int res_4264x2400:1;
+ unsigned int res_5120x2880:1;
+ unsigned int pad2:3;
+
+ unsigned int res_768x480:1;
+ unsigned int res_960x600:1;
+ unsigned int res_1152x720:1;
+ unsigned int res_1124x768:1;
+ unsigned int res_1536x960:1;
+ unsigned int res_1680x1050:1;
+ unsigned int res_1728x1080:1;
+ unsigned int res_1920x1200:1;
+
+ unsigned int res_2304x1440:1;
+ unsigned int res_2456x1536:1;
+ unsigned int res_3072x1920:1;
+ unsigned int res_3840x2400:1;
+ unsigned int res_4608x2880:1;
+ unsigned int pad3:3;
+
+ unsigned int res_1280x1024:1;
+ unsigned int pad4:7;
+
+ unsigned int res_1280x768:1;
+ unsigned int pad5:7;
+} __attribute__((packed));
+
+/* Get supported power states returns info for the encoder and monitor,
+   relying on the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
+/* Get power state returns info for the encoder and monitor, relying on
+   the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_POWER_STATE 0x2b
+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
+# define SDVO_ENCODER_STATE_ON (1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
+# define SDVO_ENCODER_STATE_OFF (1 << 3)
+# define SDVO_MONITOR_STATE_ON (1 << 4)
+# define SDVO_MONITOR_STATE_STANDBY (1 << 5)
+# define SDVO_MONITOR_STATE_SUSPEND (1 << 6)
+# define SDVO_MONITOR_STATE_OFF (1 << 7)
+
+#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
+#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
+#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
+/**
+ * The panel power sequencing parameters are in units of milliseconds.
+ * The high fields are bits 8:9 of the 10-bit values.
+ */
+struct psb_sdvo_panel_power_sequencing {
+ u8 t0;
+ u8 t1;
+ u8 t2;
+ u8 t3;
+ u8 t4;
+
+ unsigned int t0_high:2;
+ unsigned int t1_high:2;
+ unsigned int t2_high:2;
+ unsigned int t3_high:2;
+
+ unsigned int t4_high:2;
+ unsigned int pad:6;
+} __attribute__((packed));
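+
+/*
+ * Example: the full 10-bit t0 delay (in milliseconds) is reconstructed
+ * as
+ *
+ *	t0_ms = pps->t0 | (pps->t0_high << 8);
+ *
+ * and likewise for t1..t4.
+ */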
+
+#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
+struct sdvo_max_backlight_reply {
+ u8 max_value;
+ u8 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
+#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
+
+#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
+struct sdvo_get_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ u16 value;
+} __attribute__((packed));
+#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
+struct sdvo_set_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ unsigned int enable:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+
+/* Set display power state */
+#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
+# define SDVO_DISPLAY_STATE_ON (1 << 0)
+# define SDVO_DISPLAY_STATE_STANDBY (1 << 1)
+# define SDVO_DISPLAY_STATE_SUSPEND (1 << 2)
+# define SDVO_DISPLAY_STATE_OFF (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
+struct psb_intel_sdvo_enhancements_reply {
+ unsigned int flicker_filter:1;
+ unsigned int flicker_filter_adaptive:1;
+ unsigned int flicker_filter_2d:1;
+ unsigned int saturation:1;
+ unsigned int hue:1;
+ unsigned int brightness:1;
+ unsigned int contrast:1;
+ unsigned int overscan_h:1;
+
+ unsigned int overscan_v:1;
+ unsigned int hpos:1;
+ unsigned int vpos:1;
+ unsigned int sharpness:1;
+ unsigned int dot_crawl:1;
+ unsigned int dither:1;
+ unsigned int tv_chroma_filter:1;
+ unsigned int tv_luma_filter:1;
+} __attribute__((packed));
+
+/* Picture enhancement limits below are dependent on the current TV format,
+ * and thus need to be queried and set after it.
+ */
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
+#define SDVO_CMD_GET_MAX_SATURATION 0x55
+#define SDVO_CMD_GET_MAX_HUE 0x58
+#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
+#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
+#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
+#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
+#define SDVO_CMD_GET_MAX_HPOS 0x67
+#define SDVO_CMD_GET_MAX_VPOS 0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
+struct psb_intel_sdvo_enhancement_limits_reply {
+ u16 max_value;
+ u16 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
+#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
+# define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0)
+# define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0)
+# define SDVO_LVDS_CONNECTOR_SPWG (0 << 2)
+# define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2)
+# define SDVO_LVDS_SINGLE_CHANNEL (0 << 4)
+# define SDVO_LVDS_DUAL_CHANNEL (1 << 4)
+
+#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
+#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
+#define SDVO_CMD_GET_SATURATION 0x56
+#define SDVO_CMD_SET_SATURATION 0x57
+#define SDVO_CMD_GET_HUE 0x59
+#define SDVO_CMD_SET_HUE 0x5a
+#define SDVO_CMD_GET_BRIGHTNESS 0x5c
+#define SDVO_CMD_SET_BRIGHTNESS 0x5d
+#define SDVO_CMD_GET_CONTRAST 0x5f
+#define SDVO_CMD_SET_CONTRAST 0x60
+#define SDVO_CMD_GET_OVERSCAN_H 0x62
+#define SDVO_CMD_SET_OVERSCAN_H 0x63
+#define SDVO_CMD_GET_OVERSCAN_V 0x65
+#define SDVO_CMD_SET_OVERSCAN_V 0x66
+#define SDVO_CMD_GET_HPOS 0x68
+#define SDVO_CMD_SET_HPOS 0x69
+#define SDVO_CMD_GET_VPOS 0x6b
+#define SDVO_CMD_SET_VPOS 0x6c
+#define SDVO_CMD_GET_SHARPNESS 0x6e
+#define SDVO_CMD_SET_SHARPNESS 0x6f
+#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
+struct psb_intel_sdvo_enhancements_arg {
+ u16 value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_DOT_CRAWL 0x70
+#define SDVO_CMD_SET_DOT_CRAWL 0x71
+# define SDVO_DOT_CRAWL_ON (1 << 0)
+# define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_GET_DITHER 0x72
+#define SDVO_CMD_SET_DITHER 0x73
+# define SDVO_DITHER_ON (1 << 0)
+# define SDVO_DITHER_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
+# define SDVO_CONTROL_BUS_PROM (1 << 0)
+# define SDVO_CONTROL_BUS_DDC1 (1 << 1)
+# define SDVO_CONTROL_BUS_DDC2 (1 << 2)
+# define SDVO_CONTROL_BUS_DDC3 (1 << 3)
+
+/* HDMI op codes */
+#define SDVO_CMD_GET_SUPP_ENCODE 0x9d
+#define SDVO_CMD_GET_ENCODE 0x9e
+#define SDVO_CMD_SET_ENCODE 0x9f
+ #define SDVO_ENCODE_DVI 0x0
+ #define SDVO_ENCODE_HDMI 0x1
+#define SDVO_CMD_SET_PIXEL_REPLI 0x8b
+#define SDVO_CMD_GET_PIXEL_REPLI 0x8c
+#define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d
+#define SDVO_CMD_SET_COLORIMETRY 0x8e
+ #define SDVO_COLORIMETRY_RGB256 0x0
+ #define SDVO_COLORIMETRY_RGB220 0x1
+ #define SDVO_COLORIMETRY_YCrCb422 0x3
+ #define SDVO_COLORIMETRY_YCrCb444 0x4
+#define SDVO_CMD_GET_COLORIMETRY 0x8f
+#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
+#define SDVO_CMD_SET_AUDIO_STAT 0x91
+#define SDVO_CMD_GET_AUDIO_STAT 0x92
+#define SDVO_CMD_SET_HBUF_INDEX 0x93
+#define SDVO_CMD_GET_HBUF_INDEX 0x94
+#define SDVO_CMD_GET_HBUF_INFO 0x95
+#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
+#define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97
+#define SDVO_CMD_SET_HBUF_DATA 0x98
+#define SDVO_CMD_GET_HBUF_DATA 0x99
+#define SDVO_CMD_SET_HBUF_TXRATE 0x9a
+#define SDVO_CMD_GET_HBUF_TXRATE 0x9b
+ #define SDVO_HBUF_TX_DISABLED (0 << 6)
+ #define SDVO_HBUF_TX_ONCE (2 << 6)
+ #define SDVO_HBUF_TX_VSYNC (3 << 6)
+#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
+#define SDVO_NEED_TO_STALL (1 << 7)
+
+struct psb_intel_sdvo_encode {
+ u8 dvi_rev;
+ u8 hdmi_rev;
+} __attribute__ ((packed));
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
new file mode 100644
index 00000000000..624eb36511c
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -0,0 +1,684 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include "psb_irq.h"
+#include "mdfld_output.h"
+
+/*
+ * inline functions
+ */
+
+static inline u32
+psb_pipestat(int pipe)
+{
+ if (pipe == 0)
+ return PIPEASTAT;
+ if (pipe == 1)
+ return PIPEBSTAT;
+ if (pipe == 2)
+ return PIPECSTAT;
+ BUG();
+}
+
+static inline u32
+mid_pipe_event(int pipe)
+{
+ if (pipe == 0)
+ return _PSB_PIPEA_EVENT_FLAG;
+ if (pipe == 1)
+ return _MDFLD_PIPEB_EVENT_FLAG;
+ if (pipe == 2)
+ return _MDFLD_PIPEC_EVENT_FLAG;
+ BUG();
+}
+
+static inline u32
+mid_pipe_vsync(int pipe)
+{
+ if (pipe == 0)
+ return _PSB_VSYNC_PIPEA_FLAG;
+ if (pipe == 1)
+ return _PSB_VSYNC_PIPEB_FLAG;
+ if (pipe == 2)
+ return _MDFLD_PIPEC_VBLANK_FLAG;
+ BUG();
+}
+
+static inline u32
+mid_pipeconf(int pipe)
+{
+ if (pipe == 0)
+ return PIPEACONF;
+ if (pipe == 1)
+ return PIPEBCONF;
+ if (pipe == 2)
+ return PIPECCONF;
+ BUG();
+}
+
+void
+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+ if ((dev_priv->pipestat[pipe] & mask) != mask) {
+ u32 reg = psb_pipestat(pipe);
+ dev_priv->pipestat[pipe] |= mask;
+ /* Enable the interrupt, clear any pending status */
+ if (gma_power_begin(dev_priv->dev, false)) {
+ u32 writeVal = PSB_RVDC32(reg);
+ writeVal |= (mask | (mask >> 16));
+ PSB_WVDC32(writeVal, reg);
+ (void) PSB_RVDC32(reg);
+ gma_power_end(dev_priv->dev);
+ }
+ }
+}
+
+void
+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+ if ((dev_priv->pipestat[pipe] & mask) != 0) {
+ u32 reg = psb_pipestat(pipe);
+ dev_priv->pipestat[pipe] &= ~mask;
+ if (gma_power_begin(dev_priv->dev, false)) {
+ u32 writeVal = PSB_RVDC32(reg);
+ writeVal &= ~mask;
+ PSB_WVDC32(writeVal, reg);
+ (void) PSB_RVDC32(reg);
+ gma_power_end(dev_priv->dev);
+ }
+ }
+}
+
+static void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+{
+ if (gma_power_begin(dev_priv->dev, false)) {
+ u32 pipe_event = mid_pipe_event(pipe);
+ dev_priv->vdc_irq_mask |= pipe_event;
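+		/* Mask out everything we do not handle; enable the rest */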
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+ gma_power_end(dev_priv->dev);
+ }
+}
+
+static void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+{
+ if (dev_priv->pipestat[pipe] == 0) {
+ if (gma_power_begin(dev_priv->dev, false)) {
+ u32 pipe_event = mid_pipe_event(pipe);
+ dev_priv->vdc_irq_mask &= ~pipe_event;
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+ gma_power_end(dev_priv->dev);
+ }
+ }
+}
+
+/*
+ * Display controller interrupt handler for pipe events.
+ */
+static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+
+ uint32_t pipe_stat_val = 0;
+ uint32_t pipe_stat_reg = psb_pipestat(pipe);
+ uint32_t pipe_enable = dev_priv->pipestat[pipe];
+ uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
+ uint32_t pipe_clear;
+ uint32_t i = 0;
+
+ spin_lock(&dev_priv->irqmask_lock);
+
+ pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
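+	/* The enable bit for each event lives in the high 16 bits and its
+	 * status bit 16 below: after masking, a status bit survives only
+	 * if its enable bit is also set. */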
+ pipe_stat_val &= pipe_enable | pipe_status;
+ pipe_stat_val &= pipe_stat_val >> 16;
+
+ spin_unlock(&dev_priv->irqmask_lock);
+
+	/* Clear the second-level interrupt status bits. Sometimes the bits
+	 * are very sticky, so we repeat until they unstick. */
+ for (i = 0; i < 0xffff; i++) {
+ PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
+ pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;
+
+ if (pipe_clear == 0)
+ break;
+ }
+
+ if (pipe_clear)
+ dev_err(dev->dev,
+ "%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
+ __func__, pipe, PSB_RVDC32(pipe_stat_reg));
+
+ if (pipe_stat_val & PIPE_VBLANK_STATUS)
+ drm_handle_vblank(dev, pipe);
+
+ if (pipe_stat_val & PIPE_TE_STATUS)
+ drm_handle_vblank(dev, pipe);
+}
+
+/*
+ * Display controller interrupt handler.
+ */
+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
+{
+ if (vdc_stat & _PSB_IRQ_ASLE)
+ psb_intel_opregion_asle_intr(dev);
+
+ if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
+ mid_pipe_event_handler(dev, 0);
+
+ if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
+ mid_pipe_event_handler(dev, 1);
+}
+
+/*
+ * SGX interrupt handler
+ */
+static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 val, addr;
+ int error = false;
+
+ if (stat_1 & _PSB_CE_TWOD_COMPLETE)
+ val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);
+
+ if (stat_2 & _PSB_CE2_BIF_REQUESTER_FAULT) {
+ val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
+ addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
+ if (val) {
+ if (val & _PSB_CBI_STAT_PF_N_RW)
+ DRM_ERROR("SGX MMU page fault:");
+ else
+ DRM_ERROR("SGX MMU read / write protection fault:");
+
+ if (val & _PSB_CBI_STAT_FAULT_CACHE)
+ DRM_ERROR("\tCache requestor");
+ if (val & _PSB_CBI_STAT_FAULT_TA)
+ DRM_ERROR("\tTA requestor");
+ if (val & _PSB_CBI_STAT_FAULT_VDM)
+ DRM_ERROR("\tVDM requestor");
+ if (val & _PSB_CBI_STAT_FAULT_2D)
+ DRM_ERROR("\t2D requestor");
+ if (val & _PSB_CBI_STAT_FAULT_PBE)
+ DRM_ERROR("\tPBE requestor");
+ if (val & _PSB_CBI_STAT_FAULT_TSP)
+ DRM_ERROR("\tTSP requestor");
+ if (val & _PSB_CBI_STAT_FAULT_ISP)
+ DRM_ERROR("\tISP requestor");
+ if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
+ DRM_ERROR("\tUSSEPDS requestor");
+ if (val & _PSB_CBI_STAT_FAULT_HOST)
+ DRM_ERROR("\tHost requestor");
+
+ DRM_ERROR("\tMMU failing address is 0x%08x.\n",
+ (unsigned int)addr);
+ error = true;
+ }
+ }
+
+ /* Clear bits */
+ PSB_WSGX32(stat_1, PSB_CR_EVENT_HOST_CLEAR);
+ PSB_WSGX32(stat_2, PSB_CR_EVENT_HOST_CLEAR2);
+ PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2);
+}
+
+irqreturn_t psb_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
+ u32 sgx_stat_1, sgx_stat_2;
+ int handled = 0;
+
+ spin_lock(&dev_priv->irqmask_lock);
+
+ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
+
+ if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
+ dsp_int = 1;
+
+ /* FIXME: Handle Medfield
+ if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
+ dsp_int = 1;
+ */
+
+ if (vdc_stat & _PSB_IRQ_SGX_FLAG)
+ sgx_int = 1;
+ if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
+ hotplug_int = 1;
+
+ vdc_stat &= dev_priv->vdc_irq_mask;
+ spin_unlock(&dev_priv->irqmask_lock);
+
+ if (dsp_int && gma_power_is_on(dev)) {
+ psb_vdc_interrupt(dev, vdc_stat);
+ handled = 1;
+ }
+
+ if (sgx_int) {
+ sgx_stat_1 = PSB_RSGX32(PSB_CR_EVENT_STATUS);
+ sgx_stat_2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
+ psb_sgx_interrupt(dev, sgx_stat_1, sgx_stat_2);
+ handled = 1;
+ }
+
+ /* Note: this bit has other meanings on some devices, so we will
+ need to address that later if it ever matters */
+ if (hotplug_int && dev_priv->ops->hotplug) {
+ handled = dev_priv->ops->hotplug(dev);
+ REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+ }
+
+ PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
+ (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
+ rmb();
+
+ if (!handled)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+void psb_irq_preinstall(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ if (gma_power_is_on(dev)) {
+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+ PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
+ PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
+ }
+ if (dev->vblank[0].enabled)
+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+ if (dev->vblank[1].enabled)
+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+
+ /* FIXME: Handle Medfield irq mask
+ if (dev->vblank[1].enabled)
+ dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
+ if (dev->vblank[2].enabled)
+ dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
+ */
+
+ /* Revisit this area - want per device masks ? */
+ if (dev_priv->ops->hotplug)
+ dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
+ dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE | _PSB_IRQ_SGX_FLAG;
+
+ /* This register is safe even if display island is off */
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+int psb_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ /* Enable 2D and MMU fault interrupts */
+ PSB_WSGX32(_PSB_CE2_BIF_REQUESTER_FAULT, PSB_CR_EVENT_HOST_ENABLE2);
+ PSB_WSGX32(_PSB_CE_TWOD_COMPLETE, PSB_CR_EVENT_HOST_ENABLE);
+ PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); /* Post */
+
+ /* This register is safe even if display island is off */
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+
+ if (dev->vblank[0].enabled)
+ psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+ else
+ psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ if (dev->vblank[1].enabled)
+ psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+ else
+ psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ if (dev->vblank[2].enabled)
+ psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+ else
+ psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ if (dev_priv->ops->hotplug_enable)
+ dev_priv->ops->hotplug_enable(dev, true);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+ return 0;
+}
+
+void psb_irq_uninstall(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ if (dev_priv->ops->hotplug_enable)
+ dev_priv->ops->hotplug_enable(dev, false);
+
+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+
+ if (dev->vblank[0].enabled)
+ psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ if (dev->vblank[1].enabled)
+ psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ if (dev->vblank[2].enabled)
+ psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+
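+	/* Drop all display sources from the mask; keep only the SGX and
+	 * video codec interrupt bits armed. */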
+ dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
+ _PSB_IRQ_MSVDX_FLAG |
+ _LNC_IRQ_TOPAZ_FLAG;
+
+ /* These two registers are safe even if display island is off */
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+
+ wmb();
+
+ /* This register is safe even if display island is off */
+ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+void psb_irq_turn_on_dpst(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ u32 hist_reg;
+ u32 pwm_reg;
+
+ if (gma_power_begin(dev, false)) {
+ PSB_WVDC32(1 << 31, HISTOGRAM_LOGIC_CONTROL);
+ hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+ PSB_WVDC32(1 << 31, HISTOGRAM_INT_CONTROL);
+ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+
+ PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
+ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+ PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE
+ | PWM_PHASEIN_INT_ENABLE,
+ PWM_CONTROL_LOGIC);
+ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+ psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+
+ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+ PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,
+ HISTOGRAM_INT_CONTROL);
+ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+ PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
+ PWM_CONTROL_LOGIC);
+
+ gma_power_end(dev);
+ }
+}
+
+int psb_irq_enable_dpst(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ /* enable DPST */
+ mid_enable_pipe_event(dev_priv, 0);
+ psb_irq_turn_on_dpst(dev);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+ return 0;
+}
+
+void psb_irq_turn_off_dpst(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ u32 hist_reg;
+ u32 pwm_reg;
+
+ if (gma_power_begin(dev, false)) {
+ PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
+ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+
+ psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+
+ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+ PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
+ PWM_CONTROL_LOGIC);
+ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+ gma_power_end(dev);
+ }
+}
+
+int psb_irq_disable_dpst(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ mid_disable_pipe_event(dev_priv, 0);
+ psb_irq_turn_off_dpst(dev);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+ return 0;
+}
+
+/*
+ * Enable the VBLANK interrupt on the given pipe
+ */
+int psb_enable_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+ uint32_t reg_val = 0;
+ uint32_t pipeconf_reg = mid_pipeconf(pipe);
+
+	/* Medfield is different - we should perhaps extract out vblank
+	   and backlight etc ops */
+ if (IS_MFLD(dev))
+ return mdfld_enable_te(dev, pipe);
+
+ if (gma_power_begin(dev, false)) {
+ reg_val = REG_READ(pipeconf_reg);
+ gma_power_end(dev);
+ }
+
+ if (!(reg_val & PIPEACONF_ENABLE))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ if (pipe == 0)
+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+ else if (pipe == 1)
+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+ psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+ return 0;
+}
+
+/*
+ * Disable the VBLANK interrupt on the given pipe
+ */
+void psb_disable_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ if (IS_MFLD(dev))
+ mdfld_disable_te(dev, pipe);
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ if (pipe == 0)
+ dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
+ else if (pipe == 1)
+ dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
+
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+ psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+/*
+ * Enable the TE (tearing effect) interrupt on the given pipe
+ */
+int mdfld_enable_te(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+ uint32_t reg_val = 0;
+ uint32_t pipeconf_reg = mid_pipeconf(pipe);
+
+ if (gma_power_begin(dev, false)) {
+ reg_val = REG_READ(pipeconf_reg);
+ gma_power_end(dev);
+ }
+
+ if (!(reg_val & PIPEACONF_ENABLE))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ mid_enable_pipe_event(dev_priv, pipe);
+ psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+ return 0;
+}
+
+/*
+ * Disable the TE (tearing effect) interrupt on the given pipe
+ */
+void mdfld_disable_te(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+
+ if (!dev_priv->dsr_enable)
+ return;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ mid_disable_pipe_event(dev_priv, pipe);
+ psb_disable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+ uint32_t high_frame = PIPEAFRAMEHIGH;
+ uint32_t low_frame = PIPEAFRAMEPIXEL;
+ uint32_t pipeconf_reg = PIPEACONF;
+ uint32_t reg_val = 0;
+ uint32_t high1 = 0, high2 = 0, low = 0, count = 0;
+
+ switch (pipe) {
+ case 0:
+ break;
+ case 1:
+ high_frame = PIPEBFRAMEHIGH;
+ low_frame = PIPEBFRAMEPIXEL;
+ pipeconf_reg = PIPEBCONF;
+ break;
+ case 2:
+ high_frame = PIPECFRAMEHIGH;
+ low_frame = PIPECFRAMEPIXEL;
+ pipeconf_reg = PIPECCONF;
+ break;
+ default:
+ dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
+ return 0;
+ }
+
+ if (!gma_power_begin(dev, false))
+ return 0;
+
+ reg_val = REG_READ(pipeconf_reg);
+
+ if (!(reg_val & PIPEACONF_ENABLE)) {
+ dev_err(dev->dev, "trying to get vblank count for disabled pipe %d\n",
+ pipe);
+ goto psb_get_vblank_counter_exit;
+ }
+
+ /*
+ * High & low register fields aren't synchronized, so make sure
+ * we get a low value that's stable across two reads of the high
+ * register.
+ */
+ do {
+ high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+ PIPE_FRAME_HIGH_SHIFT);
+ low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+ PIPE_FRAME_LOW_SHIFT);
+ high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+ PIPE_FRAME_HIGH_SHIFT);
+ } while (high1 != high2);
+
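+	/* The low byte of the frame count is apparently read out of the top
+	 * of the pixel-count register, hence (high << 8) | low. */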
+ count = (high1 << 8) | low;
+
+psb_get_vblank_counter_exit:
+
+ gma_power_end(dev);
+
+ return count;
+}
+
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
new file mode 100644
index 00000000000..d0b45ffa112
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -0,0 +1,47 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Benjamin Defnet <benjamin.r.defnet@intel.com>
+ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _PSB_IRQ_H_
+#define _PSB_IRQ_H_
+
+#include <drm/drmP.h>
+
+bool sysirq_init(struct drm_device *dev);
+void sysirq_uninit(struct drm_device *dev);
+
+void psb_irq_preinstall(struct drm_device *dev);
+int psb_irq_postinstall(struct drm_device *dev);
+void psb_irq_uninstall(struct drm_device *dev);
+irqreturn_t psb_irq_handler(int irq, void *arg);
+
+int psb_irq_enable_dpst(struct drm_device *dev);
+int psb_irq_disable_dpst(struct drm_device *dev);
+void psb_irq_turn_on_dpst(struct drm_device *dev);
+void psb_irq_turn_off_dpst(struct drm_device *dev);
+int psb_enable_vblank(struct drm_device *dev, int pipe);
+void psb_disable_vblank(struct drm_device *dev, int pipe);
+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe);
+
+int mdfld_enable_te(struct drm_device *dev, int pipe);
+void mdfld_disable_te(struct drm_device *dev, int pipe);
+#endif /* _PSB_IRQ_H_ */
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
new file mode 100644
index 00000000000..1d2ebb5e530
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_lid.c
@@ -0,0 +1,94 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <linux/spinlock.h>
+
+static void psb_lid_timer_func(unsigned long data)
+{
+	struct drm_psb_private *dev_priv = (struct drm_psb_private *)data;
+ struct drm_device *dev = (struct drm_device *)dev_priv->dev;
+ struct timer_list *lid_timer = &dev_priv->lid_timer;
+ unsigned long irq_flags;
+ u32 __iomem *lid_state = dev_priv->opregion.lid_state;
+ u32 pp_status;
+
+ if (readl(lid_state) == dev_priv->lid_last_state)
+ goto lid_timer_schedule;
+
+	if (readl(lid_state) & 0x01) {
+		/* lid state is open */
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
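+		/* Wait until the panel reports power-on, or until the power
+		 * sequencer goes idle without reaching it. */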
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & PP_ON) == 0 &&
+ (pp_status & PP_SEQUENCE_MASK) != 0);
+
+ if (REG_READ(PP_STATUS) & PP_ON) {
+ /*FIXME: should be backlight level before*/
+ psb_intel_lvds_set_brightness(dev, 100);
+ } else {
+ DRM_DEBUG("LVDS panel never powered up");
+ return;
+ }
+ } else {
+ psb_intel_lvds_set_brightness(dev, 0);
+
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & PP_ON) == 0);
+ }
+ dev_priv->lid_last_state = readl(lid_state);
+
+lid_timer_schedule:
+ spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+ if (!timer_pending(lid_timer)) {
+ lid_timer->expires = jiffies + PSB_LID_DELAY;
+ add_timer(lid_timer);
+ }
+ spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+}
+
+void psb_lid_timer_init(struct drm_psb_private *dev_priv)
+{
+ struct timer_list *lid_timer = &dev_priv->lid_timer;
+ unsigned long irq_flags;
+
+ spin_lock_init(&dev_priv->lid_lock);
+ spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+
+ init_timer(lid_timer);
+
+ lid_timer->data = (unsigned long)dev_priv;
+ lid_timer->function = psb_lid_timer_func;
+ lid_timer->expires = jiffies + PSB_LID_DELAY;
+
+ add_timer(lid_timer);
+ spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+}
+
+void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
+{
+ del_timer_sync(&dev_priv->lid_timer);
+}
+
diff --git a/drivers/gpu/drm/gma500/psb_reg.h b/drivers/gpu/drm/gma500/psb_reg.h
new file mode 100644
index 00000000000..b81c7c1e9c2
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_reg.h
@@ -0,0 +1,582 @@
+/**************************************************************************
+ *
+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_REG_H_
+#define _PSB_REG_H_
+
+#define PSB_CR_CLKGATECTL 0x0000
+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
+#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
+#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
+#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
+
+#define PSB_CR_CORE_ID 0x0010
+#define _PSB_CC_ID_ID_SHIFT (16)
+#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
+#define _PSB_CC_ID_CONFIG_SHIFT (0)
+#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
+
+#define PSB_CR_CORE_REVISION 0x0014
+#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
+#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
+#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
+#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
+#define _PSB_CC_REVISION_MINOR_SHIFT (8)
+#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
+#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
+
+#define PSB_CR_SOFT_RESET 0x0080
+#define _PSB_CS_RESET_TSP_RESET (1 << 6)
+#define _PSB_CS_RESET_ISP_RESET (1 << 5)
+#define _PSB_CS_RESET_USE_RESET (1 << 4)
+#define _PSB_CS_RESET_TA_RESET (1 << 3)
+#define _PSB_CS_RESET_DPM_RESET (1 << 2)
+#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
+#define _PSB_CS_RESET_BIF_RESET (1 << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
+
+#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
+
+#define PSB_CR_EVENT_STATUS2 0x0118
+
+#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
+#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
+
+#define PSB_CR_EVENT_STATUS 0x012C
+
+#define PSB_CR_EVENT_HOST_ENABLE 0x0130
+
+#define PSB_CR_EVENT_HOST_CLEAR 0x0134
+#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
+#define _PSB_CE_TA_DPM_FAULT (1 << 28)
+#define _PSB_CE_TWOD_COMPLETE (1 << 27)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
+#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
+#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
+#define _PSB_CE_SW_EVENT (1 << 14)
+#define _PSB_CE_TA_FINISHED (1 << 13)
+#define _PSB_CE_TA_TERMINATE (1 << 12)
+#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
+#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
+
+
+#define PSB_USE_OFFSET_MASK 0x0007FFFF
+#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
+#define PSB_CR_USE_CODE_BASE0 0x0A0C
+#define PSB_CR_USE_CODE_BASE1 0x0A10
+#define PSB_CR_USE_CODE_BASE2 0x0A14
+#define PSB_CR_USE_CODE_BASE3 0x0A18
+#define PSB_CR_USE_CODE_BASE4 0x0A1C
+#define PSB_CR_USE_CODE_BASE5 0x0A20
+#define PSB_CR_USE_CODE_BASE6 0x0A24
+#define PSB_CR_USE_CODE_BASE7 0x0A28
+#define PSB_CR_USE_CODE_BASE8 0x0A2C
+#define PSB_CR_USE_CODE_BASE9 0x0A30
+#define PSB_CR_USE_CODE_BASE10 0x0A34
+#define PSB_CR_USE_CODE_BASE11 0x0A38
+#define PSB_CR_USE_CODE_BASE12 0x0A3C
+#define PSB_CR_USE_CODE_BASE13 0x0A40
+#define PSB_CR_USE_CODE_BASE14 0x0A44
+#define PSB_CR_USE_CODE_BASE15 0x0A48
+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
+#define _PSB_CUC_BASE_DM_SHIFT (25)
+#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
+#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */
+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
+#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
+#define _PSB_CUC_DM_VERTEX (0)
+#define _PSB_CUC_DM_PIXEL (1)
+#define _PSB_CUC_DM_RESERVED (2)
+#define _PSB_CUC_DM_EDM (3)
+
+#define PSB_CR_PDS_EXEC_BASE 0x0AB8
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
+
+#define PSB_CR_EVENT_KICKER 0x0AC4
+#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */
+
+#define PSB_CR_EVENT_KICK 0x0AC8
+#define _PSB_CE_KICK_NOW (1 << 0)
+
+#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
+
+#define PSB_CR_BIF_CTRL 0x0C00
+#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
+#define _PSB_CB_CTRL_INVALDC (1 << 3)
+#define _PSB_CB_CTRL_FLUSH (1 << 2)
+
+#define PSB_CR_BIF_INT_STAT 0x0C04
+
+#define PSB_CR_BIF_FAULT 0x0C08
+#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
+#define _PSB_CBI_STAT_FAULT_SHIFT (0)
+#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
+#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
+#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
+#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
+#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
+#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
+#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
+#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
+#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
+#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
+
+#define PSB_CR_BIF_BANK0 0x0C78
+#define PSB_CR_BIF_BANK1 0x0C7C
+#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
+#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
+#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
+
+#define PSB_CR_2D_SOCIF 0x0E18
+#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
+#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
+#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
+
+#define PSB_CR_2D_BLIT_STATUS 0x0E04
+#define _PSB_C2B_STATUS_BUSY (1 << 24)
+#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
+#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
+
+/*
+ * 2D defs.
+ */
+
+/*
+ * 2D Slave Port Data : Block Header's Object Type
+ */
+
+#define PSB_2D_CLIP_BH (0x00000000)
+#define PSB_2D_PAT_BH (0x10000000)
+#define PSB_2D_CTRL_BH (0x20000000)
+#define PSB_2D_SRC_OFF_BH (0x30000000)
+#define PSB_2D_MASK_OFF_BH (0x40000000)
+#define PSB_2D_RESERVED1_BH (0x50000000)
+#define PSB_2D_RESERVED2_BH (0x60000000)
+#define PSB_2D_FENCE_BH (0x70000000)
+#define PSB_2D_BLIT_BH (0x80000000)
+#define PSB_2D_SRC_SURF_BH (0x90000000)
+#define PSB_2D_DST_SURF_BH (0xA0000000)
+#define PSB_2D_PAT_SURF_BH (0xB0000000)
+#define PSB_2D_SRC_PAL_BH (0xC0000000)
+#define PSB_2D_PAT_PAL_BH (0xD0000000)
+#define PSB_2D_MASK_SURF_BH (0xE0000000)
+#define PSB_2D_FLUSH_BH (0xF0000000)
+
+/*
+ * Clip Definition block (PSB_2D_CLIP_BH)
+ */
+#define PSB_2D_CLIPCOUNT_MAX (1)
+#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
+#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
+#define PSB_2D_CLIPCOUNT_SHIFT (0)
+/* clip rectangle min & max */
+#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
+#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
+#define PSB_2D_CLIP_XMAX_SHIFT (12)
+#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
+#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
+#define PSB_2D_CLIP_XMIN_SHIFT (0)
+/* clip rectangle offset */
+#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
+#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
+#define PSB_2D_CLIP_YMAX_SHIFT (12)
+#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
+#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
+#define PSB_2D_CLIP_YMIN_SHIFT (0)
+
+/*
+ * Pattern Control (PSB_2D_PAT_BH)
+ */
+#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
+#define PSB_2D_PAT_HEIGHT_SHIFT (0)
+#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
+#define PSB_2D_PAT_WIDTH_SHIFT (5)
+#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
+#define PSB_2D_PAT_YSTART_SHIFT (10)
+#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
+#define PSB_2D_PAT_XSTART_SHIFT (15)
+
+/*
+ * 2D Control block (PSB_2D_CTRL_BH)
+ */
+/* Present Flags */
+#define PSB_2D_SRCCK_CTRL (0x00000001)
+#define PSB_2D_DSTCK_CTRL (0x00000002)
+#define PSB_2D_ALPHA_CTRL (0x00000004)
+/* Colour Key Colour (SRC/DST)*/
+#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
+#define PSB_2D_CK_COL_CLRMASK (0x00000000)
+#define PSB_2D_CK_COL_SHIFT (0)
+/* Colour Key Mask (SRC/DST)*/
+#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
+#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
+#define PSB_2D_CK_MASK_SHIFT (0)
+/* Alpha Control (Alpha/RGB)*/
+#define PSB_2D_GBLALPHA_MASK (0x000FF000)
+#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
+#define PSB_2D_GBLALPHA_SHIFT (12)
+#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
+#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
+#define PSB_2D_SRCALPHA_OP_SHIFT (20)
+#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
+#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
+#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
+#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
+#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
+#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
+#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
+#define PSB_2D_SRCALPHA_INVERT (0x00800000)
+#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
+#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
+#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
+#define PSB_2D_DSTALPHA_OP_SHIFT (24)
+#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
+#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
+#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
+#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
+#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
+#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
+#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
+#define PSB_2D_DSTALPHA_INVERT (0x08000000)
+#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
+
+#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
+
+/*
+ * Source Offset (PSB_2D_SRC_OFF_BH)
+ */
+#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
+#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
+#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
+#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
+
+/*
+ * Mask Offset (PSB_2D_MASK_OFF_BH)
+ */
+#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
+#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
+#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
+#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
+
+/*
+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
+ */
+
+/*
+ * Blit Rectangle (PSB_2D_BLIT_BH)
+ */
+
+#define PSB_2D_ROT_MASK (3 << 25)
+#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
+#define PSB_2D_ROT_NONE (0 << 25)
+#define PSB_2D_ROT_90DEGS (1 << 25)
+#define PSB_2D_ROT_180DEGS (2 << 25)
+#define PSB_2D_ROT_270DEGS (3 << 25)
+
+#define PSB_2D_COPYORDER_MASK (3 << 23)
+#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
+#define PSB_2D_COPYORDER_TL2BR (0 << 23)
+#define PSB_2D_COPYORDER_BR2TL (1 << 23)
+#define PSB_2D_COPYORDER_TR2BL (2 << 23)
+#define PSB_2D_COPYORDER_BL2TR (3 << 23)
+
+#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
+#define PSB_2D_DSTCK_DISABLE (0x00000000)
+#define PSB_2D_DSTCK_PASS (0x00200000)
+#define PSB_2D_DSTCK_REJECT (0x00400000)
+
+#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
+#define PSB_2D_SRCCK_DISABLE (0x00000000)
+#define PSB_2D_SRCCK_PASS (0x00080000)
+#define PSB_2D_SRCCK_REJECT (0x00100000)
+
+#define PSB_2D_CLIP_ENABLE (0x00040000)
+
+#define PSB_2D_ALPHA_ENABLE (0x00020000)
+
+#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
+#define PSB_2D_PAT_MASK (0x00010000)
+#define PSB_2D_USE_PAT (0x00010000)
+#define PSB_2D_USE_FILL (0x00000000)
+/*
+ * Tungsten Graphics note on rop codes: If rop A and rop B are
+ * identical, the mask surface will not be read and need not be
+ * set up.
+ */
+
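+/* rop code B */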
+#define PSB_2D_ROP3B_MASK (0x0000FF00)
+#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
+#define PSB_2D_ROP3B_SHIFT (8)
+/* rop code A */
+#define PSB_2D_ROP3A_MASK (0x000000FF)
+#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
+#define PSB_2D_ROP3A_SHIFT (0)
+
+#define PSB_2D_ROP4_MASK (0x0000FFFF)
+/*
+ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
+ * Fill Colour RGBA8888
+ */
+#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
+#define PSB_2D_FILLCOLOUR_SHIFT (0)
+/*
+ * DWORD1: (Always Present)
+ * X Start (Dest)
+ * Y Start (Dest)
+ */
+#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
+#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
+#define PSB_2D_DST_XSTART_SHIFT (12)
+#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
+#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
+#define PSB_2D_DST_YSTART_SHIFT (0)
+/*
+ * DWORD2: (Always Present)
+ * X Size (Dest)
+ * Y Size (Dest)
+ */
+#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
+#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
+#define PSB_2D_DST_XSIZE_SHIFT (12)
+#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
+#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
+#define PSB_2D_DST_YSIZE_SHIFT (0)
+
+/*
+ * Source Surface (PSB_2D_SRC_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
+#define PSB_2D_SRC_1_PAL (0x00000000)
+#define PSB_2D_SRC_2_PAL (0x00008000)
+#define PSB_2D_SRC_4_PAL (0x00010000)
+#define PSB_2D_SRC_8_PAL (0x00018000)
+#define PSB_2D_SRC_8_ALPHA (0x00020000)
+#define PSB_2D_SRC_4_ALPHA (0x00028000)
+#define PSB_2D_SRC_332RGB (0x00030000)
+#define PSB_2D_SRC_4444ARGB (0x00038000)
+#define PSB_2D_SRC_555RGB (0x00040000)
+#define PSB_2D_SRC_1555ARGB (0x00048000)
+#define PSB_2D_SRC_565RGB (0x00050000)
+#define PSB_2D_SRC_0888ARGB (0x00058000)
+#define PSB_2D_SRC_8888ARGB (0x00060000)
+#define PSB_2D_SRC_8888UYVY (0x00068000)
+#define PSB_2D_SRC_RESERVED (0x00070000)
+#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
+
+
+#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
+#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
+#define PSB_2D_SRC_STRIDE_SHIFT (0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
+#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
+#define PSB_2D_SRC_ADDR_SHIFT (2)
+#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
+
+/*
+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
+#define PSB_2D_PAT_1_PAL (0x00000000)
+#define PSB_2D_PAT_2_PAL (0x00008000)
+#define PSB_2D_PAT_4_PAL (0x00010000)
+#define PSB_2D_PAT_8_PAL (0x00018000)
+#define PSB_2D_PAT_8_ALPHA (0x00020000)
+#define PSB_2D_PAT_4_ALPHA (0x00028000)
+#define PSB_2D_PAT_332RGB (0x00030000)
+#define PSB_2D_PAT_4444ARGB (0x00038000)
+#define PSB_2D_PAT_555RGB (0x00040000)
+#define PSB_2D_PAT_1555ARGB (0x00048000)
+#define PSB_2D_PAT_565RGB (0x00050000)
+#define PSB_2D_PAT_0888ARGB (0x00058000)
+#define PSB_2D_PAT_8888ARGB (0x00060000)
+
+#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
+#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
+#define PSB_2D_PAT_STRIDE_SHIFT (0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
+#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
+#define PSB_2D_PAT_ADDR_SHIFT (2)
+#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
+
+/*
+ * Destination Surface (PSB_2D_DST_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_DST_FORMAT_MASK (0x00078000)
+#define PSB_2D_DST_332RGB (0x00030000)
+#define PSB_2D_DST_4444ARGB (0x00038000)
+#define PSB_2D_DST_555RGB (0x00040000)
+#define PSB_2D_DST_1555ARGB (0x00048000)
+#define PSB_2D_DST_565RGB (0x00050000)
+#define PSB_2D_DST_0888ARGB (0x00058000)
+#define PSB_2D_DST_8888ARGB (0x00060000)
+#define PSB_2D_DST_8888AYUV (0x00070000)
+
+#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
+#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
+#define PSB_2D_DST_STRIDE_SHIFT (0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
+#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
+#define PSB_2D_DST_ADDR_SHIFT (2)
+#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
+
+/*
+ * Mask Surface (PSB_2D_MASK_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
+#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
+#define PSB_2D_MASK_STRIDE_SHIFT (0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
+#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
+#define PSB_2D_MASK_ADDR_SHIFT (2)
+#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
+
+/*
+ * Source Palette (PSB_2D_SRC_PAL_BH)
+ */
+
+#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
+#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
+#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
+#define PSB_2D_SRCPAL_BYTEALIGN (1024)
+
+/*
+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
+ */
+
+#define PSB_2D_PATPAL_ADDR_SHIFT (0)
+#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
+#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
+#define PSB_2D_PATPAL_BYTEALIGN (1024)
+
+/*
+ * Rop3 Codes (2 LS bytes)
+ */
+
+#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
+#define PSB_2D_ROP3_PATCOPY (0xF0F0)
+#define PSB_2D_ROP3_WHITENESS (0xFFFF)
+#define PSB_2D_ROP3_BLACKNESS (0x0000)
+#define PSB_2D_ROP3_SRC (0xCC)
+#define PSB_2D_ROP3_PAT (0xF0)
+#define PSB_2D_ROP3_DST (0xAA)
+
+/*
+ * Sizes.
+ */
+
+#define PSB_SCENE_HW_COOKIE_SIZE 16
+#define PSB_TA_MEM_HW_COOKIE_SIZE 16
+
+/*
+ * Scene stuff.
+ */
+
+#define PSB_NUM_HW_SCENES 2
+
+/*
+ * Scheduler completion actions.
+ */
+
+#define PSB_RASTER_BLOCK 0
+#define PSB_RASTER 1
+#define PSB_RETURN 2
+#define PSB_TA 3
+
+/* Power management */
+#define PSB_PUNIT_PORT 0x04
+#define PSB_OSPMBA 0x78
+#define PSB_APMBA 0x7a
+#define PSB_APM_CMD 0x0
+#define PSB_APM_STS 0x04
+#define PSB_PWRGT_VID_ENC_MASK 0x30
+#define PSB_PWRGT_VID_DEC_MASK 0xc
+#define PSB_PWRGT_GL3_MASK 0xc0
+
+#define PSB_PM_SSC 0x20
+#define PSB_PM_SSS 0x30
+#define PSB_PWRGT_DISPLAY_MASK 0xc /* on a different BA than video/gfx */
+#define MDFLD_PWRGT_DISPLAY_A_CNTR 0x0000000c
+#define MDFLD_PWRGT_DISPLAY_B_CNTR 0x0000c000
+#define MDFLD_PWRGT_DISPLAY_C_CNTR 0x00030000
+#define MDFLD_PWRGT_DISP_MIPI_CNTR 0x000c0000
+#define MDFLD_PWRGT_DISPLAY_CNTR (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR) /* 0x000fc00c */
+/* Display SSS register bits are different in A0 vs. B0 */
+#define PSB_PWRGT_GFX_MASK 0x3
+#define MDFLD_PWRGT_DISPLAY_A_STS 0x000000c0
+#define MDFLD_PWRGT_DISPLAY_B_STS 0x00000300
+#define MDFLD_PWRGT_DISPLAY_C_STS 0x00000c00
+#define PSB_PWRGT_GFX_MASK_B0 0xc3
+#define MDFLD_PWRGT_DISPLAY_A_STS_B0 0x0000000c
+#define MDFLD_PWRGT_DISPLAY_B_STS_B0 0x0000c000
+#define MDFLD_PWRGT_DISPLAY_C_STS_B0 0x00030000
+#define MDFLD_PWRGT_DISP_MIPI_STS 0x000c0000
+#define MDFLD_PWRGT_DISPLAY_STS_A0 (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000c0fc0 */
+#define MDFLD_PWRGT_DISPLAY_STS_B0 (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
+#endif
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
new file mode 100644
index 00000000000..771ff66711a
--- /dev/null
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -0,0 +1,829 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "tc35876x-dsi-lvds.h"
+#include <linux/i2c/tc35876x.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/intel_scu_ipc.h>
+
+static struct i2c_client *tc35876x_client;
+static struct i2c_client *cmi_lcd_i2c_client;
+
+#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
+#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
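+/* A worked example: FLD_MASK(10, 0) = 0x000007ff, and FLD_VAL(40, 24, 16)
+ * places the value 40 in the 9-bit field at bits 24..16, i.e. 0x00280000. */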
+
+/* DSI D-PHY Layer Registers */
+#define D0W_DPHYCONTTX 0x0004
+#define CLW_DPHYCONTRX 0x0020
+#define D0W_DPHYCONTRX 0x0024
+#define D1W_DPHYCONTRX 0x0028
+#define D2W_DPHYCONTRX 0x002C
+#define D3W_DPHYCONTRX 0x0030
+#define COM_DPHYCONTRX 0x0038
+#define CLW_CNTRL 0x0040
+#define D0W_CNTRL 0x0044
+#define D1W_CNTRL 0x0048
+#define D2W_CNTRL 0x004C
+#define D3W_CNTRL 0x0050
+#define DFTMODE_CNTRL 0x0054
+
+/* DSI PPI Layer Registers */
+#define PPI_STARTPPI 0x0104
+#define PPI_BUSYPPI 0x0108
+#define PPI_LINEINITCNT 0x0110
+#define PPI_LPTXTIMECNT 0x0114
+#define PPI_LANEENABLE 0x0134
+#define PPI_TX_RX_TA 0x013C
+#define PPI_CLS_ATMR 0x0140
+#define PPI_D0S_ATMR 0x0144
+#define PPI_D1S_ATMR 0x0148
+#define PPI_D2S_ATMR 0x014C
+#define PPI_D3S_ATMR 0x0150
+#define PPI_D0S_CLRSIPOCOUNT 0x0164
+#define PPI_D1S_CLRSIPOCOUNT 0x0168
+#define PPI_D2S_CLRSIPOCOUNT 0x016C
+#define PPI_D3S_CLRSIPOCOUNT 0x0170
+#define CLS_PRE 0x0180
+#define D0S_PRE 0x0184
+#define D1S_PRE 0x0188
+#define D2S_PRE 0x018C
+#define D3S_PRE 0x0190
+#define CLS_PREP 0x01A0
+#define D0S_PREP 0x01A4
+#define D1S_PREP 0x01A8
+#define D2S_PREP 0x01AC
+#define D3S_PREP 0x01B0
+#define CLS_ZERO 0x01C0
+#define D0S_ZERO 0x01C4
+#define D1S_ZERO 0x01C8
+#define D2S_ZERO 0x01CC
+#define D3S_ZERO 0x01D0
+#define PPI_CLRFLG 0x01E0
+#define PPI_CLRSIPO 0x01E4
+#define HSTIMEOUT 0x01F0
+#define HSTIMEOUTENABLE 0x01F4
+
+/* DSI Protocol Layer Registers */
+#define DSI_STARTDSI 0x0204
+#define DSI_BUSYDSI 0x0208
+#define DSI_LANEENABLE 0x0210
+#define DSI_LANESTATUS0 0x0214
+#define DSI_LANESTATUS1 0x0218
+#define DSI_INTSTATUS 0x0220
+#define DSI_INTMASK 0x0224
+#define DSI_INTCLR 0x0228
+#define DSI_LPTXTO 0x0230
+
+/* DSI General Registers */
+#define DSIERRCNT 0x0300
+
+/* DSI Application Layer Registers */
+#define APLCTRL 0x0400
+#define RDPKTLN 0x0404
+
+/* Video Path Registers */
+#define VPCTRL 0x0450
+#define HTIM1 0x0454
+#define HTIM2 0x0458
+#define VTIM1 0x045C
+#define VTIM2 0x0460
+#define VFUEN 0x0464
+
+/* LVDS Registers */
+#define LVMX0003 0x0480
+#define LVMX0407 0x0484
+#define LVMX0811 0x0488
+#define LVMX1215 0x048C
+#define LVMX1619 0x0490
+#define LVMX2023 0x0494
+#define LVMX2427 0x0498
+#define LVCFG 0x049C
+#define LVPHY0 0x04A0
+#define LVPHY1 0x04A4
+
+/* System Registers */
+#define SYSSTAT 0x0500
+#define SYSRST 0x0504
+
+/* GPIO Registers */
+/*#define GPIOC 0x0520*/
+#define GPIOO 0x0524
+#define GPIOI 0x0528
+
+/* I2C Registers */
+#define I2CTIMCTRL 0x0540
+#define I2CMADDR 0x0544
+#define WDATAQ 0x0548
+#define RDATAQ 0x054C
+
+/* Chip/Rev Registers */
+#define IDREG 0x0580
+
+/* Debug Registers */
+#define DEBUG00 0x05A0
+#define DEBUG01 0x05A4
+
+/* Panel CABC registers */
+#define PANEL_PWM_CONTROL 0x90
+#define PANEL_FREQ_DIVIDER_HI 0x91
+#define PANEL_FREQ_DIVIDER_LO 0x92
+#define PANEL_DUTY_CONTROL 0x93
+#define PANEL_MODIFY_RGB 0x94
+#define PANEL_FRAMERATE_CONTROL 0x96
+#define PANEL_PWM_MIN 0x97
+#define PANEL_PWM_REF 0x98
+#define PANEL_PWM_MAX 0x99
+#define PANEL_ALLOW_DISTORT 0x9A
+#define PANEL_BYPASS_PWMI 0x9B
+
+/* Panel color management registers */
+#define PANEL_CM_ENABLE 0x700
+#define PANEL_CM_HUE 0x701
+#define PANEL_CM_SATURATION 0x702
+#define PANEL_CM_INTENSITY 0x703
+#define PANEL_CM_BRIGHTNESS 0x704
+#define PANEL_CM_CE_ENABLE 0x705
+#define PANEL_CM_PEAK_EN 0x710
+#define PANEL_CM_GAIN 0x711
+#define PANEL_CM_HUETABLE_START 0x730
+#define PANEL_CM_HUETABLE_END 0x747 /* inclusive */
+
+/* Input muxing for registers LVMX0003...LVMX2427 */
+enum {
+ INPUT_R0, /* 0 */
+ INPUT_R1,
+ INPUT_R2,
+ INPUT_R3,
+ INPUT_R4,
+ INPUT_R5,
+ INPUT_R6,
+ INPUT_R7,
+ INPUT_G0, /* 8 */
+ INPUT_G1,
+ INPUT_G2,
+ INPUT_G3,
+ INPUT_G4,
+ INPUT_G5,
+ INPUT_G6,
+ INPUT_G7,
+ INPUT_B0, /* 16 */
+ INPUT_B1,
+ INPUT_B2,
+ INPUT_B3,
+ INPUT_B4,
+ INPUT_B5,
+ INPUT_B6,
+ INPUT_B7,
+ INPUT_HSYNC, /* 24 */
+ INPUT_VSYNC,
+ INPUT_DE,
+ LOGIC_0,
+ /* 28...31 undefined */
+};
+
+#define INPUT_MUX(lvmx03, lvmx02, lvmx01, lvmx00) \
+ (FLD_VAL(lvmx03, 29, 24) | FLD_VAL(lvmx02, 20, 16) | \
+ FLD_VAL(lvmx01, 12, 8) | FLD_VAL(lvmx00, 4, 0))
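+/* e.g. INPUT_MUX(INPUT_R5, INPUT_R4, INPUT_R3, INPUT_R2) = 0x05040302,
+ * one input selector per byte lane. */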
+
+/**
+ * tc35876x_regw - Write DSI-LVDS bridge register using I2C
+ * @client: struct i2c_client to use
+ * @reg: register address
+ * @value: value to write
+ *
+ * Returns 0 on success, or a negative error value.
+ */
+static int tc35876x_regw(struct i2c_client *client, u16 reg, u32 value)
+{
+ int r;
+ u8 tx_data[] = {
+ /* NOTE: Register address big-endian, data little-endian. */
+ (reg >> 8) & 0xff,
+ reg & 0xff,
+ value & 0xff,
+ (value >> 8) & 0xff,
+ (value >> 16) & 0xff,
+ (value >> 24) & 0xff,
+ };
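+	/* e.g. reg 0x0450, value 0x00000120 goes on the wire as
+	 * 04 50 20 01 00 00 */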
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .buf = tx_data,
+ .len = ARRAY_SIZE(tx_data),
+ },
+ };
+
+ r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (r < 0) {
+ dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x error %d\n",
+ __func__, reg, value, r);
+ return r;
+ }
+
+ if (r < ARRAY_SIZE(msgs)) {
+ dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x msgs %d\n",
+ __func__, reg, value, r);
+ return -EAGAIN;
+ }
+
+ dev_dbg(&client->dev, "%s: reg 0x%04x val 0x%08x\n",
+ __func__, reg, value);
+
+ return 0;
+}
+
+/**
+ * tc35876x_regr - Read DSI-LVDS bridge register using I2C
+ * @client: struct i2c_client to use
+ * @reg: register address
+ * @value: pointer for storing the value
+ *
+ * Returns 0 on success, or a negative error value.
+ */
+static int tc35876x_regr(struct i2c_client *client, u16 reg, u32 *value)
+{
+ int r;
+ u8 tx_data[] = {
+ (reg >> 8) & 0xff,
+ reg & 0xff,
+ };
+ u8 rx_data[4];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .buf = tx_data,
+ .len = ARRAY_SIZE(tx_data),
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .buf = rx_data,
+ .len = ARRAY_SIZE(rx_data),
+ },
+ };
+
+ r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (r < 0) {
+ dev_err(&client->dev, "%s: reg 0x%04x error %d\n", __func__,
+ reg, r);
+ return r;
+ }
+
+ if (r < ARRAY_SIZE(msgs)) {
+ dev_err(&client->dev, "%s: reg 0x%04x msgs %d\n", __func__,
+ reg, r);
+ return -EAGAIN;
+ }
+
+ *value = rx_data[0] << 24 | rx_data[1] << 16 |
+ rx_data[2] << 8 | rx_data[3];
+
+ dev_dbg(&client->dev, "%s: reg 0x%04x value 0x%08x\n", __func__,
+ reg, *value);
+
+ return 0;
+}
+
+void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state)
+{
+ struct tc35876x_platform_data *pdata;
+
+ if (WARN(!tc35876x_client, "%s called before probe", __func__))
+ return;
+
+ dev_dbg(&tc35876x_client->dev, "%s: state %d\n", __func__, state);
+
+ pdata = dev_get_platdata(&tc35876x_client->dev);
+
+ if (pdata->gpio_bridge_reset == -1)
+ return;
+
+ if (state) {
+ gpio_set_value_cansleep(pdata->gpio_bridge_reset, 0);
+ mdelay(10);
+ } else {
+ /* Pull MIPI Bridge reset pin to Low */
+ gpio_set_value_cansleep(pdata->gpio_bridge_reset, 0);
+ mdelay(20);
+ /* Pull MIPI Bridge reset pin to High */
+ gpio_set_value_cansleep(pdata->gpio_bridge_reset, 1);
+ mdelay(40);
+ }
+}
+
+void tc35876x_configure_lvds_bridge(struct drm_device *dev)
+{
+ struct i2c_client *i2c = tc35876x_client;
+ u32 ppi_lptxtimecnt;
+ u32 txtagocnt;
+ u32 txtasurecnt;
+ u32 id;
+
+ if (WARN(!tc35876x_client, "%s called before probe", __func__))
+ return;
+
+ dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
+
+ if (!tc35876x_regr(i2c, IDREG, &id))
+ dev_info(&tc35876x_client->dev, "tc35876x ID 0x%08x\n", id);
+ else
+ dev_err(&tc35876x_client->dev, "Cannot read ID\n");
+
+ ppi_lptxtimecnt = 4;
+ txtagocnt = (5 * ppi_lptxtimecnt - 3) / 4;
+ txtasurecnt = 3 * ppi_lptxtimecnt / 2;
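+	/* with ppi_lptxtimecnt = 4: txtagocnt = (20 - 3) / 4 = 4 and
+	 * txtasurecnt = 12 / 2 = 6 */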
+ tc35876x_regw(i2c, PPI_TX_RX_TA, FLD_VAL(txtagocnt, 26, 16) |
+ FLD_VAL(txtasurecnt, 10, 0));
+ tc35876x_regw(i2c, PPI_LPTXTIMECNT, FLD_VAL(ppi_lptxtimecnt, 10, 0));
+
+ tc35876x_regw(i2c, PPI_D0S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
+ tc35876x_regw(i2c, PPI_D1S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
+ tc35876x_regw(i2c, PPI_D2S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
+ tc35876x_regw(i2c, PPI_D3S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
+
+	/* Enable the PPI and DSI lanes: the clock lane plus four data lanes */
+ tc35876x_regw(i2c, PPI_LANEENABLE,
+ BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
+ tc35876x_regw(i2c, DSI_LANEENABLE,
+ BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
+ tc35876x_regw(i2c, PPI_STARTPPI, BIT(0));
+ tc35876x_regw(i2c, DSI_STARTDSI, BIT(0));
+
+ /* Setting LVDS output frequency */
+ tc35876x_regw(i2c, LVPHY0, FLD_VAL(1, 20, 16) |
+ FLD_VAL(2, 15, 14) | FLD_VAL(6, 4, 0)); /* 0x00048006 */
+
+	/* Set the video panel control register to 0x00000120: VTGen=ON (reason unclear) */
+ tc35876x_regw(i2c, VPCTRL, BIT(8) | BIT(5));
+
+ /* Horizontal back porch and horizontal pulse width. 0x00280028 */
+ tc35876x_regw(i2c, HTIM1, FLD_VAL(40, 24, 16) | FLD_VAL(40, 8, 0));
+
+ /* Horizontal front porch and horizontal active video size. 0x00500500*/
+ tc35876x_regw(i2c, HTIM2, FLD_VAL(80, 24, 16) | FLD_VAL(1280, 10, 0));
+
+ /* Vertical back porch and vertical sync pulse width. 0x000e000a */
+ tc35876x_regw(i2c, VTIM1, FLD_VAL(14, 23, 16) | FLD_VAL(10, 7, 0));
+
+ /* Vertical front porch and vertical display size. 0x000e0320 */
+ tc35876x_regw(i2c, VTIM2, FLD_VAL(14, 23, 16) | FLD_VAL(800, 10, 0));
+
+ /* Set above HTIM1, HTIM2, VTIM1, and VTIM2 at next VSYNC. */
+ tc35876x_regw(i2c, VFUEN, BIT(0));
+
+ /* Soft reset LCD controller. */
+ tc35876x_regw(i2c, SYSRST, BIT(2));
+
+ /* LVDS-TX input muxing */
+ tc35876x_regw(i2c, LVMX0003,
+ INPUT_MUX(INPUT_R5, INPUT_R4, INPUT_R3, INPUT_R2));
+ tc35876x_regw(i2c, LVMX0407,
+ INPUT_MUX(INPUT_G2, INPUT_R7, INPUT_R1, INPUT_R6));
+ tc35876x_regw(i2c, LVMX0811,
+ INPUT_MUX(INPUT_G1, INPUT_G0, INPUT_G4, INPUT_G3));
+ tc35876x_regw(i2c, LVMX1215,
+ INPUT_MUX(INPUT_B2, INPUT_G7, INPUT_G6, INPUT_G5));
+ tc35876x_regw(i2c, LVMX1619,
+ INPUT_MUX(INPUT_B4, INPUT_B3, INPUT_B1, INPUT_B0));
+ tc35876x_regw(i2c, LVMX2023,
+ INPUT_MUX(LOGIC_0, INPUT_B7, INPUT_B6, INPUT_B5));
+ tc35876x_regw(i2c, LVMX2427,
+ INPUT_MUX(INPUT_R0, INPUT_DE, INPUT_VSYNC, INPUT_HSYNC));
+
+ /* Enable LVDS transmitter. */
+ tc35876x_regw(i2c, LVCFG, BIT(0));
+
+	/* Clear notifications, but don't write the reserved bits. (This used
+	 * to write 0xffffffff to 0x0288, which was presumably an error.) */
+ tc35876x_regw(i2c, DSI_INTCLR, FLD_MASK(31, 30) | FLD_MASK(22, 0));
+}
+
+#define GPIOPWMCTRL 0x38F
+#define PWM0CLKDIV0 0x62 /* low byte */
+#define PWM0CLKDIV1 0x61 /* high byte */
+
+#define SYSTEMCLK 19200000UL /* 19.2 MHz */
+#define PWM_FREQUENCY 9600 /* Hz */
+
+/* f = baseclk / (clkdiv + 1) => clkdiv = (baseclk - f) / f */
+static inline u16 calc_clkdiv(unsigned long baseclk, unsigned int f)
+{
+ return (baseclk - f) / f;
+}
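+/* With SYSTEMCLK = 19.2 MHz and PWM_FREQUENCY = 9600 Hz this yields
+ * clkdiv = (19200000 - 9600) / 9600 = 1999 (0x7cf). */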
+
+static void tc35876x_brightness_init(struct drm_device *dev)
+{
+ int ret;
+ u8 pwmctrl;
+ u16 clkdiv;
+
+ /* Make sure the PWM reference is the 19.2 MHz system clock. Read first
+ * instead of setting directly to catch potential conflicts between PWM
+ * users. */
+ ret = intel_scu_ipc_ioread8(GPIOPWMCTRL, &pwmctrl);
+ if (ret || pwmctrl != 0x01) {
+ if (ret)
+ dev_err(&dev->pdev->dev, "GPIOPWMCTRL read failed\n");
+ else
+ dev_warn(&dev->pdev->dev, "GPIOPWMCTRL was not set to system clock (pwmctrl = 0x%02x)\n", pwmctrl);
+
+ ret = intel_scu_ipc_iowrite8(GPIOPWMCTRL, 0x01);
+ if (ret)
+ dev_err(&dev->pdev->dev, "GPIOPWMCTRL set failed\n");
+ }
+
+ clkdiv = calc_clkdiv(SYSTEMCLK, PWM_FREQUENCY);
+
+ ret = intel_scu_ipc_iowrite8(PWM0CLKDIV1, (clkdiv >> 8) & 0xff);
+ if (!ret)
+ ret = intel_scu_ipc_iowrite8(PWM0CLKDIV0, clkdiv & 0xff);
+
+ if (ret)
+ dev_err(&dev->pdev->dev, "PWM0CLKDIV set failed\n");
+ else
+ dev_dbg(&dev->pdev->dev, "PWM0CLKDIV set to 0x%04x (%d Hz)\n",
+ clkdiv, PWM_FREQUENCY);
+}
+
+#define PWM0DUTYCYCLE 0x67
+
+void tc35876x_brightness_control(struct drm_device *dev, int level)
+{
+ int ret;
+ u8 duty_val;
+ u8 panel_duty_val;
+
+ level = clamp(level, 0, MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
+
+ /* PWM duty cycle 0x00...0x63 corresponds to 0...99% */
+ duty_val = level * 0x63 / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL;
+
+	/* I won't pretend to understand this formula. The panel spec is
+	 * written in quite broken English. */
+ panel_duty_val = (2 * level - 100) * 0xA9 /
+ MDFLD_DSI_BRIGHTNESS_MAX_LEVEL + 0x56;
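+	/* Assuming MDFLD_DSI_BRIGHTNESS_MAX_LEVEL is 100, full brightness
+	 * gives duty_val = 0x63 and panel_duty_val = 0xff; level 50 gives
+	 * panel_duty_val = 0x56. */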
+
+ ret = intel_scu_ipc_iowrite8(PWM0DUTYCYCLE, duty_val);
+ if (ret)
+ dev_err(&tc35876x_client->dev, "%s: ipc write fail\n",
+ __func__);
+
+ if (cmi_lcd_i2c_client) {
+ ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
+ PANEL_PWM_MAX, panel_duty_val);
+ if (ret < 0)
+ dev_err(&cmi_lcd_i2c_client->dev, "%s: i2c write failed\n",
+ __func__);
+ }
+}
+
+void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev)
+{
+ struct tc35876x_platform_data *pdata;
+
+ if (WARN(!tc35876x_client, "%s called before probe", __func__))
+ return;
+
+ dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
+
+ pdata = dev_get_platdata(&tc35876x_client->dev);
+
+ if (pdata->gpio_panel_bl_en != -1)
+ gpio_set_value_cansleep(pdata->gpio_panel_bl_en, 0);
+
+ if (pdata->gpio_panel_vadd != -1)
+ gpio_set_value_cansleep(pdata->gpio_panel_vadd, 0);
+}
+
+void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev)
+{
+ struct tc35876x_platform_data *pdata;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (WARN(!tc35876x_client, "%s called before probe", __func__))
+ return;
+
+ dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
+
+ pdata = dev_get_platdata(&tc35876x_client->dev);
+
+ if (pdata->gpio_panel_vadd != -1) {
+ gpio_set_value_cansleep(pdata->gpio_panel_vadd, 1);
+ msleep(260);
+ }
+
+ if (cmi_lcd_i2c_client) {
+ int ret;
+ dev_dbg(&cmi_lcd_i2c_client->dev, "setting TCON\n");
+ /* Bit 4 is average_saving. Setting it to 1, the brightness is
+ * referenced to the average of the frame content. 0 means
+ * reference to the maximum of frame contents. Bits 3:0 are
+ * allow_distort. When set to a nonzero value, all color values
+ * between 255-allow_distort*2 and 255 are mapped to the
+ * 255-allow_distort*2 value.
+ */
+ ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
+ PANEL_ALLOW_DISTORT, 0x10);
+ if (ret < 0)
+ dev_err(&cmi_lcd_i2c_client->dev,
+ "i2c write failed (%d)\n", ret);
+ ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
+ PANEL_BYPASS_PWMI, 0);
+ if (ret < 0)
+ dev_err(&cmi_lcd_i2c_client->dev,
+ "i2c write failed (%d)\n", ret);
+ /* Set minimum brightness value - this is tunable */
+ ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
+ PANEL_PWM_MIN, 0x35);
+ if (ret < 0)
+ dev_err(&cmi_lcd_i2c_client->dev,
+ "i2c write failed (%d)\n", ret);
+ }
+
+ if (pdata->gpio_panel_bl_en != -1)
+ gpio_set_value_cansleep(pdata->gpio_panel_bl_en, 1);
+
+ tc35876x_brightness_control(dev, dev_priv->brightness_adjusted);
+}
+
+static struct drm_display_mode *tc35876x_get_config_mode(struct drm_device *dev)
+{
+ struct drm_display_mode *mode;
+
+ dev_dbg(&dev->pdev->dev, "%s\n", __func__);
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
+ /* FIXME: do this properly. */
+ mode->hdisplay = 1280;
+ mode->vdisplay = 800;
+ mode->hsync_start = 1360;
+ mode->hsync_end = 1400;
+ mode->htotal = 1440;
+ mode->vsync_start = 814;
+ mode->vsync_end = 824;
+ mode->vtotal = 838;
+ mode->clock = 33324 << 1;
+
+ dev_info(&dev->pdev->dev, "hdisplay(w) = %d\n", mode->hdisplay);
+ dev_info(&dev->pdev->dev, "vdisplay(h) = %d\n", mode->vdisplay);
+ dev_info(&dev->pdev->dev, "HSS = %d\n", mode->hsync_start);
+ dev_info(&dev->pdev->dev, "HSE = %d\n", mode->hsync_end);
+ dev_info(&dev->pdev->dev, "htotal = %d\n", mode->htotal);
+ dev_info(&dev->pdev->dev, "VSS = %d\n", mode->vsync_start);
+ dev_info(&dev->pdev->dev, "VSE = %d\n", mode->vsync_end);
+ dev_info(&dev->pdev->dev, "vtotal = %d\n", mode->vtotal);
+ dev_info(&dev->pdev->dev, "clock = %d\n", mode->clock);
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ return mode;
+}
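
As a sanity check on the hardcoded timings, the refresh rate they imply works out to roughly 55 Hz (a standalone computation, not driver code):

#include <stdio.h>

int main(void)
{
	int clock = 33324 << 1;			/* kHz, as set above */
	int htotal = 1440, vtotal = 838;

	/* vrefresh = pixel clock / (htotal * vtotal) */
	printf("refresh = %d Hz\n", clock * 1000 / (htotal * vtotal));
	return 0;
}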
+
+/* DV1 Active area 216.96 x 135.6 mm */
+#define DV1_PANEL_WIDTH 217
+#define DV1_PANEL_HEIGHT 136
+
+static int tc35876x_get_panel_info(struct drm_device *dev, int pipe,
+ struct panel_info *pi)
+{
+ if (!dev || !pi)
+ return -EINVAL;
+
+ pi->width_mm = DV1_PANEL_WIDTH;
+ pi->height_mm = DV1_PANEL_HEIGHT;
+
+ return 0;
+}
+
+static int tc35876x_bridge_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tc35876x_platform_data *pdata;
+
+ dev_info(&client->dev, "%s\n", __func__);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "%s: i2c_check_functionality() failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ pdata = dev_get_platdata(&client->dev);
+ if (!pdata) {
+ dev_err(&client->dev, "%s: no platform data\n", __func__);
+ return -ENODEV;
+ }
+
+ if (pdata->gpio_bridge_reset != -1) {
+ gpio_request(pdata->gpio_bridge_reset, "tc35876x bridge reset");
+ gpio_direction_output(pdata->gpio_bridge_reset, 0);
+ }
+
+ if (pdata->gpio_panel_bl_en != -1) {
+ gpio_request(pdata->gpio_panel_bl_en, "tc35876x panel bl en");
+ gpio_direction_output(pdata->gpio_panel_bl_en, 0);
+ }
+
+ if (pdata->gpio_panel_vadd != -1) {
+ gpio_request(pdata->gpio_panel_vadd, "tc35876x panel vadd");
+ gpio_direction_output(pdata->gpio_panel_vadd, 0);
+ }
+
+ tc35876x_client = client;
+
+ return 0;
+}
+
+static int tc35876x_bridge_remove(struct i2c_client *client)
+{
+ struct tc35876x_platform_data *pdata = dev_get_platdata(&client->dev);
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ if (pdata->gpio_bridge_reset != -1)
+ gpio_free(pdata->gpio_bridge_reset);
+
+ if (pdata->gpio_panel_bl_en != -1)
+ gpio_free(pdata->gpio_panel_bl_en);
+
+ if (pdata->gpio_panel_vadd != -1)
+ gpio_free(pdata->gpio_panel_vadd);
+
+ tc35876x_client = NULL;
+
+ return 0;
+}
+
+static const struct i2c_device_id tc35876x_bridge_id[] = {
+ { "i2c_disp_brig", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc35876x_bridge_id);
+
+static struct i2c_driver tc35876x_bridge_i2c_driver = {
+ .driver = {
+ .name = "i2c_disp_brig",
+ },
+ .id_table = tc35876x_bridge_id,
+ .probe = tc35876x_bridge_probe,
+ .remove = tc35876x_bridge_remove,
+};
+
+/* LCD panel I2C */
+static int cmi_lcd_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ dev_info(&client->dev, "%s\n", __func__);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "%s: i2c_check_functionality() failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ cmi_lcd_i2c_client = client;
+
+ return 0;
+}
+
+static int cmi_lcd_i2c_remove(struct i2c_client *client)
+{
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ cmi_lcd_i2c_client = NULL;
+
+ return 0;
+}
+
+static const struct i2c_device_id cmi_lcd_i2c_id[] = {
+ { "cmi-lcd", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, cmi_lcd_i2c_id);
+
+static struct i2c_driver cmi_lcd_i2c_driver = {
+ .driver = {
+ .name = "cmi-lcd",
+ },
+ .id_table = cmi_lcd_i2c_id,
+ .probe = cmi_lcd_i2c_probe,
+ .remove = cmi_lcd_i2c_remove,
+};
+
+/* HACK to create the I2C device until the platform code creates it */
+#define CMI_LCD_I2C_ADAPTER 2
+#define CMI_LCD_I2C_ADDR 0x60
+
+static int cmi_lcd_hack_create_device(void)
+{
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+ struct i2c_board_info info = {
+ .type = "cmi-lcd",
+ .addr = CMI_LCD_I2C_ADDR,
+ };
+
+ pr_debug("%s\n", __func__);
+
+ adapter = i2c_get_adapter(CMI_LCD_I2C_ADAPTER);
+ if (!adapter) {
+ pr_err("%s: i2c_get_adapter(%d) failed\n", __func__,
+ CMI_LCD_I2C_ADAPTER);
+ return -EINVAL;
+ }
+
+ client = i2c_new_device(adapter, &info);
+ if (!client) {
+ pr_err("%s: i2c_new_device() failed\n", __func__);
+ i2c_put_adapter(adapter);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs tc35876x_encoder_helper_funcs = {
+ .dpms = mdfld_dsi_dpi_dpms,
+ .mode_fixup = mdfld_dsi_dpi_mode_fixup,
+ .prepare = mdfld_dsi_dpi_prepare,
+ .mode_set = mdfld_dsi_dpi_mode_set,
+ .commit = mdfld_dsi_dpi_commit,
+};
+
+static const struct drm_encoder_funcs tc35876x_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+const struct panel_funcs mdfld_tc35876x_funcs = {
+ .encoder_funcs = &tc35876x_encoder_funcs,
+ .encoder_helper_funcs = &tc35876x_encoder_helper_funcs,
+ .get_config_mode = tc35876x_get_config_mode,
+ .get_panel_info = tc35876x_get_panel_info,
+};
+
+void tc35876x_init(struct drm_device *dev)
+{
+ int r;
+
+ dev_dbg(&dev->pdev->dev, "%s\n", __func__);
+
+ cmi_lcd_hack_create_device();
+
+ r = i2c_add_driver(&cmi_lcd_i2c_driver);
+ if (r < 0)
+ dev_err(&dev->pdev->dev,
+ "%s: i2c_add_driver() for %s failed (%d)\n",
+ __func__, cmi_lcd_i2c_driver.driver.name, r);
+
+ r = i2c_add_driver(&tc35876x_bridge_i2c_driver);
+ if (r < 0)
+ dev_err(&dev->pdev->dev,
+ "%s: i2c_add_driver() for %s failed (%d)\n",
+ __func__, tc35876x_bridge_i2c_driver.driver.name, r);
+
+ tc35876x_brightness_init(dev);
+}
+
+void tc35876x_exit(void)
+{
+ pr_debug("%s\n", __func__);
+
+ i2c_del_driver(&tc35876x_bridge_i2c_driver);
+
+ if (cmi_lcd_i2c_client)
+ i2c_del_driver(&cmi_lcd_i2c_driver);
+}
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h
new file mode 100644
index 00000000000..b14b7f9e7d1
--- /dev/null
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MDFLD_DSI_LVDS_BRIDGE_H__
+#define __MDFLD_DSI_LVDS_BRIDGE_H__
+
+void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state);
+void tc35876x_configure_lvds_bridge(struct drm_device *dev);
+void tc35876x_brightness_control(struct drm_device *dev, int level);
+void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev);
+void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev);
+void tc35876x_init(struct drm_device *dev);
+void tc35876x_exit(void);
+
+extern const struct panel_funcs mdfld_tc35876x_funcs;
+
+#endif /*__MDFLD_DSI_LVDS_BRIDGE_H__*/
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
new file mode 100644
index 00000000000..4d341db462a
--- /dev/null
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -0,0 +1,28 @@
+menu "I2C encoder or helper chips"
+ depends on DRM && DRM_KMS_HELPER && I2C
+
+config DRM_I2C_CH7006
+ tristate "Chrontel ch7006 TV encoder"
+ default m if DRM_NOUVEAU
+ help
+ Support for Chrontel ch7006 and similar TV encoders, found
+ on some nVidia video cards.
+
+ This driver is currently only useful if you're also using
+ the nouveau driver.
+
+config DRM_I2C_SIL164
+ tristate "Silicon Image sil164 TMDS transmitter"
+ default m if DRM_NOUVEAU
+ help
+ Support for sil164 and similar single-link (or dual-link
+ when used in pairs) TMDS transmitters, used in some nVidia
+ video cards.
+
+config DRM_I2C_NXP_TDA998X
+ tristate "NXP Semiconductors TDA998X HDMI encoder"
+ default m if DRM_TILCDC
+ help
+ Support for NXP Semiconductors TDA998X HDMI encoders.
+
+endmenu
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 6d2abaf35ba..43aa33baebe 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -2,3 +2,9 @@ ccflags-y := -Iinclude/drm
ch7006-y := ch7006_drv.o ch7006_mode.o
obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
+
+sil164-y := sil164_drv.o
+obj-$(CONFIG_DRM_I2C_SIL164) += sil164.o
+
+tda998x-y := tda998x_drv.o
+obj-$(CONFIG_DRM_I2C_NXP_TDA998X) += tda998x.o
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 81681a07a80..51fa3239202 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -24,6 +24,8 @@
*
*/
+#include <linux/module.h>
+
#include "ch7006_priv.h"
/* DRM encoder functions */
@@ -33,7 +35,7 @@ static void ch7006_encoder_set_config(struct drm_encoder *encoder,
{
struct ch7006_priv *priv = to_ch7006_priv(encoder);
- priv->params = params;
+ priv->params = *(struct ch7006_encoder_params *)params;
}
static void ch7006_encoder_destroy(struct drm_encoder *encoder)
@@ -86,7 +88,7 @@ static void ch7006_encoder_restore(struct drm_encoder *encoder)
}
static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct ch7006_priv *priv = to_ch7006_priv(encoder);
@@ -114,7 +116,7 @@ static void ch7006_encoder_mode_set(struct drm_encoder *encoder,
{
struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
- struct ch7006_encoder_params *params = priv->params;
+ struct ch7006_encoder_params *params = &priv->params;
struct ch7006_state *state = &priv->state;
uint8_t *regs = state->regs;
struct ch7006_mode *mode = priv->mode;
@@ -212,7 +214,7 @@ static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encod
else
priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
encoder->dev->mode_config.tv_subconnector_property,
priv->subconnector);
@@ -250,28 +252,25 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
- priv->scale_property = drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "scale", 2);
- priv->scale_property->values[0] = 0;
- priv->scale_property->values[1] = 2;
+ priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
- drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
+ drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
priv->select_subconnector);
- drm_connector_attach_property(connector, conf->tv_subconnector_property,
+ drm_object_attach_property(&connector->base, conf->tv_subconnector_property,
priv->subconnector);
- drm_connector_attach_property(connector, conf->tv_left_margin_property,
+ drm_object_attach_property(&connector->base, conf->tv_left_margin_property,
priv->hmargin);
- drm_connector_attach_property(connector, conf->tv_bottom_margin_property,
+ drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
priv->vmargin);
- drm_connector_attach_property(connector, conf->tv_mode_property,
+ drm_object_attach_property(&connector->base, conf->tv_mode_property,
priv->norm);
- drm_connector_attach_property(connector, conf->tv_brightness_property,
+ drm_object_attach_property(&connector->base, conf->tv_brightness_property,
priv->brightness);
- drm_connector_attach_property(connector, conf->tv_contrast_property,
+ drm_object_attach_property(&connector->base, conf->tv_contrast_property,
priv->contrast);
- drm_connector_attach_property(connector, conf->tv_flicker_reduction_property,
+ drm_object_attach_property(&connector->base, conf->tv_flicker_reduction_property,
priv->flicker);
- drm_connector_attach_property(connector, priv->scale_property,
+ drm_object_attach_property(&connector->base, priv->scale_property,
priv->scale);
return 0;
@@ -365,7 +364,7 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
.crtc = crtc,
};
- crtc->funcs->set_config(&modeset);
+ drm_mode_set_config_internal(&modeset);
}
}
@@ -428,6 +427,17 @@ static int ch7006_remove(struct i2c_client *client)
return 0;
}
+static int ch7006_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ ch7006_dbg(client, "\n");
+
+ ch7006_write(client, 0x3d, 0x0);
+
+ return 0;
+}
+
static int ch7006_encoder_init(struct i2c_client *client,
struct drm_device *dev,
struct drm_encoder_slave *encoder)
@@ -454,6 +464,7 @@ static int ch7006_encoder_init(struct i2c_client *client,
priv->hmargin = 50;
priv->vmargin = 50;
priv->last_dpms = -1;
+ priv->chip_version = ch7006_read(client, CH7006_VERSION_ID);
if (ch7006_tv_norm) {
for (i = 0; i < NUM_TV_NORMS; i++) {
@@ -483,6 +494,10 @@ static struct i2c_device_id ch7006_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, ch7006_ids);
+static const struct dev_pm_ops ch7006_pm_ops = {
+ .resume = ch7006_resume,
+};
+
static struct drm_i2c_encoder_driver ch7006_driver = {
.i2c_driver = {
.probe = ch7006_probe,
@@ -490,6 +505,7 @@ static struct drm_i2c_encoder_driver ch7006_driver = {
.driver = {
.name = "ch7006",
+ .pm = &ch7006_pm_ops,
},
.id_table = ch7006_ids,
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c
index e447dfb6389..9b83574141a 100644
--- a/drivers/gpu/drm/i2c/ch7006_mode.c
+++ b/drivers/gpu/drm/i2c/ch7006_mode.c
@@ -172,7 +172,7 @@ struct ch7006_mode ch7006_modes[] = {
};
struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
- struct drm_display_mode *drm_mode)
+ const struct drm_display_mode *drm_mode)
{
struct ch7006_priv *priv = to_ch7006_priv(encoder);
struct ch7006_mode *mode;
@@ -316,7 +316,10 @@ void ch7006_setup_power_state(struct drm_encoder *encoder)
}
} else {
- *power |= bitfs(CH7006_POWER_LEVEL, FULL_POWER_OFF);
+ if (priv->chip_version >= 0x20)
+ *power |= bitfs(CH7006_POWER_LEVEL, FULL_POWER_OFF);
+ else
+ *power |= bitfs(CH7006_POWER_LEVEL, POWER_OFF);
}
}
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h
index b06d3d93d8a..ce577841f93 100644
--- a/drivers/gpu/drm/i2c/ch7006_priv.h
+++ b/drivers/gpu/drm/i2c/ch7006_priv.h
@@ -27,10 +27,10 @@
#ifndef __DRM_I2C_CH7006_PRIV_H__
#define __DRM_I2C_CH7006_PRIV_H__
-#include "drmP.h"
-#include "drm_crtc_helper.h"
-#include "drm_encoder_slave.h"
-#include "i2c/ch7006.h"
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/i2c/ch7006.h>
typedef int64_t fixed;
#define fixed1 (1LL << 32)
@@ -77,7 +77,7 @@ struct ch7006_state {
};
struct ch7006_priv {
- struct ch7006_encoder_params *params;
+ struct ch7006_encoder_params params;
struct ch7006_mode *mode;
struct ch7006_state state;
@@ -95,6 +95,7 @@ struct ch7006_priv {
int flicker;
int scale;
+ int chip_version;
int last_dpms;
};
@@ -110,7 +111,7 @@ extern struct ch7006_tv_norm_info ch7006_tv_norms[];
extern struct ch7006_mode ch7006_modes[];
struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
- struct drm_display_mode *drm_mode);
+ const struct drm_display_mode *drm_mode);
void ch7006_setup_levels(struct drm_encoder *encoder);
void ch7006_setup_subcarrier(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
new file mode 100644
index 00000000000..002ce787433
--- /dev/null
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/i2c/sil164.h>
+
+struct sil164_priv {
+ struct sil164_encoder_params config;
+ struct i2c_client *duallink_slave;
+
+ uint8_t saved_state[0x10];
+ uint8_t saved_slave_state[0x10];
+};
+
+#define to_sil164_priv(x) \
+ ((struct sil164_priv *)to_encoder_slave(x)->slave_priv)
+
+#define sil164_dbg(client, format, ...) do { \
+ if (drm_debug & DRM_UT_KMS) \
+ dev_printk(KERN_DEBUG, &client->dev, \
+ "%s: " format, __func__, ## __VA_ARGS__); \
+ } while (0)
+#define sil164_info(client, format, ...) \
+ dev_info(&client->dev, format, __VA_ARGS__)
+#define sil164_err(client, format, ...) \
+ dev_err(&client->dev, format, __VA_ARGS__)
+
+#define SIL164_I2C_ADDR_MASTER 0x38
+#define SIL164_I2C_ADDR_SLAVE 0x39
+
+/* HW register definitions */
+
+#define SIL164_VENDOR_LO 0x0
+#define SIL164_VENDOR_HI 0x1
+#define SIL164_DEVICE_LO 0x2
+#define SIL164_DEVICE_HI 0x3
+#define SIL164_REVISION 0x4
+#define SIL164_FREQ_MIN 0x6
+#define SIL164_FREQ_MAX 0x7
+#define SIL164_CONTROL0 0x8
+# define SIL164_CONTROL0_POWER_ON 0x01
+# define SIL164_CONTROL0_EDGE_RISING 0x02
+# define SIL164_CONTROL0_INPUT_24BIT 0x04
+# define SIL164_CONTROL0_DUAL_EDGE 0x08
+# define SIL164_CONTROL0_HSYNC_ON 0x10
+# define SIL164_CONTROL0_VSYNC_ON 0x20
+#define SIL164_DETECT 0x9
+# define SIL164_DETECT_INTR_STAT 0x01
+# define SIL164_DETECT_HOTPLUG_STAT 0x02
+# define SIL164_DETECT_RECEIVER_STAT 0x04
+# define SIL164_DETECT_INTR_MODE_RECEIVER 0x00
+# define SIL164_DETECT_INTR_MODE_HOTPLUG 0x08
+# define SIL164_DETECT_OUT_MODE_HIGH 0x00
+# define SIL164_DETECT_OUT_MODE_INTR 0x10
+# define SIL164_DETECT_OUT_MODE_RECEIVER 0x20
+# define SIL164_DETECT_OUT_MODE_HOTPLUG 0x30
+# define SIL164_DETECT_VSWING_STAT 0x80
+#define SIL164_CONTROL1 0xa
+# define SIL164_CONTROL1_DESKEW_ENABLE 0x10
+# define SIL164_CONTROL1_DESKEW_INCR_SHIFT 5
+#define SIL164_GPIO 0xb
+#define SIL164_CONTROL2 0xc
+# define SIL164_CONTROL2_FILTER_ENABLE 0x01
+# define SIL164_CONTROL2_FILTER_SETTING_SHIFT 1
+# define SIL164_CONTROL2_DUALLINK_MASTER 0x40
+# define SIL164_CONTROL2_SYNC_CONT 0x80
+#define SIL164_DUALLINK 0xd
+# define SIL164_DUALLINK_ENABLE 0x10
+# define SIL164_DUALLINK_SKEW_SHIFT 5
+#define SIL164_PLLZONE 0xe
+# define SIL164_PLLZONE_STAT 0x08
+# define SIL164_PLLZONE_FORCE_ON 0x10
+# define SIL164_PLLZONE_FORCE_HIGH 0x20
+
+/* HW access functions */
+
+static void
+sil164_write(struct i2c_client *client, uint8_t addr, uint8_t val)
+{
+ uint8_t buf[] = {addr, val};
+ int ret;
+
+ ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ if (ret < 0)
+ sil164_err(client, "Error %d writing to subaddress 0x%x\n",
+ ret, addr);
+}
+
+static uint8_t
+sil164_read(struct i2c_client *client, uint8_t addr)
+{
+ uint8_t val;
+ int ret;
+
+ ret = i2c_master_send(client, &addr, sizeof(addr));
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_master_recv(client, &val, sizeof(val));
+ if (ret < 0)
+ goto fail;
+
+ return val;
+
+fail:
+ sil164_err(client, "Error %d reading from subaddress 0x%x\n",
+ ret, addr);
+ return 0;
+}
+
+static void
+sil164_save_state(struct i2c_client *client, uint8_t *state)
+{
+ int i;
+
+ for (i = 0x8; i <= 0xe; i++)
+ state[i] = sil164_read(client, i);
+}
+
+static void
+sil164_restore_state(struct i2c_client *client, uint8_t *state)
+{
+ int i;
+
+ for (i = 0x8; i <= 0xe; i++)
+ sil164_write(client, i, state[i]);
+}
+
+static void
+sil164_set_power_state(struct i2c_client *client, bool on)
+{
+ uint8_t control0 = sil164_read(client, SIL164_CONTROL0);
+
+ if (on)
+ control0 |= SIL164_CONTROL0_POWER_ON;
+ else
+ control0 &= ~SIL164_CONTROL0_POWER_ON;
+
+ sil164_write(client, SIL164_CONTROL0, control0);
+}
+
+static void
+sil164_init_state(struct i2c_client *client,
+ struct sil164_encoder_params *config,
+ bool duallink)
+{
+ sil164_write(client, SIL164_CONTROL0,
+ SIL164_CONTROL0_HSYNC_ON |
+ SIL164_CONTROL0_VSYNC_ON |
+ (config->input_edge ? SIL164_CONTROL0_EDGE_RISING : 0) |
+ (config->input_width ? SIL164_CONTROL0_INPUT_24BIT : 0) |
+ (config->input_dual ? SIL164_CONTROL0_DUAL_EDGE : 0));
+
+ sil164_write(client, SIL164_DETECT,
+ SIL164_DETECT_INTR_STAT |
+ SIL164_DETECT_OUT_MODE_RECEIVER);
+
+ sil164_write(client, SIL164_CONTROL1,
+ (config->input_skew ? SIL164_CONTROL1_DESKEW_ENABLE : 0) |
+ (((config->input_skew + 4) & 0x7)
+ << SIL164_CONTROL1_DESKEW_INCR_SHIFT));
+
+ sil164_write(client, SIL164_CONTROL2,
+ SIL164_CONTROL2_SYNC_CONT |
+ (config->pll_filter ? 0 : SIL164_CONTROL2_FILTER_ENABLE) |
+ (4 << SIL164_CONTROL2_FILTER_SETTING_SHIFT));
+
+ sil164_write(client, SIL164_PLLZONE, 0);
+
+ if (duallink)
+ sil164_write(client, SIL164_DUALLINK,
+ SIL164_DUALLINK_ENABLE |
+ (((config->duallink_skew + 4) & 0x7)
+ << SIL164_DUALLINK_SKEW_SHIFT));
+ else
+ sil164_write(client, SIL164_DUALLINK, 0);
+}
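
To see what lands in CONTROL0 for a typical single-link setup, here is a standalone recomputation of the bitfield assembled above (register bits copied from the definitions earlier in this file; the config values are hypothetical):

#include <stdio.h>

#define SIL164_CONTROL0_EDGE_RISING	0x02
#define SIL164_CONTROL0_INPUT_24BIT	0x04
#define SIL164_CONTROL0_DUAL_EDGE	0x08
#define SIL164_CONTROL0_HSYNC_ON	0x10
#define SIL164_CONTROL0_VSYNC_ON	0x20

int main(void)
{
	/* hypothetical config: rising edge, 24-bit bus, single-edge clock */
	int input_edge = 1, input_width = 1, input_dual = 0;

	unsigned char control0 = SIL164_CONTROL0_HSYNC_ON |
				 SIL164_CONTROL0_VSYNC_ON |
				 (input_edge ? SIL164_CONTROL0_EDGE_RISING : 0) |
				 (input_width ? SIL164_CONTROL0_INPUT_24BIT : 0) |
				 (input_dual ? SIL164_CONTROL0_DUAL_EDGE : 0);

	printf("CONTROL0 = 0x%02x\n", control0);	/* prints 0x36 */
	return 0;
}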
+
+/* DRM encoder functions */
+
+static void
+sil164_encoder_set_config(struct drm_encoder *encoder, void *params)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+
+ priv->config = *(struct sil164_encoder_params *)params;
+}
+
+static void
+sil164_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+ bool on = (mode == DRM_MODE_DPMS_ON);
+ bool duallink = (on && encoder->crtc->mode.clock > 165000);
+
+ sil164_set_power_state(drm_i2c_encoder_get_client(encoder), on);
+
+ if (priv->duallink_slave)
+ sil164_set_power_state(priv->duallink_slave, duallink);
+}
+
+static void
+sil164_encoder_save(struct drm_encoder *encoder)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+
+ sil164_save_state(drm_i2c_encoder_get_client(encoder),
+ priv->saved_state);
+
+ if (priv->duallink_slave)
+ sil164_save_state(priv->duallink_slave,
+ priv->saved_slave_state);
+}
+
+static void
+sil164_encoder_restore(struct drm_encoder *encoder)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+
+ sil164_restore_state(drm_i2c_encoder_get_client(encoder),
+ priv->saved_state);
+
+ if (priv->duallink_slave)
+ sil164_restore_state(priv->duallink_slave,
+ priv->saved_slave_state);
+}
+
+static bool
+sil164_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int
+sil164_encoder_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+
+ if (mode->clock < 32000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->clock > 330000 ||
+ (mode->clock > 165000 && !priv->duallink_slave))
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
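
The thresholds encode single-link TMDS topping out at 165 MHz, with a detected slave doubling the budget to 330 MHz. A rough standalone mirror of the limits (clocks in kHz, as in struct drm_display_mode):

#include <stdio.h>

static const char *sil164_link(int clock_khz, int have_slave)
{
	if (clock_khz < 32000)
		return "clock too low";
	if (clock_khz <= 165000)
		return "single link";
	if (clock_khz <= 330000 && have_slave)
		return "dual link";
	return "clock too high";
}

int main(void)
{
	printf("1920x1080@60 (148500 kHz): %s\n", sil164_link(148500, 0));
	printf("2560x1600@60 (268500 kHz): %s\n", sil164_link(268500, 1));
	return 0;
}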
+
+static void
+sil164_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+ bool duallink = adjusted_mode->clock > 165000;
+
+ sil164_init_state(drm_i2c_encoder_get_client(encoder),
+ &priv->config, duallink);
+
+ if (priv->duallink_slave)
+ sil164_init_state(priv->duallink_slave,
+ &priv->config, duallink);
+
+ sil164_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static enum drm_connector_status
+sil164_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+
+ if (sil164_read(client, SIL164_DETECT) & SIL164_DETECT_HOTPLUG_STAT)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static int
+sil164_encoder_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ return 0;
+}
+
+static int
+sil164_encoder_create_resources(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ return 0;
+}
+
+static int
+sil164_encoder_set_property(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void
+sil164_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+
+ if (priv->duallink_slave)
+ i2c_unregister_device(priv->duallink_slave);
+
+ kfree(priv);
+ drm_i2c_encoder_destroy(encoder);
+}
+
+static struct drm_encoder_slave_funcs sil164_encoder_funcs = {
+ .set_config = sil164_encoder_set_config,
+ .destroy = sil164_encoder_destroy,
+ .dpms = sil164_encoder_dpms,
+ .save = sil164_encoder_save,
+ .restore = sil164_encoder_restore,
+ .mode_fixup = sil164_encoder_mode_fixup,
+ .mode_valid = sil164_encoder_mode_valid,
+ .mode_set = sil164_encoder_mode_set,
+ .detect = sil164_encoder_detect,
+ .get_modes = sil164_encoder_get_modes,
+ .create_resources = sil164_encoder_create_resources,
+ .set_property = sil164_encoder_set_property,
+};
+
+/* I2C driver functions */
+
+static int
+sil164_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ int vendor = sil164_read(client, SIL164_VENDOR_HI) << 8 |
+ sil164_read(client, SIL164_VENDOR_LO);
+ int device = sil164_read(client, SIL164_DEVICE_HI) << 8 |
+ sil164_read(client, SIL164_DEVICE_LO);
+ int rev = sil164_read(client, SIL164_REVISION);
+
+ if (vendor != 0x1 || device != 0x6) {
+ sil164_dbg(client, "Unknown device %x:%x.%x\n",
+ vendor, device, rev);
+ return -ENODEV;
+ }
+
+ sil164_info(client, "Detected device %x:%x.%x\n",
+ vendor, device, rev);
+
+ return 0;
+}
+
+static int
+sil164_remove(struct i2c_client *client)
+{
+ return 0;
+}
+
+static struct i2c_client *
+sil164_detect_slave(struct i2c_client *client)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg = {
+ .addr = SIL164_I2C_ADDR_SLAVE,
+ .len = 0,
+ };
+ const struct i2c_board_info info = {
+ I2C_BOARD_INFO("sil164", SIL164_I2C_ADDR_SLAVE)
+ };
+
+ if (i2c_transfer(adap, &msg, 1) != 1) {
+		sil164_dbg(adap, "No dual-link slave found.\n");
+ return NULL;
+ }
+
+ return i2c_new_device(adap, &info);
+}
+
+static int
+sil164_encoder_init(struct i2c_client *client,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder)
+{
+ struct sil164_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ encoder->slave_priv = priv;
+ encoder->slave_funcs = &sil164_encoder_funcs;
+
+ priv->duallink_slave = sil164_detect_slave(client);
+
+ return 0;
+}
+
+static struct i2c_device_id sil164_ids[] = {
+ { "sil164", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, sil164_ids);
+
+static struct drm_i2c_encoder_driver sil164_driver = {
+ .i2c_driver = {
+ .probe = sil164_probe,
+ .remove = sil164_remove,
+ .driver = {
+ .name = "sil164",
+ },
+ .id_table = sil164_ids,
+ },
+ .encoder_init = sil164_encoder_init,
+};
+
+/* Module initialization */
+
+static int __init
+sil164_init(void)
+{
+ return drm_i2c_encoder_register(THIS_MODULE, &sil164_driver);
+}
+
+static void __exit
+sil164_exit(void)
+{
+ drm_i2c_encoder_unregister(&sil164_driver);
+}
+
+MODULE_AUTHOR("Francisco Jerez <currojerez@riseup.net>");
+MODULE_DESCRIPTION("Silicon Image sil164 TMDS transmitter driver");
+MODULE_LICENSE("GPL and additional rights");
+
+module_init(sil164_init);
+module_exit(sil164_exit);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
new file mode 100644
index 00000000000..ac357b02bd3
--- /dev/null
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -0,0 +1,1422 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+
+#include <linux/hdmi.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <sound/asoundef.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_edid.h>
+#include <drm/i2c/tda998x.h>
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+
+struct tda998x_priv {
+ struct i2c_client *cec;
+ struct i2c_client *hdmi;
+ uint16_t rev;
+ uint8_t current_page;
+ int dpms;
+ bool is_hdmi_sink;
+ u8 vip_cntrl_0;
+ u8 vip_cntrl_1;
+ u8 vip_cntrl_2;
+ struct tda998x_encoder_params params;
+
+ wait_queue_head_t wq_edid;
+ volatile int wq_edid_wait;
+ struct drm_encoder *encoder;
+};
+
+#define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
+
+/* The TDA9988 series of devices use a paged register scheme.  To simplify
+ * things we encode the page # in the upper bits of the register #.  To
+ * read/write a given register, we need to make sure the CURPAGE register
+ * is set appropriately, which implies reads/writes are not atomic.  Fun!
+ */
+
+#define REG(page, addr) (((page) << 8) | (addr))
+#define REG2ADDR(reg) ((reg) & 0xff)
+#define REG2PAGE(reg) (((reg) >> 8) & 0xff)
+
+#define REG_CURPAGE 0xff /* write */
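
The page/address packing is easy to sanity-check in isolation (same macros, standalone):

#include <stdio.h>

#define REG(page, addr)	(((page) << 8) | (addr))
#define REG2ADDR(reg)	((reg) & 0xff)
#define REG2PAGE(reg)	(((reg) >> 8) & 0xff)

int main(void)
{
	unsigned int reg = REG(0x02, 0x01);	/* REG_PLL_SERIAL_2, defined below */

	printf("reg = 0x%04x, page = 0x%02x, addr = 0x%02x\n",
	       reg, REG2PAGE(reg), REG2ADDR(reg));
	return 0;
}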
+
+
+/* Page 00h: General Control */
+#define REG_VERSION_LSB REG(0x00, 0x00) /* read */
+#define REG_MAIN_CNTRL0 REG(0x00, 0x01) /* read/write */
+# define MAIN_CNTRL0_SR (1 << 0)
+# define MAIN_CNTRL0_DECS (1 << 1)
+# define MAIN_CNTRL0_DEHS (1 << 2)
+# define MAIN_CNTRL0_CECS (1 << 3)
+# define MAIN_CNTRL0_CEHS (1 << 4)
+# define MAIN_CNTRL0_SCALER (1 << 7)
+#define REG_VERSION_MSB REG(0x00, 0x02) /* read */
+#define REG_SOFTRESET REG(0x00, 0x0a) /* write */
+# define SOFTRESET_AUDIO (1 << 0)
+# define SOFTRESET_I2C_MASTER (1 << 1)
+#define REG_DDC_DISABLE REG(0x00, 0x0b) /* read/write */
+#define REG_CCLK_ON REG(0x00, 0x0c) /* read/write */
+#define REG_I2C_MASTER REG(0x00, 0x0d) /* read/write */
+# define I2C_MASTER_DIS_MM (1 << 0)
+# define I2C_MASTER_DIS_FILT (1 << 1)
+# define I2C_MASTER_APP_STRT_LAT (1 << 2)
+#define REG_FEAT_POWERDOWN REG(0x00, 0x0e) /* read/write */
+# define FEAT_POWERDOWN_SPDIF (1 << 3)
+#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */
+#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */
+#define REG_INT_FLAGS_2 REG(0x00, 0x11) /* read/write */
+# define INT_FLAGS_2_EDID_BLK_RD (1 << 1)
+#define REG_ENA_ACLK REG(0x00, 0x16) /* read/write */
+#define REG_ENA_VP_0 REG(0x00, 0x18) /* read/write */
+#define REG_ENA_VP_1 REG(0x00, 0x19) /* read/write */
+#define REG_ENA_VP_2 REG(0x00, 0x1a) /* read/write */
+#define REG_ENA_AP REG(0x00, 0x1e) /* read/write */
+#define REG_VIP_CNTRL_0 REG(0x00, 0x20) /* write */
+# define VIP_CNTRL_0_MIRR_A (1 << 7)
+# define VIP_CNTRL_0_SWAP_A(x) (((x) & 7) << 4)
+# define VIP_CNTRL_0_MIRR_B (1 << 3)
+# define VIP_CNTRL_0_SWAP_B(x) (((x) & 7) << 0)
+#define REG_VIP_CNTRL_1 REG(0x00, 0x21) /* write */
+# define VIP_CNTRL_1_MIRR_C (1 << 7)
+# define VIP_CNTRL_1_SWAP_C(x) (((x) & 7) << 4)
+# define VIP_CNTRL_1_MIRR_D (1 << 3)
+# define VIP_CNTRL_1_SWAP_D(x) (((x) & 7) << 0)
+#define REG_VIP_CNTRL_2 REG(0x00, 0x22) /* write */
+# define VIP_CNTRL_2_MIRR_E (1 << 7)
+# define VIP_CNTRL_2_SWAP_E(x) (((x) & 7) << 4)
+# define VIP_CNTRL_2_MIRR_F (1 << 3)
+# define VIP_CNTRL_2_SWAP_F(x) (((x) & 7) << 0)
+#define REG_VIP_CNTRL_3 REG(0x00, 0x23) /* write */
+# define VIP_CNTRL_3_X_TGL (1 << 0)
+# define VIP_CNTRL_3_H_TGL (1 << 1)
+# define VIP_CNTRL_3_V_TGL (1 << 2)
+# define VIP_CNTRL_3_EMB (1 << 3)
+# define VIP_CNTRL_3_SYNC_DE (1 << 4)
+# define VIP_CNTRL_3_SYNC_HS (1 << 5)
+# define VIP_CNTRL_3_DE_INT (1 << 6)
+# define VIP_CNTRL_3_EDGE (1 << 7)
+#define REG_VIP_CNTRL_4 REG(0x00, 0x24) /* write */
+# define VIP_CNTRL_4_BLC(x) (((x) & 3) << 0)
+# define VIP_CNTRL_4_BLANKIT(x) (((x) & 3) << 2)
+# define VIP_CNTRL_4_CCIR656 (1 << 4)
+# define VIP_CNTRL_4_656_ALT (1 << 5)
+# define VIP_CNTRL_4_TST_656 (1 << 6)
+# define VIP_CNTRL_4_TST_PAT (1 << 7)
+#define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */
+# define VIP_CNTRL_5_CKCASE (1 << 0)
+# define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1)
+#define REG_MUX_AP REG(0x00, 0x26) /* read/write */
+# define MUX_AP_SELECT_I2S 0x64
+# define MUX_AP_SELECT_SPDIF 0x40
+#define REG_MUX_VP_VIP_OUT REG(0x00, 0x27) /* read/write */
+#define REG_MAT_CONTRL REG(0x00, 0x80) /* write */
+# define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0)
+# define MAT_CONTRL_MAT_BP (1 << 2)
+#define REG_VIDFORMAT REG(0x00, 0xa0) /* write */
+#define REG_REFPIX_MSB REG(0x00, 0xa1) /* write */
+#define REG_REFPIX_LSB REG(0x00, 0xa2) /* write */
+#define REG_REFLINE_MSB REG(0x00, 0xa3) /* write */
+#define REG_REFLINE_LSB REG(0x00, 0xa4) /* write */
+#define REG_NPIX_MSB REG(0x00, 0xa5) /* write */
+#define REG_NPIX_LSB REG(0x00, 0xa6) /* write */
+#define REG_NLINE_MSB REG(0x00, 0xa7) /* write */
+#define REG_NLINE_LSB REG(0x00, 0xa8) /* write */
+#define REG_VS_LINE_STRT_1_MSB REG(0x00, 0xa9) /* write */
+#define REG_VS_LINE_STRT_1_LSB REG(0x00, 0xaa) /* write */
+#define REG_VS_PIX_STRT_1_MSB REG(0x00, 0xab) /* write */
+#define REG_VS_PIX_STRT_1_LSB REG(0x00, 0xac) /* write */
+#define REG_VS_LINE_END_1_MSB REG(0x00, 0xad) /* write */
+#define REG_VS_LINE_END_1_LSB REG(0x00, 0xae) /* write */
+#define REG_VS_PIX_END_1_MSB REG(0x00, 0xaf) /* write */
+#define REG_VS_PIX_END_1_LSB REG(0x00, 0xb0) /* write */
+#define REG_VS_LINE_STRT_2_MSB REG(0x00, 0xb1) /* write */
+#define REG_VS_LINE_STRT_2_LSB REG(0x00, 0xb2) /* write */
+#define REG_VS_PIX_STRT_2_MSB REG(0x00, 0xb3) /* write */
+#define REG_VS_PIX_STRT_2_LSB REG(0x00, 0xb4) /* write */
+#define REG_VS_LINE_END_2_MSB REG(0x00, 0xb5) /* write */
+#define REG_VS_LINE_END_2_LSB REG(0x00, 0xb6) /* write */
+#define REG_VS_PIX_END_2_MSB REG(0x00, 0xb7) /* write */
+#define REG_VS_PIX_END_2_LSB REG(0x00, 0xb8) /* write */
+#define REG_HS_PIX_START_MSB REG(0x00, 0xb9) /* write */
+#define REG_HS_PIX_START_LSB REG(0x00, 0xba) /* write */
+#define REG_HS_PIX_STOP_MSB REG(0x00, 0xbb) /* write */
+#define REG_HS_PIX_STOP_LSB REG(0x00, 0xbc) /* write */
+#define REG_VWIN_START_1_MSB REG(0x00, 0xbd) /* write */
+#define REG_VWIN_START_1_LSB REG(0x00, 0xbe) /* write */
+#define REG_VWIN_END_1_MSB REG(0x00, 0xbf) /* write */
+#define REG_VWIN_END_1_LSB REG(0x00, 0xc0) /* write */
+#define REG_VWIN_START_2_MSB REG(0x00, 0xc1) /* write */
+#define REG_VWIN_START_2_LSB REG(0x00, 0xc2) /* write */
+#define REG_VWIN_END_2_MSB REG(0x00, 0xc3) /* write */
+#define REG_VWIN_END_2_LSB REG(0x00, 0xc4) /* write */
+#define REG_DE_START_MSB REG(0x00, 0xc5) /* write */
+#define REG_DE_START_LSB REG(0x00, 0xc6) /* write */
+#define REG_DE_STOP_MSB REG(0x00, 0xc7) /* write */
+#define REG_DE_STOP_LSB REG(0x00, 0xc8) /* write */
+#define REG_TBG_CNTRL_0 REG(0x00, 0xca) /* write */
+# define TBG_CNTRL_0_TOP_TGL (1 << 0)
+# define TBG_CNTRL_0_TOP_SEL (1 << 1)
+# define TBG_CNTRL_0_DE_EXT (1 << 2)
+# define TBG_CNTRL_0_TOP_EXT (1 << 3)
+# define TBG_CNTRL_0_FRAME_DIS (1 << 5)
+# define TBG_CNTRL_0_SYNC_MTHD (1 << 6)
+# define TBG_CNTRL_0_SYNC_ONCE (1 << 7)
+#define REG_TBG_CNTRL_1 REG(0x00, 0xcb) /* write */
+# define TBG_CNTRL_1_H_TGL (1 << 0)
+# define TBG_CNTRL_1_V_TGL (1 << 1)
+# define TBG_CNTRL_1_TGL_EN (1 << 2)
+# define TBG_CNTRL_1_X_EXT (1 << 3)
+# define TBG_CNTRL_1_H_EXT (1 << 4)
+# define TBG_CNTRL_1_V_EXT (1 << 5)
+# define TBG_CNTRL_1_DWIN_DIS (1 << 6)
+#define REG_ENABLE_SPACE REG(0x00, 0xd6) /* write */
+#define REG_HVF_CNTRL_0 REG(0x00, 0xe4) /* write */
+# define HVF_CNTRL_0_SM (1 << 7)
+# define HVF_CNTRL_0_RWB (1 << 6)
+# define HVF_CNTRL_0_PREFIL(x) (((x) & 3) << 2)
+# define HVF_CNTRL_0_INTPOL(x) (((x) & 3) << 0)
+#define REG_HVF_CNTRL_1 REG(0x00, 0xe5) /* write */
+# define HVF_CNTRL_1_FOR (1 << 0)
+# define HVF_CNTRL_1_YUVBLK (1 << 1)
+# define HVF_CNTRL_1_VQR(x) (((x) & 3) << 2)
+# define HVF_CNTRL_1_PAD(x) (((x) & 3) << 4)
+# define HVF_CNTRL_1_SEMI_PLANAR (1 << 6)
+#define REG_RPT_CNTRL REG(0x00, 0xf0) /* write */
+#define REG_I2S_FORMAT REG(0x00, 0xfc) /* read/write */
+# define I2S_FORMAT(x) (((x) & 3) << 0)
+#define REG_AIP_CLKSEL REG(0x00, 0xfd) /* write */
+# define AIP_CLKSEL_AIP_SPDIF (0 << 3)
+# define AIP_CLKSEL_AIP_I2S (1 << 3)
+# define AIP_CLKSEL_FS_ACLK (0 << 0)
+# define AIP_CLKSEL_FS_MCLK (1 << 0)
+# define AIP_CLKSEL_FS_FS64SPDIF (2 << 0)
+
+/* Page 02h: PLL settings */
+#define REG_PLL_SERIAL_1 REG(0x02, 0x00) /* read/write */
+# define PLL_SERIAL_1_SRL_FDN (1 << 0)
+# define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1)
+# define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6)
+#define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */
+# define PLL_SERIAL_2_SRL_NOSC(x) ((x) << 0)
+# define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4)
+#define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */
+# define PLL_SERIAL_3_SRL_CCIR (1 << 0)
+# define PLL_SERIAL_3_SRL_DE (1 << 2)
+# define PLL_SERIAL_3_SRL_PXIN_SEL (1 << 4)
+#define REG_SERIALIZER REG(0x02, 0x03) /* read/write */
+#define REG_BUFFER_OUT REG(0x02, 0x04) /* read/write */
+#define REG_PLL_SCG1 REG(0x02, 0x05) /* read/write */
+#define REG_PLL_SCG2 REG(0x02, 0x06) /* read/write */
+#define REG_PLL_SCGN1 REG(0x02, 0x07) /* read/write */
+#define REG_PLL_SCGN2 REG(0x02, 0x08) /* read/write */
+#define REG_PLL_SCGR1 REG(0x02, 0x09) /* read/write */
+#define REG_PLL_SCGR2 REG(0x02, 0x0a) /* read/write */
+#define REG_AUDIO_DIV REG(0x02, 0x0e) /* read/write */
+# define AUDIO_DIV_SERCLK_1 0
+# define AUDIO_DIV_SERCLK_2 1
+# define AUDIO_DIV_SERCLK_4 2
+# define AUDIO_DIV_SERCLK_8 3
+# define AUDIO_DIV_SERCLK_16 4
+# define AUDIO_DIV_SERCLK_32 5
+#define REG_SEL_CLK REG(0x02, 0x11) /* read/write */
+# define SEL_CLK_SEL_CLK1 (1 << 0)
+# define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1)
+# define SEL_CLK_ENA_SC_CLK (1 << 3)
+#define REG_ANA_GENERAL REG(0x02, 0x12) /* read/write */
+
+
+/* Page 09h: EDID Control */
+#define REG_EDID_DATA_0 REG(0x09, 0x00) /* read */
+/* next 127 successive registers are the EDID block */
+#define REG_EDID_CTRL REG(0x09, 0xfa) /* read/write */
+#define REG_DDC_ADDR REG(0x09, 0xfb) /* read/write */
+#define REG_DDC_OFFS REG(0x09, 0xfc) /* read/write */
+#define REG_DDC_SEGM_ADDR REG(0x09, 0xfd) /* read/write */
+#define REG_DDC_SEGM REG(0x09, 0xfe) /* read/write */
+
+
+/* Page 10h: information frames and packets */
+#define REG_IF1_HB0 REG(0x10, 0x20) /* read/write */
+#define REG_IF2_HB0 REG(0x10, 0x40) /* read/write */
+#define REG_IF3_HB0 REG(0x10, 0x60) /* read/write */
+#define REG_IF4_HB0 REG(0x10, 0x80) /* read/write */
+#define REG_IF5_HB0 REG(0x10, 0xa0) /* read/write */
+
+
+/* Page 11h: audio settings and content info packets */
+#define REG_AIP_CNTRL_0 REG(0x11, 0x00) /* read/write */
+# define AIP_CNTRL_0_RST_FIFO (1 << 0)
+# define AIP_CNTRL_0_SWAP (1 << 1)
+# define AIP_CNTRL_0_LAYOUT (1 << 2)
+# define AIP_CNTRL_0_ACR_MAN (1 << 5)
+# define AIP_CNTRL_0_RST_CTS (1 << 6)
+#define REG_CA_I2S REG(0x11, 0x01) /* read/write */
+# define CA_I2S_CA_I2S(x) (((x) & 31) << 0)
+# define CA_I2S_HBR_CHSTAT (1 << 6)
+#define REG_LATENCY_RD REG(0x11, 0x04) /* read/write */
+#define REG_ACR_CTS_0 REG(0x11, 0x05) /* read/write */
+#define REG_ACR_CTS_1 REG(0x11, 0x06) /* read/write */
+#define REG_ACR_CTS_2 REG(0x11, 0x07) /* read/write */
+#define REG_ACR_N_0 REG(0x11, 0x08) /* read/write */
+#define REG_ACR_N_1 REG(0x11, 0x09) /* read/write */
+#define REG_ACR_N_2 REG(0x11, 0x0a) /* read/write */
+#define REG_CTS_N REG(0x11, 0x0c) /* read/write */
+# define CTS_N_K(x) (((x) & 7) << 0)
+# define CTS_N_M(x) (((x) & 3) << 4)
+#define REG_ENC_CNTRL REG(0x11, 0x0d) /* read/write */
+# define ENC_CNTRL_RST_ENC (1 << 0)
+# define ENC_CNTRL_RST_SEL (1 << 1)
+# define ENC_CNTRL_CTL_CODE(x) (((x) & 3) << 2)
+#define REG_DIP_FLAGS REG(0x11, 0x0e) /* read/write */
+# define DIP_FLAGS_ACR (1 << 0)
+# define DIP_FLAGS_GC (1 << 1)
+#define REG_DIP_IF_FLAGS REG(0x11, 0x0f) /* read/write */
+# define DIP_IF_FLAGS_IF1 (1 << 1)
+# define DIP_IF_FLAGS_IF2 (1 << 2)
+# define DIP_IF_FLAGS_IF3 (1 << 3)
+# define DIP_IF_FLAGS_IF4 (1 << 4)
+# define DIP_IF_FLAGS_IF5 (1 << 5)
+#define REG_CH_STAT_B(x) REG(0x11, 0x14 + (x)) /* read/write */
+
+
+/* Page 12h: HDCP and OTP */
+#define REG_TX3 REG(0x12, 0x9a) /* read/write */
+#define REG_TX4 REG(0x12, 0x9b) /* read/write */
+# define TX4_PD_RAM (1 << 1)
+#define REG_TX33 REG(0x12, 0xb8) /* read/write */
+# define TX33_HDMI (1 << 1)
+
+
+/* Page 13h: Gamut related metadata packets */
+
+
+
+/* CEC registers: (not paged)
+ */
+#define REG_CEC_INTSTATUS 0xee /* read */
+# define CEC_INTSTATUS_CEC (1 << 0)
+# define CEC_INTSTATUS_HDMI (1 << 1)
+#define REG_CEC_FRO_IM_CLK_CTRL 0xfb /* read/write */
+# define CEC_FRO_IM_CLK_CTRL_GHOST_DIS (1 << 7)
+# define CEC_FRO_IM_CLK_CTRL_ENA_OTP (1 << 6)
+# define CEC_FRO_IM_CLK_CTRL_IMCLK_SEL (1 << 1)
+# define CEC_FRO_IM_CLK_CTRL_FRO_DIV (1 << 0)
+#define REG_CEC_RXSHPDINTENA 0xfc /* read/write */
+#define REG_CEC_RXSHPDINT 0xfd /* read */
+#define REG_CEC_RXSHPDLEV 0xfe /* read */
+# define CEC_RXSHPDLEV_RXSENS (1 << 0)
+# define CEC_RXSHPDLEV_HPD (1 << 1)
+
+#define REG_CEC_ENAMODS 0xff /* read/write */
+# define CEC_ENAMODS_DIS_FRO (1 << 6)
+# define CEC_ENAMODS_DIS_CCLK (1 << 5)
+# define CEC_ENAMODS_EN_RXSENS (1 << 2)
+# define CEC_ENAMODS_EN_HDMI (1 << 1)
+# define CEC_ENAMODS_EN_CEC (1 << 0)
+
+
+/* Device versions: */
+#define TDA9989N2 0x0101
+#define TDA19989 0x0201
+#define TDA19989N2 0x0202
+#define TDA19988 0x0301
+
+static void
+cec_write(struct tda998x_priv *priv, uint16_t addr, uint8_t val)
+{
+ struct i2c_client *client = priv->cec;
+ uint8_t buf[] = {addr, val};
+ int ret;
+
+ ret = i2c_master_send(client, buf, sizeof(buf));
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);
+}
+
+static uint8_t
+cec_read(struct tda998x_priv *priv, uint8_t addr)
+{
+ struct i2c_client *client = priv->cec;
+ uint8_t val;
+ int ret;
+
+ ret = i2c_master_send(client, &addr, sizeof(addr));
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_master_recv(client, &val, sizeof(val));
+ if (ret < 0)
+ goto fail;
+
+ return val;
+
+fail:
+ dev_err(&client->dev, "Error %d reading from cec:0x%x\n", ret, addr);
+ return 0;
+}
+
+static int
+set_page(struct tda998x_priv *priv, uint16_t reg)
+{
+ if (REG2PAGE(reg) != priv->current_page) {
+ struct i2c_client *client = priv->hdmi;
+ uint8_t buf[] = {
+ REG_CURPAGE, REG2PAGE(reg)
+ };
+ int ret = i2c_master_send(client, buf, sizeof(buf));
+ if (ret < 0) {
+ dev_err(&client->dev, "setpage %04x err %d\n",
+ reg, ret);
+ return ret;
+ }
+
+ priv->current_page = REG2PAGE(reg);
+ }
+ return 0;
+}
+
+static int
+reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
+{
+ struct i2c_client *client = priv->hdmi;
+ uint8_t addr = REG2ADDR(reg);
+ int ret;
+
+ ret = set_page(priv, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_master_send(client, &addr, sizeof(addr));
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_master_recv(client, buf, cnt);
+ if (ret < 0)
+ goto fail;
+
+ return ret;
+
+fail:
+ dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+ return ret;
+}
+
+static void
+reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
+{
+ struct i2c_client *client = priv->hdmi;
+ uint8_t buf[cnt+1];
+ int ret;
+
+ buf[0] = REG2ADDR(reg);
+ memcpy(&buf[1], p, cnt);
+
+ ret = set_page(priv, reg);
+ if (ret < 0)
+ return;
+
+ ret = i2c_master_send(client, buf, cnt + 1);
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+}
+
+static int
+reg_read(struct tda998x_priv *priv, uint16_t reg)
+{
+ uint8_t val = 0;
+ int ret;
+
+ ret = reg_read_range(priv, reg, &val, sizeof(val));
+ if (ret < 0)
+ return ret;
+ return val;
+}
+
+static void
+reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
+{
+ struct i2c_client *client = priv->hdmi;
+ uint8_t buf[] = {REG2ADDR(reg), val};
+ int ret;
+
+ ret = set_page(priv, reg);
+ if (ret < 0)
+ return;
+
+ ret = i2c_master_send(client, buf, sizeof(buf));
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+}
+
+static void
+reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
+{
+ struct i2c_client *client = priv->hdmi;
+ uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
+ int ret;
+
+ ret = set_page(priv, reg);
+ if (ret < 0)
+ return;
+
+ ret = i2c_master_send(client, buf, sizeof(buf));
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+}
+
+static void
+reg_set(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
+{
+ int old_val;
+
+ old_val = reg_read(priv, reg);
+ if (old_val >= 0)
+ reg_write(priv, reg, old_val | val);
+}
+
+static void
+reg_clear(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
+{
+ int old_val;
+
+ old_val = reg_read(priv, reg);
+ if (old_val >= 0)
+ reg_write(priv, reg, old_val & ~val);
+}
+
+static void
+tda998x_reset(struct tda998x_priv *priv)
+{
+ /* reset audio and i2c master: */
+ reg_write(priv, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
+ msleep(50);
+ reg_write(priv, REG_SOFTRESET, 0);
+ msleep(50);
+
+ /* reset transmitter: */
+ reg_set(priv, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
+ reg_clear(priv, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
+
+ /* PLL registers common configuration */
+ reg_write(priv, REG_PLL_SERIAL_1, 0x00);
+ reg_write(priv, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(1));
+ reg_write(priv, REG_PLL_SERIAL_3, 0x00);
+ reg_write(priv, REG_SERIALIZER, 0x00);
+ reg_write(priv, REG_BUFFER_OUT, 0x00);
+ reg_write(priv, REG_PLL_SCG1, 0x00);
+ reg_write(priv, REG_AUDIO_DIV, AUDIO_DIV_SERCLK_8);
+ reg_write(priv, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
+ reg_write(priv, REG_PLL_SCGN1, 0xfa);
+ reg_write(priv, REG_PLL_SCGN2, 0x00);
+ reg_write(priv, REG_PLL_SCGR1, 0x5b);
+ reg_write(priv, REG_PLL_SCGR2, 0x00);
+ reg_write(priv, REG_PLL_SCG2, 0x10);
+
+ /* Write the default value MUX register */
+ reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
+}
+
+/*
+ * only 2 interrupts may occur: screen plug/unplug and EDID read
+ */
+static irqreturn_t tda998x_irq_thread(int irq, void *data)
+{
+ struct tda998x_priv *priv = data;
+ u8 sta, cec, lvl, flag0, flag1, flag2;
+
+ if (!priv)
+ return IRQ_HANDLED;
+ sta = cec_read(priv, REG_CEC_INTSTATUS);
+ cec = cec_read(priv, REG_CEC_RXSHPDINT);
+ lvl = cec_read(priv, REG_CEC_RXSHPDLEV);
+ flag0 = reg_read(priv, REG_INT_FLAGS_0);
+ flag1 = reg_read(priv, REG_INT_FLAGS_1);
+ flag2 = reg_read(priv, REG_INT_FLAGS_2);
+ DRM_DEBUG_DRIVER(
+ "tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n",
+ sta, cec, lvl, flag0, flag1, flag2);
+ if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) {
+ priv->wq_edid_wait = 0;
+ wake_up(&priv->wq_edid);
+ } else if (cec != 0) { /* HPD change */
+ if (priv->encoder && priv->encoder->dev)
+ drm_helper_hpd_irq_event(priv->encoder->dev);
+ }
+ return IRQ_HANDLED;
+}
+
+static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
+{
+ int sum = 0;
+
+ while (bytes--)
+ sum -= *buf++;
+ return sum;
+}
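
The checksum is chosen so that all infoframe bytes, including the checksum itself, sum to zero modulo 256. A standalone check with a hypothetical header/payload:

#include <stdio.h>
#include <stdint.h>

static uint8_t cksum(const uint8_t *buf, size_t bytes)
{
	int sum = 0;

	while (bytes--)
		sum -= *buf++;
	return sum;
}

int main(void)
{
	/* hypothetical frame; byte 3 is the checksum slot, initially 0 */
	uint8_t frame[8] = { 0x82, 0x02, 0x0d, 0x00, 0x12, 0x28, 0x04, 0x00 };
	unsigned int i, total = 0;

	frame[3] = cksum(frame, sizeof(frame));

	for (i = 0; i < sizeof(frame); i++)
		total += frame[i];
	printf("sum mod 256 = %u\n", total & 0xff);	/* prints 0 */
	return 0;
}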
+
+#define HB(x) (x)
+#define PB(x) (HB(2) + 1 + (x))
+
+static void
+tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr,
+ uint8_t *buf, size_t size)
+{
+ buf[PB(0)] = tda998x_cksum(buf, size);
+
+ reg_clear(priv, REG_DIP_IF_FLAGS, bit);
+ reg_write_range(priv, addr, buf, size);
+ reg_set(priv, REG_DIP_IF_FLAGS, bit);
+}
+
+static void
+tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
+{
+ u8 buf[PB(HDMI_AUDIO_INFOFRAME_SIZE) + 1];
+
+ memset(buf, 0, sizeof(buf));
+ buf[HB(0)] = HDMI_INFOFRAME_TYPE_AUDIO;
+ buf[HB(1)] = 0x01;
+ buf[HB(2)] = HDMI_AUDIO_INFOFRAME_SIZE;
+ buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
+ buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
+ buf[PB(4)] = p->audio_frame[4];
+ buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
+
+ tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
+ sizeof(buf));
+}
+
+static void
+tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode)
+{
+ u8 buf[PB(HDMI_AVI_INFOFRAME_SIZE) + 1];
+
+ memset(buf, 0, sizeof(buf));
+ buf[HB(0)] = HDMI_INFOFRAME_TYPE_AVI;
+ buf[HB(1)] = 0x02;
+ buf[HB(2)] = HDMI_AVI_INFOFRAME_SIZE;
+ buf[PB(1)] = HDMI_SCAN_MODE_UNDERSCAN;
+ buf[PB(2)] = HDMI_ACTIVE_ASPECT_PICTURE;
+ buf[PB(3)] = HDMI_QUANTIZATION_RANGE_FULL << 2;
+ buf[PB(4)] = drm_match_cea_mode(mode);
+
+ tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
+ sizeof(buf));
+}
+
+static void tda998x_audio_mute(struct tda998x_priv *priv, bool on)
+{
+ if (on) {
+ reg_set(priv, REG_SOFTRESET, SOFTRESET_AUDIO);
+ reg_clear(priv, REG_SOFTRESET, SOFTRESET_AUDIO);
+ reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+ } else {
+ reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+ }
+}
+
+static void
+tda998x_configure_audio(struct tda998x_priv *priv,
+ struct drm_display_mode *mode, struct tda998x_encoder_params *p)
+{
+ uint8_t buf[6], clksel_aip, clksel_fs, cts_n, adiv;
+ uint32_t n;
+
+ /* Enable audio ports */
+ reg_write(priv, REG_ENA_AP, p->audio_cfg);
+ reg_write(priv, REG_ENA_ACLK, p->audio_clk_cfg);
+
+ /* Set audio input source */
+ switch (p->audio_format) {
+ case AFMT_SPDIF:
+ reg_write(priv, REG_MUX_AP, MUX_AP_SELECT_SPDIF);
+ clksel_aip = AIP_CLKSEL_AIP_SPDIF;
+ clksel_fs = AIP_CLKSEL_FS_FS64SPDIF;
+ cts_n = CTS_N_M(3) | CTS_N_K(3);
+ break;
+
+ case AFMT_I2S:
+ reg_write(priv, REG_MUX_AP, MUX_AP_SELECT_I2S);
+ clksel_aip = AIP_CLKSEL_AIP_I2S;
+ clksel_fs = AIP_CLKSEL_FS_ACLK;
+ cts_n = CTS_N_M(3) | CTS_N_K(3);
+ break;
+
+ default:
+ BUG();
+ return;
+ }
+
+ reg_write(priv, REG_AIP_CLKSEL, clksel_aip);
+ reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT |
+ AIP_CNTRL_0_ACR_MAN); /* auto CTS */
+ reg_write(priv, REG_CTS_N, cts_n);
+
+ /*
+ * Audio input somehow depends on HDMI line rate which is
+ * related to pixclk. Testing showed that modes with pixclk
+ * >100MHz need a larger divider while <40MHz need the default.
+ * There is no detailed info in the datasheet, so we just
+	 * assume that >100MHz requires the larger divider.
+ */
+ adiv = AUDIO_DIV_SERCLK_8;
+ if (mode->clock > 100000)
+ adiv++; /* AUDIO_DIV_SERCLK_16 */
+
+ /* S/PDIF asks for a larger divider */
+ if (p->audio_format == AFMT_SPDIF)
+ adiv++; /* AUDIO_DIV_SERCLK_16 or _32 */
+
+ reg_write(priv, REG_AUDIO_DIV, adiv);
+
+ /*
+ * This is the approximate value of N, which happens to be
+	 * the recommended value for non-coherent clocks.
+ */
+ n = 128 * p->audio_sample_rate / 1000;
+
+ /* Write the CTS and N values */
+ buf[0] = 0x44;
+ buf[1] = 0x42;
+ buf[2] = 0x01;
+ buf[3] = n;
+ buf[4] = n >> 8;
+ buf[5] = n >> 16;
+ reg_write_range(priv, REG_ACR_CTS_0, buf, 6);
+
+ /* Set CTS clock reference */
+ reg_write(priv, REG_AIP_CLKSEL, clksel_aip | clksel_fs);
+
+ /* Reset CTS generator */
+ reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
+ reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
+
+ /* Write the channel status */
+ buf[0] = IEC958_AES0_CON_NOT_COPYRIGHT;
+ buf[1] = 0x00;
+ buf[2] = IEC958_AES3_CON_FS_NOTID;
+ buf[3] = IEC958_AES4_CON_ORIGFS_NOTID |
+ IEC958_AES4_CON_MAX_WORDLEN_24;
+ reg_write_range(priv, REG_CH_STAT_B(0), buf, 4);
+
+ tda998x_audio_mute(priv, true);
+ msleep(20);
+ tda998x_audio_mute(priv, false);
+
+ /* Write the audio information packet */
+ tda998x_write_aif(priv, p);
+}
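
For the common 48 kHz case the formula yields N = 6144, matching the HDMI spec's recommended value; the three ACR_N bytes are written LSB-first, exactly as in the buffer above (standalone check; the sample rate is a hypothetical input):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int rate = 48000;	/* stands in for p->audio_sample_rate */
	uint32_t n = 128 * rate / 1000;	/* 6144 for 48 kHz */

	printf("N = %u -> ACR_N_0..2 = %02x %02x %02x\n",
	       n, n & 0xff, (n >> 8) & 0xff, (n >> 16) & 0xff);
	return 0;
}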
+
+/* DRM encoder functions */
+
+static void
+tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+ struct tda998x_encoder_params *p = params;
+
+ priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
+ (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
+ VIP_CNTRL_0_SWAP_B(p->swap_b) |
+ (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
+ priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
+ (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
+ VIP_CNTRL_1_SWAP_D(p->swap_d) |
+ (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
+ priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
+ (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
+ VIP_CNTRL_2_SWAP_F(p->swap_f) |
+ (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
+
+ priv->params = *p;
+}
+
+static void
+tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+
+ /* we only care about on or off: */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == priv->dpms)
+ return;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ /* enable video ports, audio will be enabled later */
+ reg_write(priv, REG_ENA_VP_0, 0xff);
+ reg_write(priv, REG_ENA_VP_1, 0xff);
+ reg_write(priv, REG_ENA_VP_2, 0xff);
+ /* set muxing after enabling ports: */
+ reg_write(priv, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
+ reg_write(priv, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
+ reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* disable video ports */
+ reg_write(priv, REG_ENA_VP_0, 0x00);
+ reg_write(priv, REG_ENA_VP_1, 0x00);
+ reg_write(priv, REG_ENA_VP_2, 0x00);
+ break;
+ }
+
+ priv->dpms = mode;
+}
+
+static void
+tda998x_encoder_save(struct drm_encoder *encoder)
+{
+ DBG("");
+}
+
+static void
+tda998x_encoder_restore(struct drm_encoder *encoder)
+{
+ DBG("");
+}
+
+static bool
+tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int
+tda998x_encoder_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 150000)
+ return MODE_CLOCK_HIGH;
+ if (mode->htotal >= BIT(13))
+ return MODE_BAD_HVALUE;
+ if (mode->vtotal >= BIT(11))
+ return MODE_BAD_VVALUE;
+ return MODE_OK;
+}
+
+static void
+tda998x_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+ uint16_t ref_pix, ref_line, n_pix, n_line;
+ uint16_t hs_pix_s, hs_pix_e;
+ uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
+ uint16_t vs2_pix_s, vs2_pix_e, vs2_line_s, vs2_line_e;
+ uint16_t vwin1_line_s, vwin1_line_e;
+ uint16_t vwin2_line_s, vwin2_line_e;
+ uint16_t de_pix_s, de_pix_e;
+ uint8_t reg, div, rep;
+
+ /*
+ * Internally TDA998x is using ITU-R BT.656 style sync but
+ * we get VESA style sync. TDA998x is using a reference pixel
+ * relative to ITU to sync to the input frame and for output
+	 * sync generation. Currently, we are using reference detection
+	 * from HS/VS, i.e. REFPIX/REFLINE denote the frame start sync point,
+	 * which is the position of the rising VS with coincident rising HS.
+	 *
+	 * Now there are some issues to take care of:
+	 * - HDMI data islands require sync-before-active
+	 * - TDA998x register values must be > 0 to be enabled
+	 * - REFLINE needs an additional offset of +1
+	 * - REFPIX needs an additional offset of +1 for UYUV and +3 for RGB
+ *
+ * So we add +1 to all horizontal and vertical register values,
+ * plus an additional +3 for REFPIX as we are using RGB input only.
+ */
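+	/*
+	 * A worked example for illustration (standard CEA 1280x720p60
+	 * timings): htotal=1650, hdisplay=1280, hsync_start=1390,
+	 * hsync_end=1430, so the code below computes hs_pix_s=110,
+	 * hs_pix_e=150, de_pix_s=370, de_pix_e=1650 and
+	 * ref_pix = 3 + 110 = 113.
+	 */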
+ n_pix = mode->htotal;
+ n_line = mode->vtotal;
+
+ hs_pix_e = mode->hsync_end - mode->hdisplay;
+ hs_pix_s = mode->hsync_start - mode->hdisplay;
+ de_pix_e = mode->htotal;
+ de_pix_s = mode->htotal - mode->hdisplay;
+ ref_pix = 3 + hs_pix_s;
+
+ /*
+ * Attached LCD controllers may generate broken sync. Allow
+ * those to adjust the position of the rising VS edge by adding
+ * HSKEW to ref_pix.
+ */
+ if (adjusted_mode->flags & DRM_MODE_FLAG_HSKEW)
+ ref_pix += adjusted_mode->hskew;
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0) {
+ ref_line = 1 + mode->vsync_start - mode->vdisplay;
+ vwin1_line_s = mode->vtotal - mode->vdisplay - 1;
+ vwin1_line_e = vwin1_line_s + mode->vdisplay;
+ vs1_pix_s = vs1_pix_e = hs_pix_s;
+ vs1_line_s = mode->vsync_start - mode->vdisplay;
+ vs1_line_e = vs1_line_s +
+ mode->vsync_end - mode->vsync_start;
+ vwin2_line_s = vwin2_line_e = 0;
+ vs2_pix_s = vs2_pix_e = 0;
+ vs2_line_s = vs2_line_e = 0;
+ } else {
+ ref_line = 1 + (mode->vsync_start - mode->vdisplay)/2;
+ vwin1_line_s = (mode->vtotal - mode->vdisplay)/2;
+ vwin1_line_e = vwin1_line_s + mode->vdisplay/2;
+ vs1_pix_s = vs1_pix_e = hs_pix_s;
+ vs1_line_s = (mode->vsync_start - mode->vdisplay)/2;
+ vs1_line_e = vs1_line_s +
+ (mode->vsync_end - mode->vsync_start)/2;
+ vwin2_line_s = vwin1_line_s + mode->vtotal/2;
+ vwin2_line_e = vwin2_line_s + mode->vdisplay/2;
+ vs2_pix_s = vs2_pix_e = hs_pix_s + mode->htotal/2;
+		vs2_line_s = vs1_line_s + mode->vtotal/2;
+ vs2_line_e = vs2_line_s +
+ (mode->vsync_end - mode->vsync_start)/2;
+ }
+
+ div = 148500 / mode->clock;
+ if (div != 0) {
+ div--;
+ if (div > 3)
+ div = 3;
+ }
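+	/*
+	 * For illustration: a 148.5 MHz clock gives div = 1 - 1 = 0,
+	 * 74.25 MHz gives div = 2 - 1 = 1, and clocks at or below
+	 * 37.125 MHz end up at the maximum of div = 3.
+	 */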
+
+ /* mute the audio FIFO: */
+ reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+
+ /* set HDMI HDCP mode off: */
+ reg_write(priv, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
+ reg_clear(priv, REG_TX33, TX33_HDMI);
+ reg_write(priv, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(0));
+
+ /* no pre-filter or interpolator: */
+ reg_write(priv, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) |
+ HVF_CNTRL_0_INTPOL(0));
+ reg_write(priv, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0));
+ reg_write(priv, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) |
+ VIP_CNTRL_4_BLC(0));
+
+ reg_clear(priv, REG_PLL_SERIAL_1, PLL_SERIAL_1_SRL_MAN_IZ);
+ reg_clear(priv, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_CCIR |
+ PLL_SERIAL_3_SRL_DE);
+ reg_write(priv, REG_SERIALIZER, 0);
+ reg_write(priv, REG_HVF_CNTRL_1, HVF_CNTRL_1_VQR(0));
+
+ /* TODO enable pixel repeat for pixel rates less than 25Msamp/s */
+ rep = 0;
+ reg_write(priv, REG_RPT_CNTRL, 0);
+ reg_write(priv, REG_SEL_CLK, SEL_CLK_SEL_VRF_CLK(0) |
+ SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
+
+ reg_write(priv, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
+ PLL_SERIAL_2_SRL_PR(rep));
+
+ /* set color matrix bypass flag: */
+ reg_write(priv, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP |
+ MAT_CONTRL_MAT_SC(1));
+
+ /* set BIAS tmds value: */
+ reg_write(priv, REG_ANA_GENERAL, 0x09);
+
+ /*
+ * Sync on rising HSYNC/VSYNC
+ */
+ reg = VIP_CNTRL_3_SYNC_HS;
+
+ /*
+ * TDA19988 requires high-active sync at input stage,
+ * so invert low-active sync provided by master encoder here
+ */
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ reg |= VIP_CNTRL_3_H_TGL;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ reg |= VIP_CNTRL_3_V_TGL;
+ reg_write(priv, REG_VIP_CNTRL_3, reg);
+
+ reg_write(priv, REG_VIDFORMAT, 0x00);
+ reg_write16(priv, REG_REFPIX_MSB, ref_pix);
+ reg_write16(priv, REG_REFLINE_MSB, ref_line);
+ reg_write16(priv, REG_NPIX_MSB, n_pix);
+ reg_write16(priv, REG_NLINE_MSB, n_line);
+ reg_write16(priv, REG_VS_LINE_STRT_1_MSB, vs1_line_s);
+ reg_write16(priv, REG_VS_PIX_STRT_1_MSB, vs1_pix_s);
+ reg_write16(priv, REG_VS_LINE_END_1_MSB, vs1_line_e);
+ reg_write16(priv, REG_VS_PIX_END_1_MSB, vs1_pix_e);
+ reg_write16(priv, REG_VS_LINE_STRT_2_MSB, vs2_line_s);
+ reg_write16(priv, REG_VS_PIX_STRT_2_MSB, vs2_pix_s);
+ reg_write16(priv, REG_VS_LINE_END_2_MSB, vs2_line_e);
+ reg_write16(priv, REG_VS_PIX_END_2_MSB, vs2_pix_e);
+ reg_write16(priv, REG_HS_PIX_START_MSB, hs_pix_s);
+ reg_write16(priv, REG_HS_PIX_STOP_MSB, hs_pix_e);
+ reg_write16(priv, REG_VWIN_START_1_MSB, vwin1_line_s);
+ reg_write16(priv, REG_VWIN_END_1_MSB, vwin1_line_e);
+ reg_write16(priv, REG_VWIN_START_2_MSB, vwin2_line_s);
+ reg_write16(priv, REG_VWIN_END_2_MSB, vwin2_line_e);
+ reg_write16(priv, REG_DE_START_MSB, de_pix_s);
+ reg_write16(priv, REG_DE_STOP_MSB, de_pix_e);
+
+ if (priv->rev == TDA19988) {
+ /* let incoming pixels fill the active space (if any) */
+ reg_write(priv, REG_ENABLE_SPACE, 0x00);
+ }
+
+ /*
+ * Always generate sync polarity relative to input sync and
+ * revert input stage toggled sync at output stage
+ */
+ reg = TBG_CNTRL_1_DWIN_DIS | TBG_CNTRL_1_TGL_EN;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ reg |= TBG_CNTRL_1_H_TGL;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ reg |= TBG_CNTRL_1_V_TGL;
+ reg_write(priv, REG_TBG_CNTRL_1, reg);
+
+ /* must be last register set: */
+ reg_write(priv, REG_TBG_CNTRL_0, 0);
+
+ /* Only setup the info frames if the sink is HDMI */
+ if (priv->is_hdmi_sink) {
+ /* We need to turn HDMI HDCP stuff on to get audio through */
+ reg &= ~TBG_CNTRL_1_DWIN_DIS;
+ reg_write(priv, REG_TBG_CNTRL_1, reg);
+ reg_write(priv, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1));
+ reg_set(priv, REG_TX33, TX33_HDMI);
+
+ tda998x_write_avi(priv, adjusted_mode);
+
+ if (priv->params.audio_cfg)
+ tda998x_configure_audio(priv, adjusted_mode,
+ &priv->params);
+ }
+}
+
+static enum drm_connector_status
+tda998x_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+ uint8_t val = cec_read(priv, REG_CEC_RXSHPDLEV);
+
+ return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static int
+read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+ uint8_t offset, segptr;
+ int ret, i;
+
+ offset = (blk & 1) ? 128 : 0;
+ segptr = blk / 2;
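+	/* e.g. EDID block 3 lives in DDC segment 1 at offset 128 */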
+
+ reg_write(priv, REG_DDC_ADDR, 0xa0);
+ reg_write(priv, REG_DDC_OFFS, offset);
+ reg_write(priv, REG_DDC_SEGM_ADDR, 0x60);
+ reg_write(priv, REG_DDC_SEGM, segptr);
+
+ /* enable reading EDID: */
+ priv->wq_edid_wait = 1;
+ reg_write(priv, REG_EDID_CTRL, 0x1);
+
+ /* flag must be cleared by sw: */
+ reg_write(priv, REG_EDID_CTRL, 0x0);
+
+ /* wait for block read to complete: */
+ if (priv->hdmi->irq) {
+ i = wait_event_timeout(priv->wq_edid,
+ !priv->wq_edid_wait,
+ msecs_to_jiffies(100));
+ if (i < 0) {
+ dev_err(&priv->hdmi->dev, "read edid wait err %d\n", i);
+ return i;
+ }
+ } else {
+ for (i = 100; i > 0; i--) {
+ msleep(1);
+ ret = reg_read(priv, REG_INT_FLAGS_2);
+ if (ret < 0)
+ return ret;
+ if (ret & INT_FLAGS_2_EDID_BLK_RD)
+ break;
+ }
+ }
+
+ if (i == 0) {
+ dev_err(&priv->hdmi->dev, "read edid timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = reg_read_range(priv, REG_EDID_DATA_0, buf, EDID_LENGTH);
+ if (ret != EDID_LENGTH) {
+ dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n",
+ blk, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static uint8_t *
+do_get_edid(struct drm_encoder *encoder)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+ int j, valid_extensions = 0;
+ uint8_t *block, *new;
+ bool print_bad_edid = drm_debug & DRM_UT_KMS;
+
+	block = kmalloc(EDID_LENGTH, GFP_KERNEL);
+	if (block == NULL)
+		return NULL;
+
+ if (priv->rev == TDA19988)
+ reg_clear(priv, REG_TX4, TX4_PD_RAM);
+
+ /* base block fetch */
+ if (read_edid_block(encoder, block, 0))
+ goto fail;
+
+ if (!drm_edid_block_valid(block, 0, print_bad_edid))
+ goto fail;
+
+	/* if there are no extensions, we're done */
+ if (block[0x7e] == 0)
+ goto done;
+
+ new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto fail;
+ block = new;
+
+	for (j = 1; j <= block[0x7e]; j++) {
+		uint8_t *ext_block = block + (valid_extensions + 1) * EDID_LENGTH;
+
+		if (read_edid_block(encoder, ext_block, j))
+ goto fail;
+
+ if (!drm_edid_block_valid(ext_block, j, print_bad_edid))
+ goto fail;
+
+ valid_extensions++;
+ }
+
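+	/*
+	 * Some extension blocks were not usable: patch the extension
+	 * count in byte 0x7e and re-adjust the base block checksum so
+	 * it still sums to 0 (mod 256), then shrink the allocation.
+	 */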
+ if (valid_extensions != block[0x7e]) {
+ block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+ block[0x7e] = valid_extensions;
+ new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto fail;
+ block = new;
+ }
+
+done:
+ if (priv->rev == TDA19988)
+ reg_set(priv, REG_TX4, TX4_PD_RAM);
+
+ return block;
+
+fail:
+ if (priv->rev == TDA19988)
+ reg_set(priv, REG_TX4, TX4_PD_RAM);
+ dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
+ kfree(block);
+ return NULL;
+}
+
+static int
+tda998x_encoder_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+ struct edid *edid = (struct edid *)do_get_edid(encoder);
+ int n = 0;
+
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+ priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ kfree(edid);
+ }
+
+ return n;
+}
+
+static int
+tda998x_encoder_create_resources(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+
+ if (priv->hdmi->irq)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ return 0;
+}
+
+static int
+tda998x_encoder_set_property(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ DBG("");
+ return 0;
+}
+
+static void
+tda998x_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+
+ /* disable all IRQs and free the IRQ handler */
+ cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
+ reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+ if (priv->hdmi->irq)
+ free_irq(priv->hdmi->irq, priv);
+
+ if (priv->cec)
+ i2c_unregister_device(priv->cec);
+ drm_i2c_encoder_destroy(encoder);
+ kfree(priv);
+}
+
+static struct drm_encoder_slave_funcs tda998x_encoder_funcs = {
+ .set_config = tda998x_encoder_set_config,
+ .destroy = tda998x_encoder_destroy,
+ .dpms = tda998x_encoder_dpms,
+ .save = tda998x_encoder_save,
+ .restore = tda998x_encoder_restore,
+ .mode_fixup = tda998x_encoder_mode_fixup,
+ .mode_valid = tda998x_encoder_mode_valid,
+ .mode_set = tda998x_encoder_mode_set,
+ .detect = tda998x_encoder_detect,
+ .get_modes = tda998x_encoder_get_modes,
+ .create_resources = tda998x_encoder_create_resources,
+ .set_property = tda998x_encoder_set_property,
+};
+
+/* I2C driver functions */
+
+static int
+tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ return 0;
+}
+
+static int
+tda998x_remove(struct i2c_client *client)
+{
+ return 0;
+}
+
+static int
+tda998x_encoder_init(struct i2c_client *client,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder_slave)
+{
+ struct tda998x_priv *priv;
+ struct device_node *np = client->dev.of_node;
+ u32 video;
+ int rev_lo, rev_hi, ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
+ priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
+ priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
+
+ priv->current_page = 0xff;
+ priv->hdmi = client;
+ priv->cec = i2c_new_dummy(client->adapter, 0x34);
+ if (!priv->cec) {
+ kfree(priv);
+ return -ENODEV;
+ }
+
+ priv->encoder = &encoder_slave->base;
+ priv->dpms = DRM_MODE_DPMS_OFF;
+
+ encoder_slave->slave_priv = priv;
+ encoder_slave->slave_funcs = &tda998x_encoder_funcs;
+
+ /* wake up the device: */
+ cec_write(priv, REG_CEC_ENAMODS,
+ CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
+
+ tda998x_reset(priv);
+
+ /* read version: */
+ rev_lo = reg_read(priv, REG_VERSION_LSB);
+ rev_hi = reg_read(priv, REG_VERSION_MSB);
+ if (rev_lo < 0 || rev_hi < 0) {
+ ret = rev_lo < 0 ? rev_lo : rev_hi;
+ goto fail;
+ }
+
+ priv->rev = rev_lo | rev_hi << 8;
+
+ /* mask off feature bits: */
+	priv->rev &= ~0x30; /* not-hdcp and not-scaler bit */
+
+	switch (priv->rev) {
+	case TDA9989N2:
+		dev_info(&client->dev, "found TDA9989 n2\n");
+		break;
+	case TDA19989:
+		dev_info(&client->dev, "found TDA19989\n");
+		break;
+	case TDA19989N2:
+		dev_info(&client->dev, "found TDA19989 n2\n");
+		break;
+	case TDA19988:
+		dev_info(&client->dev, "found TDA19988\n");
+		break;
+	default:
+		dev_err(&client->dev, "found unsupported device: %04x\n",
+			priv->rev);
+		goto fail;
+	}
+
+ /* after reset, enable DDC: */
+ reg_write(priv, REG_DDC_DISABLE, 0x00);
+
+ /* set clock on DDC channel: */
+ reg_write(priv, REG_TX3, 39);
+
+ /* if necessary, disable multi-master: */
+ if (priv->rev == TDA19989)
+ reg_set(priv, REG_I2C_MASTER, I2C_MASTER_DIS_MM);
+
+ cec_write(priv, REG_CEC_FRO_IM_CLK_CTRL,
+ CEC_FRO_IM_CLK_CTRL_GHOST_DIS | CEC_FRO_IM_CLK_CTRL_IMCLK_SEL);
+
+ /* initialize the optional IRQ */
+ if (client->irq) {
+ int irqf_trigger;
+
+ /* init read EDID waitqueue */
+ init_waitqueue_head(&priv->wq_edid);
+
+ /* clear pending interrupts */
+ reg_read(priv, REG_INT_FLAGS_0);
+ reg_read(priv, REG_INT_FLAGS_1);
+ reg_read(priv, REG_INT_FLAGS_2);
+
+ irqf_trigger =
+ irqd_get_trigger_type(irq_get_irq_data(client->irq));
+ ret = request_threaded_irq(client->irq, NULL,
+ tda998x_irq_thread,
+ irqf_trigger | IRQF_ONESHOT,
+ "tda998x", priv);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to request IRQ#%u: %d\n",
+ client->irq, ret);
+ goto fail;
+ }
+
+ /* enable HPD irq */
+ cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
+ }
+
+ /* enable EDID read irq: */
+ reg_set(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+
+ if (!np)
+ return 0; /* non-DT */
+
+ /* get the optional video properties */
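+	/*
+	 * The 24-bit "video-ports" value packs all three VIP mux
+	 * settings: bits 23:16 load vip_cntrl_0, bits 15:8 load
+	 * vip_cntrl_1 and bits 7:0 load vip_cntrl_2.
+	 */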
+ ret = of_property_read_u32(np, "video-ports", &video);
+ if (ret == 0) {
+ priv->vip_cntrl_0 = video >> 16;
+ priv->vip_cntrl_1 = video >> 8;
+ priv->vip_cntrl_2 = video;
+ }
+
+ return 0;
+
+fail:
+ /* if encoder_init fails, the encoder slave is never registered,
+ * so cleanup here:
+ */
+ if (priv->cec)
+ i2c_unregister_device(priv->cec);
+ kfree(priv);
+ encoder_slave->slave_priv = NULL;
+ encoder_slave->slave_funcs = NULL;
+ return -ENXIO;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id tda998x_dt_ids[] = {
+ { .compatible = "nxp,tda998x", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tda998x_dt_ids);
+#endif
+
+static struct i2c_device_id tda998x_ids[] = {
+ { "tda998x", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tda998x_ids);
+
+static struct drm_i2c_encoder_driver tda998x_driver = {
+ .i2c_driver = {
+ .probe = tda998x_probe,
+ .remove = tda998x_remove,
+ .driver = {
+ .name = "tda998x",
+ .of_match_table = of_match_ptr(tda998x_dt_ids),
+ },
+ .id_table = tda998x_ids,
+ },
+ .encoder_init = tda998x_encoder_init,
+};
+
+/* Module initialization */
+
+static int __init
+tda998x_init(void)
+{
+ DBG("");
+ return drm_i2c_encoder_register(THIS_MODULE, &tda998x_driver);
+}
+
+static void __exit
+tda998x_exit(void)
+{
+ DBG("");
+ drm_i2c_encoder_unregister(&tda998x_driver);
+}
+
+MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
+MODULE_DESCRIPTION("NXP Semiconductors TDA998X HDMI Encoder");
+MODULE_LICENSE("GPL");
+
+module_init(tda998x_init);
+module_exit(tda998x_exit);
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index de32d22a8c3..e88bac1d781 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -30,12 +30,12 @@
*
*/
-#include "drmP.h"
-#include "drm.h"
-#include "i810_drm.h"
+#include <drm/drmP.h>
+#include <drm/i810_drm.h>
#include "i810_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
+#include <linux/slab.h>
#include <linux/pagemap.h>
#define I810_BUF_FREE 2
@@ -59,9 +59,8 @@ static struct drm_buf *i810_freelist_get(struct drm_device * dev)
/* In use is already a pointer */
used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
I810_BUF_CLIENT);
- if (used == I810_BUF_FREE) {
+ if (used == I810_BUF_FREE)
return buf;
- }
}
return NULL;
}
@@ -70,7 +69,7 @@ static struct drm_buf *i810_freelist_get(struct drm_device * dev)
 * yet, the hardware updates in use for us once it's on the ring buffer.
*/
-static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf)
+static int i810_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int used;
@@ -93,17 +92,14 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
struct drm_buf *buf;
drm_i810_buf_priv_t *buf_priv;
- lock_kernel();
dev = priv->minor->dev;
dev_priv = dev->dev_private;
buf = dev_priv->mmap_buffer;
buf_priv = buf->dev_private;
- vma->vm_flags |= (VM_IO | VM_DONTCOPY);
- vma->vm_file = filp;
+ vma->vm_flags |= VM_DONTCOPY;
buf_priv->currently_mapped = I810_BUF_MAPPED;
- unlock_kernel();
if (io_remap_pfn_range(vma, vma->vm_start,
vma->vm_pgoff,
@@ -117,10 +113,13 @@ static const struct file_operations i810_buffer_fops = {
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
- .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
};
-static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
+static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
@@ -131,11 +130,11 @@ static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
if (buf_priv->currently_mapped == I810_BUF_MAPPED)
return -EINVAL;
- down_write(&current->mm->mmap_sem);
+ /* This is all entirely broken */
old_fops = file_priv->filp->f_op;
file_priv->filp->f_op = &i810_buffer_fops;
dev_priv->mmap_buffer = buf;
- buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
+ buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total,
PROT_READ | PROT_WRITE,
MAP_SHARED, buf->bus_address);
dev_priv->mmap_buffer = NULL;
@@ -146,12 +145,11 @@ static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
retcode = PTR_ERR(buf_priv->virtual);
buf_priv->virtual = NULL;
}
- up_write(&current->mm->mmap_sem);
return retcode;
}
-static int i810_unmap_buffer(struct drm_buf * buf)
+static int i810_unmap_buffer(struct drm_buf *buf)
{
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int retcode = 0;
@@ -159,11 +157,8 @@ static int i810_unmap_buffer(struct drm_buf * buf)
if (buf_priv->currently_mapped != I810_BUF_MAPPED)
return -EINVAL;
- down_write(&current->mm->mmap_sem);
- retcode = do_munmap(current->mm,
- (unsigned long)buf_priv->virtual,
+ retcode = vm_munmap((unsigned long)buf_priv->virtual,
(size_t) buf->total);
- up_write(&current->mm->mmap_sem);
buf_priv->currently_mapped = I810_BUF_UNMAPPED;
buf_priv->virtual = NULL;
@@ -171,7 +166,7 @@ static int i810_unmap_buffer(struct drm_buf * buf)
return retcode;
}
-static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
+static int i810_dma_get_buffer(struct drm_device *dev, drm_i810_dma_t *d,
struct drm_file *file_priv)
{
struct drm_buf *buf;
@@ -201,7 +196,7 @@ static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
return retcode;
}
-static int i810_dma_cleanup(struct drm_device * dev)
+static int i810_dma_cleanup(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
@@ -217,15 +212,12 @@ static int i810_dma_cleanup(struct drm_device * dev)
drm_i810_private_t *dev_priv =
(drm_i810_private_t *) dev->dev_private;
- if (dev_priv->ring.virtual_start) {
+ if (dev_priv->ring.virtual_start)
drm_core_ioremapfree(&dev_priv->ring.map, dev);
- }
if (dev_priv->hw_status_page) {
pci_free_consistent(dev->pdev, PAGE_SIZE,
dev_priv->hw_status_page,
dev_priv->dma_status_page);
- /* Need to rewrite hardware status page */
- I810_WRITE(0x02080, 0x1ffff000);
}
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -241,7 +233,7 @@ static int i810_dma_cleanup(struct drm_device * dev)
return 0;
}
-static int i810_wait_ring(struct drm_device * dev, int n)
+static int i810_wait_ring(struct drm_device *dev, int n)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
@@ -270,11 +262,11 @@ static int i810_wait_ring(struct drm_device * dev, int n)
udelay(1);
}
- out_wait_ring:
+out_wait_ring:
return iters;
}
-static void i810_kernel_lost_context(struct drm_device * dev)
+static void i810_kernel_lost_context(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
@@ -286,7 +278,7 @@ static void i810_kernel_lost_context(struct drm_device * dev)
ring->space += ring->Size;
}
-static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv)
+static int i810_freelist_init(struct drm_device *dev, drm_i810_private_t *dev_priv)
{
struct drm_device_dma *dma = dev->dma;
int my_idx = 24;
@@ -321,9 +313,9 @@ static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_
return 0;
}
-static int i810_dma_initialize(struct drm_device * dev,
- drm_i810_private_t * dev_priv,
- drm_i810_init_t * init)
+static int i810_dma_initialize(struct drm_device *dev,
+ drm_i810_private_t *dev_priv,
+ drm_i810_init_t *init)
{
struct drm_map_list *r_list;
memset(dev_priv, 0, sizeof(drm_i810_private_t));
@@ -461,7 +453,7 @@ static int i810_dma_init(struct drm_device *dev, void *data,
* Use 'volatile' & local var tmp to force the emitted values to be
* identical to the verified ones.
*/
-static void i810EmitContextVerified(struct drm_device * dev,
+static void i810EmitContextVerified(struct drm_device *dev,
volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -494,7 +486,7 @@ static void i810EmitContextVerified(struct drm_device * dev,
ADVANCE_LP_RING();
}
-static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code)
+static void i810EmitTexVerified(struct drm_device *dev, volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
int i, j = 0;
@@ -527,7 +519,7 @@ static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *
/* Need to do some additional checking when setting the dest buffer.
*/
-static void i810EmitDestVerified(struct drm_device * dev,
+static void i810EmitDestVerified(struct drm_device *dev,
volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -562,7 +554,7 @@ static void i810EmitDestVerified(struct drm_device * dev,
ADVANCE_LP_RING();
}
-static void i810EmitState(struct drm_device * dev)
+static void i810EmitState(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -593,7 +585,7 @@ static void i810EmitState(struct drm_device * dev)
/* need to verify
*/
-static void i810_dma_dispatch_clear(struct drm_device * dev, int flags,
+static void i810_dma_dispatch_clear(struct drm_device *dev, int flags,
unsigned int clear_color,
unsigned int clear_zval)
{
@@ -668,7 +660,7 @@ static void i810_dma_dispatch_clear(struct drm_device * dev, int flags,
}
}
-static void i810_dma_dispatch_swap(struct drm_device * dev)
+static void i810_dma_dispatch_swap(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -714,8 +706,8 @@ static void i810_dma_dispatch_swap(struct drm_device * dev)
}
}
-static void i810_dma_dispatch_vertex(struct drm_device * dev,
- struct drm_buf * buf, int discard, int used)
+static void i810_dma_dispatch_vertex(struct drm_device *dev,
+ struct drm_buf *buf, int discard, int used)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
@@ -794,7 +786,7 @@ static void i810_dma_dispatch_vertex(struct drm_device * dev,
}
}
-static void i810_dma_dispatch_flip(struct drm_device * dev)
+static void i810_dma_dispatch_flip(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
int pitch = dev_priv->pitch;
@@ -840,7 +832,7 @@ static void i810_dma_dispatch_flip(struct drm_device * dev)
}
-static void i810_dma_quiescent(struct drm_device * dev)
+static void i810_dma_quiescent(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -857,7 +849,7 @@ static void i810_dma_quiescent(struct drm_device * dev)
i810_wait_ring(dev, dev_priv->ring.Size - 8);
}
-static int i810_flush_queue(struct drm_device * dev)
+static int i810_flush_queue(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
struct drm_device_dma *dma = dev->dma;
@@ -890,7 +882,7 @@ static int i810_flush_queue(struct drm_device * dev)
}
/* Must be called with the lock held */
-static void i810_reclaim_buffers(struct drm_device * dev,
+void i810_driver_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
@@ -952,8 +944,6 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
dma->buflist[vertex->idx],
vertex->discard, vertex->used);
- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
sarea_priv->last_enqueue = dev_priv->counter - 1;
sarea_priv->last_dispatch = (int)hw_status[5];
@@ -968,9 +958,8 @@ static int i810_clear_bufs(struct drm_device *dev, void *data,
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* GH: Someone's doing nasty things... */
- if (!dev->dev_private) {
+ if (!dev->dev_private)
return -EINVAL;
- }
i810_dma_dispatch_clear(dev, clear->flags,
clear->clear_color, clear->clear_depth);
@@ -1038,7 +1027,7 @@ static int i810_docopy(struct drm_device *dev, void *data,
return 0;
}
-static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used,
+static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, int used,
unsigned int last_render)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1052,9 +1041,8 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf,
i810_kernel_lost_context(dev);
u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
- if (u != I810_BUF_CLIENT) {
+ if (u != I810_BUF_CLIENT)
DRM_DEBUG("MC found buffer that isn't mine!\n");
- }
if (used > 4 * 1024)
used = 0;
@@ -1115,8 +1103,6 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
mc->last_render);
- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
sarea_priv->last_enqueue = dev_priv->counter - 1;
sarea_priv->last_dispatch = (int)hw_status[5];
@@ -1159,7 +1145,7 @@ static int i810_ov0_flip(struct drm_device *dev, void *data,
LOCK_TEST_WITH_RETURN(dev, file_priv);
- //Tell the overlay to update
+ /* Tell the overlay to update */
I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
return 0;
@@ -1167,7 +1153,7 @@ static int i810_ov0_flip(struct drm_device *dev, void *data,
/* Not sure why this isn't set all the time:
*/
-static void i810_do_init_pageflip(struct drm_device * dev)
+static void i810_do_init_pageflip(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1177,7 +1163,7 @@ static void i810_do_init_pageflip(struct drm_device * dev)
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
-static int i810_do_cleanup_pageflip(struct drm_device * dev)
+static int i810_do_cleanup_pageflip(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1207,62 +1193,65 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
int i810_driver_load(struct drm_device *dev, unsigned long flags)
{
- /* i810 has 4 more counters */
- dev->counters += 4;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
- dev->types[9] = _DRM_STAT_DMA;
+ /* Our userspace depends upon the agp mapping support. */
+ if (!dev->agp)
+ return -EINVAL;
+
+ pci_set_master(dev->pdev);
return 0;
}
-void i810_driver_lastclose(struct drm_device * dev)
+void i810_driver_lastclose(struct drm_device *dev)
{
i810_dma_cleanup(dev);
}
-void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
if (dev->dev_private) {
drm_i810_private_t *dev_priv = dev->dev_private;
- if (dev_priv->page_flipping) {
+ if (dev_priv->page_flipping)
i810_do_cleanup_pageflip(dev);
- }
}
-}
-void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
- struct drm_file *file_priv)
-{
- i810_reclaim_buffers(dev, file_priv);
+ if (file_priv->master && file_priv->master->lock.hw_lock) {
+ drm_idlelock_take(&file_priv->master->lock);
+ i810_driver_reclaim_buffers(dev, file_priv);
+ drm_idlelock_release(&file_priv->master->lock);
+ } else {
+ /* master disappeared, clean up stuff anyway and hope nothing
+ * goes wrong */
+ i810_driver_reclaim_buffers(dev, file_priv);
+ }
+
}
-int i810_driver_dma_quiescent(struct drm_device * dev)
+int i810_driver_dma_quiescent(struct drm_device *dev)
{
i810_dma_quiescent(dev);
return 0;
}
-struct drm_ioctl_desc i810_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH)
+const struct drm_ioctl_desc i810_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
};
-int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
+int i810_max_ioctl = ARRAY_SIZE(i810_ioctls);
/**
* Determine if the device really is AGP or not.
@@ -1275,7 +1264,7 @@ int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
* \returns
 * A value of 1 is always returned to indicate every i810 is AGP.
*/
-int i810_driver_device_is_agp(struct drm_device * dev)
+int i810_driver_device_is_agp(struct drm_device *dev)
{
return 1;
}
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index c1e02752e02..441ccf8f5bd 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -30,46 +30,43 @@
* Gareth Hughes <gareth@valinux.com>
*/
-#include "drmP.h"
-#include "drm.h"
-#include "i810_drm.h"
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/i810_drm.h>
#include "i810_drv.h"
-#include "drm_pciids.h"
+#include <drm/drm_pciids.h>
static struct pci_device_id pciidlist[] = {
i810_PCI_IDS
};
+static const struct file_operations i810_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
- DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
+ DRIVER_USE_AGP |
+ DRIVER_HAVE_DMA,
.dev_priv_size = sizeof(drm_i810_buf_priv_t),
.load = i810_driver_load,
.lastclose = i810_driver_lastclose,
.preclose = i810_driver_preclose,
.device_is_agp = i810_driver_device_is_agp,
- .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
.dma_quiescent = i810_driver_dma_quiescent,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = i810_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- },
-
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
+ .fops = &i810_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -78,15 +75,24 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver i810_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init i810_init(void)
{
+ if (num_possible_cpus() > 1) {
+ pr_err("drm/i810 does not support SMP\n");
+ return -EINVAL;
+ }
driver.num_ioctls = i810_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &i810_pci_driver);
}
static void __exit i810_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &i810_pci_driver);
}
module_init(i810_init);
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index 21e2691f28f..d4d16eddd65 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -115,56 +115,57 @@ typedef struct drm_i810_private {
} drm_i810_private_t;
/* i810_dma.c */
-extern int i810_driver_dma_quiescent(struct drm_device * dev);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
- struct drm_file *file_priv);
+extern int i810_driver_dma_quiescent(struct drm_device *dev);
+void i810_driver_reclaim_buffers(struct drm_device *dev,
+ struct drm_file *file_priv);
extern int i810_driver_load(struct drm_device *, unsigned long flags);
-extern void i810_driver_lastclose(struct drm_device * dev);
-extern void i810_driver_preclose(struct drm_device * dev,
+extern void i810_driver_lastclose(struct drm_device *dev);
+extern void i810_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
- struct drm_file *file_priv);
-extern int i810_driver_device_is_agp(struct drm_device * dev);
+extern int i810_driver_device_is_agp(struct drm_device *dev);
-extern struct drm_ioctl_desc i810_ioctls[];
+extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+extern const struct drm_ioctl_desc i810_ioctls[];
extern int i810_max_ioctl;
#define I810_BASE(reg) ((unsigned long) \
dev_priv->mmio_map->handle)
#define I810_ADDR(reg) (I810_BASE(reg) + reg)
-#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
+#define I810_DEREF(reg) (*(__volatile__ int *)I810_ADDR(reg))
#define I810_READ(reg) I810_DEREF(reg)
-#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
-#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg)
+#define I810_WRITE(reg, val) do { I810_DEREF(reg) = val; } while (0)
+#define I810_DEREF16(reg) (*(__volatile__ u16 *)I810_ADDR(reg))
#define I810_READ16(reg) I810_DEREF16(reg)
-#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0)
+#define I810_WRITE16(reg, val) do { I810_DEREF16(reg) = val; } while (0)
#define I810_VERBOSE 0
#define RING_LOCALS unsigned int outring, ringmask; \
- volatile char *virt;
-
-#define BEGIN_LP_RING(n) do { \
- if (I810_VERBOSE) \
- DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \
- if (dev_priv->ring.space < n*4) \
- i810_wait_ring(dev, n*4); \
- dev_priv->ring.space -= n*4; \
- outring = dev_priv->ring.tail; \
- ringmask = dev_priv->ring.tail_mask; \
- virt = dev_priv->ring.virtual_start; \
+ volatile char *virt;
+
+#define BEGIN_LP_RING(n) do { \
+ if (I810_VERBOSE) \
+ DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \
+ if (dev_priv->ring.space < n*4) \
+ i810_wait_ring(dev, n*4); \
+ dev_priv->ring.space -= n*4; \
+ outring = dev_priv->ring.tail; \
+ ringmask = dev_priv->ring.tail_mask; \
+ virt = dev_priv->ring.virtual_start; \
} while (0)
-#define ADVANCE_LP_RING() do { \
- if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
+#define ADVANCE_LP_RING() do { \
+ if (I810_VERBOSE) \
+ DRM_DEBUG("ADVANCE_LP_RING\n"); \
dev_priv->ring.tail = outring; \
- I810_WRITE(LP_RING + RING_TAIL, outring); \
-} while(0)
-
-#define OUT_RING(n) do { \
- if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
- *(volatile unsigned int *)(virt + outring) = n; \
- outring += 4; \
- outring &= ringmask; \
+ I810_WRITE(LP_RING + RING_TAIL, outring); \
+} while (0)
+
+#define OUT_RING(n) do { \
+ if (I810_VERBOSE) \
+ DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
+ *(volatile unsigned int *)(virt + outring) = n; \
+ outring += 4; \
+ outring &= ringmask; \
} while (0)
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
deleted file mode 100644
index 06bd732e646..00000000000
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ /dev/null
@@ -1,1551 +0,0 @@
-/* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- * Keith Whitwell <keith@tungstengraphics.com>
- * Abraham vd Merwe <abraham@2d3d.co.za>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i830_drm.h"
-#include "i830_drv.h"
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/pagemap.h>
-#include <linux/delay.h>
-#include <asm/uaccess.h>
-
-#define I830_BUF_FREE 2
-#define I830_BUF_CLIENT 1
-#define I830_BUF_HARDWARE 0
-
-#define I830_BUF_UNMAPPED 0
-#define I830_BUF_MAPPED 1
-
-static struct drm_buf *i830_freelist_get(struct drm_device * dev)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
- int used;
-
- /* Linear search might not be the best solution */
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- /* In use is already a pointer */
- used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
- I830_BUF_CLIENT);
- if (used == I830_BUF_FREE) {
- return buf;
- }
- }
- return NULL;
-}
-
-/* This should only be called if the buffer is not sent to the hardware
- * yet, the hardware updates in use for us once its on the ring buffer.
- */
-
-static int i830_freelist_put(struct drm_device * dev, struct drm_buf * buf)
-{
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- int used;
-
- /* In use is already a pointer */
- used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE);
- if (used != I830_BUF_CLIENT) {
- DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev;
- drm_i830_private_t *dev_priv;
- struct drm_buf *buf;
- drm_i830_buf_priv_t *buf_priv;
-
- lock_kernel();
- dev = priv->minor->dev;
- dev_priv = dev->dev_private;
- buf = dev_priv->mmap_buffer;
- buf_priv = buf->dev_private;
-
- vma->vm_flags |= (VM_IO | VM_DONTCOPY);
- vma->vm_file = filp;
-
- buf_priv->currently_mapped = I830_BUF_MAPPED;
- unlock_kernel();
-
- if (io_remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
- return 0;
-}
-
-static const struct file_operations i830_buffer_fops = {
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = i830_mmap_buffers,
- .fasync = drm_fasync,
-};
-
-static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
-{
- struct drm_device *dev = file_priv->minor->dev;
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- drm_i830_private_t *dev_priv = dev->dev_private;
- const struct file_operations *old_fops;
- unsigned long virtual;
- int retcode = 0;
-
- if (buf_priv->currently_mapped == I830_BUF_MAPPED)
- return -EINVAL;
-
- down_write(&current->mm->mmap_sem);
- old_fops = file_priv->filp->f_op;
- file_priv->filp->f_op = &i830_buffer_fops;
- dev_priv->mmap_buffer = buf;
- virtual = do_mmap(file_priv->filp, 0, buf->total, PROT_READ | PROT_WRITE,
- MAP_SHARED, buf->bus_address);
- dev_priv->mmap_buffer = NULL;
- file_priv->filp->f_op = old_fops;
- if (IS_ERR((void *)virtual)) { /* ugh */
- /* Real error */
- DRM_ERROR("mmap error\n");
- retcode = PTR_ERR((void *)virtual);
- buf_priv->virtual = NULL;
- } else {
- buf_priv->virtual = (void __user *)virtual;
- }
- up_write(&current->mm->mmap_sem);
-
- return retcode;
-}
-
-static int i830_unmap_buffer(struct drm_buf * buf)
-{
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- int retcode = 0;
-
- if (buf_priv->currently_mapped != I830_BUF_MAPPED)
- return -EINVAL;
-
- down_write(&current->mm->mmap_sem);
- retcode = do_munmap(current->mm,
- (unsigned long)buf_priv->virtual,
- (size_t) buf->total);
- up_write(&current->mm->mmap_sem);
-
- buf_priv->currently_mapped = I830_BUF_UNMAPPED;
- buf_priv->virtual = NULL;
-
- return retcode;
-}
-
-static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d,
- struct drm_file *file_priv)
-{
- struct drm_buf *buf;
- drm_i830_buf_priv_t *buf_priv;
- int retcode = 0;
-
- buf = i830_freelist_get(dev);
- if (!buf) {
- retcode = -ENOMEM;
- DRM_DEBUG("retcode=%d\n", retcode);
- return retcode;
- }
-
- retcode = i830_map_buffer(buf, file_priv);
- if (retcode) {
- i830_freelist_put(dev, buf);
- DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
- return retcode;
- }
- buf->file_priv = file_priv;
- buf_priv = buf->dev_private;
- d->granted = 1;
- d->request_idx = buf->idx;
- d->request_size = buf->total;
- d->virtual = buf_priv->virtual;
-
- return retcode;
-}
-
-static int i830_dma_cleanup(struct drm_device * dev)
-{
- struct drm_device_dma *dma = dev->dma;
-
- /* Make sure interrupts are disabled here because the uninstall ioctl
- * may not have been called from userspace and after dev_private
- * is freed, it's too late.
- */
- if (dev->irq_enabled)
- drm_irq_uninstall(dev);
-
- if (dev->dev_private) {
- int i;
- drm_i830_private_t *dev_priv =
- (drm_i830_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start) {
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
- }
- if (dev_priv->hw_status_page) {
- pci_free_consistent(dev->pdev, PAGE_SIZE,
- dev_priv->hw_status_page,
- dev_priv->dma_status_page);
- /* Need to rewrite hardware status page */
- I830_WRITE(0x02080, 0x1ffff000);
- }
-
- kfree(dev->dev_private);
- dev->dev_private = NULL;
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- if (buf_priv->kernel_virtual && buf->total)
- drm_core_ioremapfree(&buf_priv->map, dev);
- }
- }
- return 0;
-}
-
-int i830_wait_ring(struct drm_device * dev, int n, const char *caller)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
- int iters = 0;
- unsigned long end;
- unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
-
- end = jiffies + (HZ * 3);
- while (ring->space < n) {
- ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-
- if (ring->head != last_head) {
- end = jiffies + (HZ * 3);
- last_head = ring->head;
- }
-
- iters++;
- if (time_before(end, jiffies)) {
- DRM_ERROR("space: %d wanted %d\n", ring->space, n);
- DRM_ERROR("lockup\n");
- goto out_wait_ring;
- }
- udelay(1);
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
- }
-
- out_wait_ring:
- return iters;
-}
-
-static void i830_kernel_lost_context(struct drm_device * dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
-
- ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
- ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-
- if (ring->head == ring->tail)
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
-}
-
-static int i830_freelist_init(struct drm_device * dev, drm_i830_private_t * dev_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int my_idx = 36;
- u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
- int i;
-
- if (dma->buf_count > 1019) {
- /* Not enough space in the status page for the freelist */
- return -EINVAL;
- }
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
-
- buf_priv->in_use = hw_status++;
- buf_priv->my_use_idx = my_idx;
- my_idx += 4;
-
- *buf_priv->in_use = I830_BUF_FREE;
-
- buf_priv->map.offset = buf->bus_address;
- buf_priv->map.size = buf->total;
- buf_priv->map.type = _DRM_AGP;
- buf_priv->map.flags = 0;
- buf_priv->map.mtrr = 0;
-
- drm_core_ioremap(&buf_priv->map, dev);
- buf_priv->kernel_virtual = buf_priv->map.handle;
- }
- return 0;
-}
-
-static int i830_dma_initialize(struct drm_device * dev,
- drm_i830_private_t * dev_priv,
- drm_i830_init_t * init)
-{
- struct drm_map_list *r_list;
-
- memset(dev_priv, 0, sizeof(drm_i830_private_t));
-
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map &&
- r_list->map->type == _DRM_SHM &&
- r_list->map->flags & _DRM_CONTAINS_LOCK) {
- dev_priv->sarea_map = r_list->map;
- break;
- }
- }
-
- if (!dev_priv->sarea_map) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not find sarea!\n");
- return -EINVAL;
- }
- dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
- if (!dev_priv->mmio_map) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not find mmio map!\n");
- return -EINVAL;
- }
- dev->agp_buffer_token = init->buffers_offset;
- dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
- if (!dev->agp_buffer_map) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not find dma buffer map!\n");
- return -EINVAL;
- }
-
- dev_priv->sarea_priv = (drm_i830_sarea_t *)
- ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
-
- dev_priv->ring.Start = init->ring_start;
- dev_priv->ring.End = init->ring_end;
- dev_priv->ring.Size = init->ring_size;
-
- dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
- dev_priv->ring.map.size = init->ring_size;
- dev_priv->ring.map.type = _DRM_AGP;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
-
- drm_core_ioremap(&dev_priv->ring.map, dev);
-
- if (dev_priv->ring.map.handle == NULL) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
- }
-
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
-
- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
- dev_priv->w = init->w;
- dev_priv->h = init->h;
- dev_priv->pitch = init->pitch;
- dev_priv->back_offset = init->back_offset;
- dev_priv->depth_offset = init->depth_offset;
- dev_priv->front_offset = init->front_offset;
-
- dev_priv->front_di1 = init->front_offset | init->pitch_bits;
- dev_priv->back_di1 = init->back_offset | init->pitch_bits;
- dev_priv->zi1 = init->depth_offset | init->pitch_bits;
-
- DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1);
- DRM_DEBUG("back_offset %x\n", dev_priv->back_offset);
- DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1);
- DRM_DEBUG("pitch_bits %x\n", init->pitch_bits);
-
- dev_priv->cpp = init->cpp;
- /* We are using separate values as placeholders for mechanisms for
- * private backbuffer/depthbuffer usage.
- */
-
- dev_priv->back_pitch = init->back_pitch;
- dev_priv->depth_pitch = init->depth_pitch;
- dev_priv->do_boxes = 0;
- dev_priv->use_mi_batchbuffer_start = 0;
-
- /* Program Hardware Status Page */
- dev_priv->hw_status_page =
- pci_alloc_consistent(dev->pdev, PAGE_SIZE,
- &dev_priv->dma_status_page);
- if (!dev_priv->hw_status_page) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
-
- I830_WRITE(0x02080, dev_priv->dma_status_page);
- DRM_DEBUG("Enabled hardware status page\n");
-
- /* Now we need to init our freelist */
- if (i830_freelist_init(dev, dev_priv) != 0) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("Not enough space in the status page for"
- " the freelist\n");
- return -ENOMEM;
- }
- dev->dev_private = (void *)dev_priv;
-
- return 0;
-}
-
-static int i830_dma_init(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv;
- drm_i830_init_t *init = data;
- int retcode = 0;
-
- switch (init->func) {
- case I830_INIT_DMA:
- dev_priv = kmalloc(sizeof(drm_i830_private_t), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
- retcode = i830_dma_initialize(dev, dev_priv, init);
- break;
- case I830_CLEANUP_DMA:
- retcode = i830_dma_cleanup(dev);
- break;
- default:
- retcode = -EINVAL;
- break;
- }
-
- return retcode;
-}
-
-#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-#define ST1_ENABLE (1<<16)
-#define ST1_MASK (0xffff)
-
-/* Most efficient way to verify state for the i830 is as it is
- * emitted. Non-conformant state is silently dropped.
- */
-static void i830EmitContextVerified(struct drm_device * dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- BEGIN_LP_RING(I830_CTX_SETUP_SIZE + 4);
-
- for (i = 0; i < I830_CTXREG_BLENDCOLR0; i++) {
- tmp = code[i];
- if ((tmp & (7 << 29)) == CMD_3D &&
- (tmp & (0x1f << 24)) < (0x1d << 24)) {
- OUT_RING(tmp);
- j++;
- } else {
- DRM_ERROR("Skipping %d\n", i);
- }
- }
-
- OUT_RING(STATE3D_CONST_BLEND_COLOR_CMD);
- OUT_RING(code[I830_CTXREG_BLENDCOLR]);
- j += 2;
-
- for (i = I830_CTXREG_VF; i < I830_CTXREG_MCSB0; i++) {
- tmp = code[i];
- if ((tmp & (7 << 29)) == CMD_3D &&
- (tmp & (0x1f << 24)) < (0x1d << 24)) {
- OUT_RING(tmp);
- j++;
- } else {
- DRM_ERROR("Skipping %d\n", i);
- }
- }
-
- OUT_RING(STATE3D_MAP_COORD_SETBIND_CMD);
- OUT_RING(code[I830_CTXREG_MCSB1]);
- j += 2;
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i830EmitTexVerified(struct drm_device * dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO ||
- (code[I830_TEXREG_MI0] & ~(0xf * LOAD_TEXTURE_MAP0)) ==
- (STATE3D_LOAD_STATE_IMMEDIATE_2 | 4)) {
-
- BEGIN_LP_RING(I830_TEX_SETUP_SIZE);
-
- OUT_RING(code[I830_TEXREG_MI0]); /* TM0LI */
- OUT_RING(code[I830_TEXREG_MI1]); /* TM0S0 */
- OUT_RING(code[I830_TEXREG_MI2]); /* TM0S1 */
- OUT_RING(code[I830_TEXREG_MI3]); /* TM0S2 */
- OUT_RING(code[I830_TEXREG_MI4]); /* TM0S3 */
- OUT_RING(code[I830_TEXREG_MI5]); /* TM0S4 */
-
- for (i = 6; i < I830_TEX_SETUP_SIZE; i++) {
- tmp = code[i];
- OUT_RING(tmp);
- j++;
- }
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
- } else
- printk("rejected packet %x\n", code[0]);
-}
-
-static void i830EmitTexBlendVerified(struct drm_device * dev,
- unsigned int *code, unsigned int num)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- if (!num)
- return;
-
- BEGIN_LP_RING(num + 1);
-
- for (i = 0; i < num; i++) {
- tmp = code[i];
- OUT_RING(tmp);
- j++;
- }
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i830EmitTexPalette(struct drm_device * dev,
- unsigned int *palette, int number, int is_shared)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i;
- RING_LOCALS;
-
- return;
-
- BEGIN_LP_RING(258);
-
- if (is_shared == 1) {
- OUT_RING(CMD_OP_MAP_PALETTE_LOAD |
- MAP_PALETTE_NUM(0) | MAP_PALETTE_BOTH);
- } else {
- OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
- }
- for (i = 0; i < 256; i++) {
- OUT_RING(palette[i]);
- }
- OUT_RING(0);
- /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop!
- */
-}
-
-/* Need to do some additional checking when setting the dest buffer.
- */
-static void i830EmitDestVerified(struct drm_device * dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- unsigned int tmp;
- RING_LOCALS;
-
- BEGIN_LP_RING(I830_DEST_SETUP_SIZE + 10);
-
- tmp = code[I830_DESTREG_CBUFADDR];
- if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
- if (((int)outring) & 8) {
- OUT_RING(0);
- OUT_RING(0);
- }
-
- OUT_RING(CMD_OP_DESTBUFFER_INFO);
- OUT_RING(BUF_3D_ID_COLOR_BACK |
- BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) |
- BUF_3D_USE_FENCE);
- OUT_RING(tmp);
- OUT_RING(0);
-
- OUT_RING(CMD_OP_DESTBUFFER_INFO);
- OUT_RING(BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE |
- BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
- OUT_RING(dev_priv->zi1);
- OUT_RING(0);
- } else {
- DRM_ERROR("bad di1 %x (allow %x or %x)\n",
- tmp, dev_priv->front_di1, dev_priv->back_di1);
- }
-
- /* invarient:
- */
-
- OUT_RING(GFX_OP_DESTBUFFER_VARS);
- OUT_RING(code[I830_DESTREG_DV1]);
-
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(code[I830_DESTREG_DR1]);
- OUT_RING(code[I830_DESTREG_DR2]);
- OUT_RING(code[I830_DESTREG_DR3]);
- OUT_RING(code[I830_DESTREG_DR4]);
-
- /* Need to verify this */
- tmp = code[I830_DESTREG_SENABLE];
- if ((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) {
- OUT_RING(tmp);
- } else {
- DRM_ERROR("bad scissor enable\n");
- OUT_RING(0);
- }
-
- OUT_RING(GFX_OP_SCISSOR_RECT);
- OUT_RING(code[I830_DESTREG_SR1]);
- OUT_RING(code[I830_DESTREG_SR2]);
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i830EmitStippleVerified(struct drm_device * dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- BEGIN_LP_RING(2);
- OUT_RING(GFX_OP_STIPPLE);
- OUT_RING(code[1]);
- ADVANCE_LP_RING();
-}
-
-static void i830EmitState(struct drm_device * dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int dirty = sarea_priv->dirty;
-
- DRM_DEBUG("%s %x\n", __func__, dirty);
-
- if (dirty & I830_UPLOAD_BUFFERS) {
- i830EmitDestVerified(dev, sarea_priv->BufferState);
- sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS;
- }
-
- if (dirty & I830_UPLOAD_CTX) {
- i830EmitContextVerified(dev, sarea_priv->ContextState);
- sarea_priv->dirty &= ~I830_UPLOAD_CTX;
- }
-
- if (dirty & I830_UPLOAD_TEX0) {
- i830EmitTexVerified(dev, sarea_priv->TexState[0]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX0;
- }
-
- if (dirty & I830_UPLOAD_TEX1) {
- i830EmitTexVerified(dev, sarea_priv->TexState[1]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX1;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND0) {
- i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[0],
- sarea_priv->TexBlendStateWordsUsed[0]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND1) {
- i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[1],
- sarea_priv->TexBlendStateWordsUsed[1]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1;
- }
-
- if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) {
- i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1);
- } else {
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) {
- i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0);
- }
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) {
- i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1);
- }
-
- /* 1.3:
- */
-#if 0
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) {
- i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
- }
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) {
- i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
- }
-#endif
- }
-
- /* 1.3:
- */
- if (dirty & I830_UPLOAD_STIPPLE) {
- i830EmitStippleVerified(dev, sarea_priv->StippleState);
- sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE;
- }
-
- if (dirty & I830_UPLOAD_TEX2) {
- i830EmitTexVerified(dev, sarea_priv->TexState2);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX2;
- }
-
- if (dirty & I830_UPLOAD_TEX3) {
- i830EmitTexVerified(dev, sarea_priv->TexState3);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX3;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND2) {
- i830EmitTexBlendVerified(dev,
- sarea_priv->TexBlendState2,
- sarea_priv->TexBlendStateWordsUsed2);
-
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND3) {
- i830EmitTexBlendVerified(dev,
- sarea_priv->TexBlendState3,
- sarea_priv->TexBlendStateWordsUsed3);
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3;
- }
-}
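
i830EmitState() above is the whole deferred-state upload machine: each I830_UPLOAD_* bit flags a block of SAREA state the client has touched, the matching *Verified emitter validates it and writes it into the ring, and the bit is cleared so the block is not re-sent. A minimal sketch of that test/emit/ack step, with emit_if_dirty() as a purely illustrative helper:

static void emit_if_dirty(struct drm_device *dev,
			  drm_i830_sarea_t *sarea_priv, unsigned int flag,
			  void (*emit)(struct drm_device *, unsigned int *),
			  unsigned int *state)
{
	if (sarea_priv->dirty & flag) {
		emit(dev, state);		/* verify + write into the LP ring */
		sarea_priv->dirty &= ~flag;	/* ack: don't re-emit next time */
	}
}
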
-
-/* ================================================================
- * Performance monitoring functions
- */
-
-static void i830_fill_box(struct drm_device * dev,
- int x, int y, int w, int h, int r, int g, int b)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- u32 color;
- unsigned int BR13, CMD;
- RING_LOCALS;
-
- BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1 << 24);
- CMD = XY_COLOR_BLT_CMD;
- x += dev_priv->sarea_priv->boxes[0].x1;
- y += dev_priv->sarea_priv->boxes[0].y1;
-
- if (dev_priv->cpp == 4) {
- BR13 |= (1 << 25);
- CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
- color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
- } else {
- color = (((r & 0xf8) << 8) |
- ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
- }
-
- BEGIN_LP_RING(6);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((y << 16) | x);
- OUT_RING(((y + h) << 16) | (x + w));
-
- if (dev_priv->current_page == 1) {
- OUT_RING(dev_priv->front_offset);
- } else {
- OUT_RING(dev_priv->back_offset);
- }
-
- OUT_RING(color);
- ADVANCE_LP_RING();
-}
-
-static void i830_cp_performance_boxes(struct drm_device * dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- /* Purple box for page flipping
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP)
- i830_fill_box(dev, 4, 4, 8, 8, 255, 0, 255);
-
- /* Red box if we have to wait for idle at any point
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT)
- i830_fill_box(dev, 16, 4, 8, 8, 255, 0, 0);
-
- /* Blue box: lost context?
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT)
- i830_fill_box(dev, 28, 4, 8, 8, 0, 0, 255);
-
- /* Yellow box for texture swaps
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD)
- i830_fill_box(dev, 40, 4, 8, 8, 255, 255, 0);
-
- /* Green box if hardware never idles (as far as we can tell)
- */
- if (!(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY))
- i830_fill_box(dev, 64, 4, 8, 8, 0, 255, 0);
-
- /* Draw bars indicating number of buffers allocated
- * (not a great measure, easily confused)
- */
- if (dev_priv->dma_used) {
- int bar = dev_priv->dma_used / 10240;
- if (bar > 100)
- bar = 100;
- if (bar < 1)
- bar = 1;
- i830_fill_box(dev, 4, 16, bar, 4, 196, 128, 128);
- dev_priv->dma_used = 0;
- }
-
- dev_priv->sarea_priv->perf_boxes = 0;
-}
-
-static void i830_dma_dispatch_clear(struct drm_device * dev, int flags,
- unsigned int clear_color,
- unsigned int clear_zval,
- unsigned int clear_depthmask)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int nbox = sarea_priv->nbox;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int pitch = dev_priv->pitch;
- int cpp = dev_priv->cpp;
- int i;
- unsigned int BR13, CMD, D_CMD;
- RING_LOCALS;
-
- if (dev_priv->current_page == 1) {
- unsigned int tmp = flags;
-
- flags &= ~(I830_FRONT | I830_BACK);
- if (tmp & I830_FRONT)
- flags |= I830_BACK;
- if (tmp & I830_BACK)
- flags |= I830_FRONT;
- }
-
- i830_kernel_lost_context(dev);
-
- switch (cpp) {
- case 2:
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
- D_CMD = CMD = XY_COLOR_BLT_CMD;
- break;
- case 4:
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25);
- CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
- XY_COLOR_BLT_WRITE_RGB);
- D_CMD = XY_COLOR_BLT_CMD;
- if (clear_depthmask & 0x00ffffff)
- D_CMD |= XY_COLOR_BLT_WRITE_RGB;
- if (clear_depthmask & 0xff000000)
- D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
- break;
- default:
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
- D_CMD = CMD = XY_COLOR_BLT_CMD;
- break;
- }
-
- if (nbox > I830_NR_SAREA_CLIPRECTS)
- nbox = I830_NR_SAREA_CLIPRECTS;
-
- for (i = 0; i < nbox; i++, pbox++) {
- if (pbox->x1 > pbox->x2 ||
- pbox->y1 > pbox->y2 ||
- pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
- continue;
-
- if (flags & I830_FRONT) {
- DRM_DEBUG("clear front\n");
- BEGIN_LP_RING(6);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
- OUT_RING(dev_priv->front_offset);
- OUT_RING(clear_color);
- ADVANCE_LP_RING();
- }
-
- if (flags & I830_BACK) {
- DRM_DEBUG("clear back\n");
- BEGIN_LP_RING(6);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
- OUT_RING(dev_priv->back_offset);
- OUT_RING(clear_color);
- ADVANCE_LP_RING();
- }
-
- if (flags & I830_DEPTH) {
- DRM_DEBUG("clear depth\n");
- BEGIN_LP_RING(6);
- OUT_RING(D_CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
- OUT_RING(dev_priv->depth_offset);
- OUT_RING(clear_zval);
- ADVANCE_LP_RING();
- }
- }
-}
-
-static void i830_dma_dispatch_swap(struct drm_device * dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int nbox = sarea_priv->nbox;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int pitch = dev_priv->pitch;
- int cpp = dev_priv->cpp;
- int i;
- unsigned int CMD, BR13;
- RING_LOCALS;
-
- DRM_DEBUG("swapbuffers\n");
-
- i830_kernel_lost_context(dev);
-
- if (dev_priv->do_boxes)
- i830_cp_performance_boxes(dev);
-
- switch (cpp) {
- case 2:
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
- CMD = XY_SRC_COPY_BLT_CMD;
- break;
- case 4:
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25);
- CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
- XY_SRC_COPY_BLT_WRITE_RGB);
- break;
- default:
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
- CMD = XY_SRC_COPY_BLT_CMD;
- break;
- }
-
- if (nbox > I830_NR_SAREA_CLIPRECTS)
- nbox = I830_NR_SAREA_CLIPRECTS;
-
- for (i = 0; i < nbox; i++, pbox++) {
- if (pbox->x1 > pbox->x2 ||
- pbox->y1 > pbox->y2 ||
- pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
- continue;
-
- DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
- pbox->x1, pbox->y1, pbox->x2, pbox->y2);
-
- BEGIN_LP_RING(8);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
-
- if (dev_priv->current_page == 0)
- OUT_RING(dev_priv->front_offset);
- else
- OUT_RING(dev_priv->back_offset);
-
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING(BR13 & 0xffff);
-
- if (dev_priv->current_page == 0)
- OUT_RING(dev_priv->back_offset);
- else
- OUT_RING(dev_priv->front_offset);
-
- ADVANCE_LP_RING();
- }
-}
-
-static void i830_dma_dispatch_flip(struct drm_device * dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
- __func__,
- dev_priv->current_page,
- dev_priv->sarea_priv->pf_current_page);
-
- i830_kernel_lost_context(dev);
-
- if (dev_priv->do_boxes) {
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP;
- i830_cp_performance_boxes(dev);
- }
-
- BEGIN_LP_RING(2);
- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(6);
- OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
- OUT_RING(0);
- if (dev_priv->current_page == 0) {
- OUT_RING(dev_priv->back_offset);
- dev_priv->current_page = 1;
- } else {
- OUT_RING(dev_priv->front_offset);
- dev_priv->current_page = 0;
- }
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(2);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-}
-
-static void i830_dma_dispatch_vertex(struct drm_device * dev,
- struct drm_buf * buf, int discard, int used)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- struct drm_clip_rect *box = sarea_priv->boxes;
- int nbox = sarea_priv->nbox;
- unsigned long address = (unsigned long)buf->bus_address;
- unsigned long start = address - dev->agp->base;
- int i = 0, u;
- RING_LOCALS;
-
- i830_kernel_lost_context(dev);
-
- if (nbox > I830_NR_SAREA_CLIPRECTS)
- nbox = I830_NR_SAREA_CLIPRECTS;
-
- if (discard) {
- u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
- I830_BUF_HARDWARE);
- if (u != I830_BUF_CLIENT) {
- DRM_DEBUG("xxxx 2\n");
- }
- }
-
- if (used > 4 * 1023)
- used = 0;
-
- if (sarea_priv->dirty)
- i830EmitState(dev);
-
- DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
- address, used, nbox);
-
- dev_priv->counter++;
- DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
- DRM_DEBUG("i830_dma_dispatch\n");
- DRM_DEBUG("start : %lx\n", start);
- DRM_DEBUG("used : %d\n", used);
- DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
-
- if (buf_priv->currently_mapped == I830_BUF_MAPPED) {
- u32 *vp = buf_priv->kernel_virtual;
-
- vp[0] = (GFX_OP_PRIMITIVE |
- sarea_priv->vertex_prim | ((used / 4) - 2));
-
- if (dev_priv->use_mi_batchbuffer_start) {
- vp[used / 4] = MI_BATCH_BUFFER_END;
- used += 4;
- }
-
- if (used & 4) {
- vp[used / 4] = 0;
- used += 4;
- }
-
- i830_unmap_buffer(buf);
- }
-
- if (used) {
- do {
- if (i < nbox) {
- BEGIN_LP_RING(6);
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(sarea_priv->
- BufferState[I830_DESTREG_DR1]);
- OUT_RING(box[i].x1 | (box[i].y1 << 16));
- OUT_RING(box[i].x2 | (box[i].y2 << 16));
- OUT_RING(sarea_priv->
- BufferState[I830_DESTREG_DR4]);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- if (dev_priv->use_mi_batchbuffer_start) {
- BEGIN_LP_RING(2);
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
- OUT_RING(start | MI_BATCH_NON_SECURE);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(4);
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(start | MI_BATCH_NON_SECURE);
- OUT_RING(start + used - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- } while (++i < nbox);
- }
-
- if (discard) {
- dev_priv->counter++;
-
- (void)cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
- I830_BUF_HARDWARE);
-
- BEGIN_LP_RING(8);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(20);
- OUT_RING(dev_priv->counter);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(buf_priv->my_use_idx);
- OUT_RING(I830_BUF_FREE);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-}
-
-static void i830_dma_quiescent(struct drm_device * dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- i830_kernel_lost_context(dev);
-
- BEGIN_LP_RING(4);
- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
-}
-
-static int i830_flush_queue(struct drm_device * dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- struct drm_device_dma *dma = dev->dma;
- int i, ret = 0;
- RING_LOCALS;
-
- i830_kernel_lost_context(dev);
-
- BEGIN_LP_RING(2);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
-
- int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE,
- I830_BUF_FREE);
-
- if (used == I830_BUF_HARDWARE)
- DRM_DEBUG("reclaimed from HARDWARE\n");
- if (used == I830_BUF_CLIENT)
- DRM_DEBUG("still on client\n");
- }
-
- return ret;
-}
-
-/* Must be called with the lock held */
-static void i830_reclaim_buffers(struct drm_device * dev, struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
-
- if (!dma)
- return;
- if (!dev->dev_private)
- return;
- if (!dma->buflist)
- return;
-
- i830_flush_queue(dev);
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
-
- if (buf->file_priv == file_priv && buf_priv) {
- int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
- I830_BUF_FREE);
-
- if (used == I830_BUF_CLIENT)
- DRM_DEBUG("reclaimed from client\n");
- if (buf_priv->currently_mapped == I830_BUF_MAPPED)
- buf_priv->currently_mapped = I830_BUF_UNMAPPED;
- }
- }
-}
-
-static int i830_flush_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- i830_flush_queue(dev);
- return 0;
-}
-
-static int i830_dma_vertex(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
- dev_priv->sarea_priv;
- drm_i830_vertex_t *vertex = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
- vertex->idx, vertex->used, vertex->discard);
-
- if (vertex->idx < 0 || vertex->idx > dma->buf_count)
- return -EINVAL;
-
- i830_dma_dispatch_vertex(dev,
- dma->buflist[vertex->idx],
- vertex->discard, vertex->used);
-
- sarea_priv->last_enqueue = dev_priv->counter - 1;
- sarea_priv->last_dispatch = (int)hw_status[5];
-
- return 0;
-}
-
-static int i830_clear_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_clear_t *clear = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- /* GH: Someone's doing nasty things... */
- if (!dev->dev_private) {
- return -EINVAL;
- }
-
- i830_dma_dispatch_clear(dev, clear->flags,
- clear->clear_color,
- clear->clear_depth, clear->clear_depthmask);
- return 0;
-}
-
-static int i830_swap_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- DRM_DEBUG("i830_swap_bufs\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- i830_dma_dispatch_swap(dev);
- return 0;
-}
-
-/* Not sure why this isn't set all the time:
- */
-static void i830_do_init_pageflip(struct drm_device * dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
- dev_priv->page_flipping = 1;
- dev_priv->current_page = 0;
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-}
-
-static int i830_do_cleanup_pageflip(struct drm_device * dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
- if (dev_priv->current_page != 0)
- i830_dma_dispatch_flip(dev);
-
- dev_priv->page_flipping = 0;
- return 0;
-}
-
-static int i830_flip_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (!dev_priv->page_flipping)
- i830_do_init_pageflip(dev);
-
- i830_dma_dispatch_flip(dev);
- return 0;
-}
-
-static int i830_getage(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
- dev_priv->sarea_priv;
-
- sarea_priv->last_dispatch = (int)hw_status[5];
- return 0;
-}
-
-static int i830_getbuf(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int retcode = 0;
- drm_i830_dma_t *d = data;
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
- dev_priv->sarea_priv;
-
- DRM_DEBUG("getbuf\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- d->granted = 0;
-
- retcode = i830_dma_get_buffer(dev, d, file_priv);
-
- DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
- task_pid_nr(current), retcode, d->granted);
-
- sarea_priv->last_dispatch = (int)hw_status[5];
-
- return retcode;
-}
-
-static int i830_copybuf(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* Never copy - 2.4.x doesn't need it */
- return 0;
-}
-
-static int i830_docopy(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return 0;
-}
-
-static int i830_getparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_getparam_t *param = data;
- int value;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- switch (param->param) {
- case I830_PARAM_IRQ_ACTIVE:
- value = dev->irq_enabled;
- break;
- default:
- return -EINVAL;
- }
-
- if (copy_to_user(param->value, &value, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int i830_setparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_setparam_t *param = data;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- switch (param->param) {
- case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
- dev_priv->use_mi_batchbuffer_start = param->value;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-int i830_driver_load(struct drm_device *dev, unsigned long flags)
-{
- /* i830 has 4 more counters */
- dev->counters += 4;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
- dev->types[9] = _DRM_STAT_DMA;
-
- return 0;
-}
-
-void i830_driver_lastclose(struct drm_device * dev)
-{
- i830_dma_cleanup(dev);
-}
-
-void i830_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
-{
- if (dev->dev_private) {
- drm_i830_private_t *dev_priv = dev->dev_private;
- if (dev_priv->page_flipping) {
- i830_do_cleanup_pageflip(dev);
- }
- }
-}
-
-void i830_driver_reclaim_buffers_locked(struct drm_device * dev, struct drm_file *file_priv)
-{
- i830_reclaim_buffers(dev, file_priv);
-}
-
-int i830_driver_dma_quiescent(struct drm_device * dev)
-{
- i830_dma_quiescent(dev);
- return 0;
-}
-
-struct drm_ioctl_desc i830_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH)
-};
-
-int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
-
-/**
- * Determine if the device really is AGP or not.
- *
- * All Intel graphics chipsets are treated as AGP, even if they are really
- * PCI-e.
- *
- * \param dev The device to be tested.
- *
- * \returns
- * A value of 1 is always returned to indicate every i8xx is AGP.
- */
-int i830_driver_device_is_agp(struct drm_device * dev)
-{
- return 1;
-}
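
Buffer lifetime in the dispatch paths above hinges on a three-state handoff stored in buf_priv->in_use: I830_BUF_CLIENT while userspace owns the buffer, I830_BUF_HARDWARE once it is queued, and I830_BUF_FREE after the ring's store-dword marks it reclaimable. The transition is always an atomic cmpxchg, as in this sketch (claim_for_hw() is illustrative; the states match the code above):

static int claim_for_hw(drm_i830_buf_priv_t *buf_priv)
{
	int old = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_HARDWARE);

	/* Only a client-owned buffer may be handed to hardware; any other
	 * state means the ownership protocol was violated somewhere. */
	return old == I830_BUF_CLIENT ? 0 : -EBUSY;
}
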
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
deleted file mode 100644
index 44f990bed8f..00000000000
--- a/drivers/gpu/drm/i830/i830_drv.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/* i830_drv.c -- I830 driver -*- linux-c -*-
- * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- * Abraham vd Merwe <abraham@2d3d.co.za>
- * Keith Whitwell <keith@tungstengraphics.com>
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i830_drm.h"
-#include "i830_drv.h"
-
-#include "drm_pciids.h"
-
-static struct pci_device_id pciidlist[] = {
- i830_PCI_IDS
-};
-
-static struct drm_driver driver = {
- .driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
- DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
-#if USE_IRQS
- .driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ,
-#endif
- .dev_priv_size = sizeof(drm_i830_buf_priv_t),
- .load = i830_driver_load,
- .lastclose = i830_driver_lastclose,
- .preclose = i830_driver_preclose,
- .device_is_agp = i830_driver_device_is_agp,
- .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
- .dma_quiescent = i830_driver_dma_quiescent,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
-#if USE_IRQS
- .irq_preinstall = i830_driver_irq_preinstall,
- .irq_postinstall = i830_driver_irq_postinstall,
- .irq_uninstall = i830_driver_irq_uninstall,
- .irq_handler = i830_driver_irq_handler,
-#endif
- .ioctls = i830_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- },
-
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
-
-static int __init i830_init(void)
-{
- driver.num_ioctls = i830_max_ioctl;
- return drm_init(&driver);
-}
-
-static void __exit i830_exit(void)
-{
- drm_exit(&driver);
-}
-
-module_init(i830_init);
-module_exit(i830_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
deleted file mode 100644
index da82afe4ded..00000000000
--- a/drivers/gpu/drm/i830/i830_drv.h
+++ /dev/null
@@ -1,292 +0,0 @@
-/* i830_drv.h -- Private header for the I830 driver -*- linux-c -*-
- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- *
- */
-
-#ifndef _I830_DRV_H_
-#define _I830_DRV_H_
-
-/* General customization:
- */
-
-#define DRIVER_AUTHOR "VA Linux Systems Inc."
-
-#define DRIVER_NAME "i830"
-#define DRIVER_DESC "Intel 830M"
-#define DRIVER_DATE "20021108"
-
-/* Interface history:
- *
- * 1.1: Original.
- * 1.2: ?
- * 1.3: New irq emit/wait ioctls.
- * New pageflip ioctl.
- * New getparam ioctl.
- * State for texunits 3&4 in sarea.
- * New (alternative) layout for texture state.
- */
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 3
-#define DRIVER_PATCHLEVEL 2
-
-/* Driver will work either way: IRQs save CPU time when waiting for
- * the card, but are subject to subtle interactions between bios,
- * hardware and the driver.
- */
-/* XXX: Add vblank support? */
-#define USE_IRQS 0
-
-typedef struct drm_i830_buf_priv {
- u32 *in_use;
- int my_use_idx;
- int currently_mapped;
- void __user *virtual;
- void *kernel_virtual;
- drm_local_map_t map;
-} drm_i830_buf_priv_t;
-
-typedef struct _drm_i830_ring_buffer {
- int tail_mask;
- unsigned long Start;
- unsigned long End;
- unsigned long Size;
- u8 *virtual_start;
- int head;
- int tail;
- int space;
- drm_local_map_t map;
-} drm_i830_ring_buffer_t;
-
-typedef struct drm_i830_private {
- struct drm_local_map *sarea_map;
- struct drm_local_map *mmio_map;
-
- drm_i830_sarea_t *sarea_priv;
- drm_i830_ring_buffer_t ring;
-
- void *hw_status_page;
- unsigned long counter;
-
- dma_addr_t dma_status_page;
-
- struct drm_buf *mmap_buffer;
-
- u32 front_di1, back_di1, zi1;
-
- int back_offset;
- int depth_offset;
- int front_offset;
- int w, h;
- int pitch;
- int back_pitch;
- int depth_pitch;
- unsigned int cpp;
-
- int do_boxes;
- int dma_used;
-
- int current_page;
- int page_flipping;
-
- wait_queue_head_t irq_queue;
- atomic_t irq_received;
- atomic_t irq_emitted;
-
- int use_mi_batchbuffer_start;
-
-} drm_i830_private_t;
-
-extern struct drm_ioctl_desc i830_ioctls[];
-extern int i830_max_ioctl;
-
-/* i830_irq.c */
-extern int i830_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i830_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i830_driver_irq_preinstall(struct drm_device * dev);
-extern void i830_driver_irq_postinstall(struct drm_device * dev);
-extern void i830_driver_irq_uninstall(struct drm_device * dev);
-extern int i830_driver_load(struct drm_device *, unsigned long flags);
-extern void i830_driver_preclose(struct drm_device * dev,
- struct drm_file *file_priv);
-extern void i830_driver_lastclose(struct drm_device * dev);
-extern void i830_driver_reclaim_buffers_locked(struct drm_device * dev,
- struct drm_file *file_priv);
-extern int i830_driver_dma_quiescent(struct drm_device * dev);
-extern int i830_driver_device_is_agp(struct drm_device * dev);
-
-#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
-#define I830_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
-#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
-#define I830_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
-
-#define I830_VERBOSE 0
-
-#define RING_LOCALS unsigned int outring, ringmask, outcount; \
- volatile char *virt;
-
-#define BEGIN_LP_RING(n) do { \
- if (I830_VERBOSE) \
- printk("BEGIN_LP_RING(%d)\n", (n)); \
- if (dev_priv->ring.space < n*4) \
- i830_wait_ring(dev, n*4, __func__); \
- outcount = 0; \
- outring = dev_priv->ring.tail; \
- ringmask = dev_priv->ring.tail_mask; \
- virt = dev_priv->ring.virtual_start; \
-} while (0)
-
-#define OUT_RING(n) do { \
- if (I830_VERBOSE) printk(" OUT_RING %x\n", (int)(n)); \
- *(volatile unsigned int *)(virt + outring) = n; \
- outcount++; \
- outring += 4; \
- outring &= ringmask; \
-} while (0)
-
-#define ADVANCE_LP_RING() do { \
- if (I830_VERBOSE) printk("ADVANCE_LP_RING %x\n", outring); \
- dev_priv->ring.tail = outring; \
- dev_priv->ring.space -= outcount * 4; \
- I830_WRITE(LP_RING + RING_TAIL, outring); \
-} while(0)
-
-extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
-
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
-#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
-#define CMD_REPORT_HEAD (7<<23)
-#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
-#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
-
-#define STATE3D_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16))
-#define LOAD_TEXTURE_MAP0 (1<<11)
-
-#define INST_PARSER_CLIENT 0x00000000
-#define INST_OP_FLUSH 0x02000000
-#define INST_FLUSH_MAP_CACHE 0x00000001
-
-#define BB1_START_ADDR_MASK (~0x7)
-#define BB1_PROTECTED (1<<0)
-#define BB1_UNPROTECTED (0<<0)
-#define BB2_END_ADDR_MASK (~0x7)
-
-#define I830REG_HWSTAM 0x02098
-#define I830REG_INT_IDENTITY_R 0x020a4
-#define I830REG_INT_MASK_R 0x020a8
-#define I830REG_INT_ENABLE_R 0x020a0
-
-#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
-
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-#define RING_TAIL 0x00
-#define TAIL_ADDR 0x001FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0xFFFFF000
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x001FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
-
-#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define SC_UPDATE_SCISSOR (0x1<<1)
-#define SC_ENABLE_MASK (0x1<<0)
-#define SC_ENABLE (0x1<<0)
-
-#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
-#define SCI_YMIN_MASK (0xffff<<16)
-#define SCI_XMIN_MASK (0xffff<<0)
-#define SCI_YMAX_MASK (0xffff<<16)
-#define SCI_XMAX_MASK (0xffff<<0)
-
-#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
-#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
-#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
-#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
-#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
-#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))
-
-#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
-
-#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
-#define ASYNC_FLIP (1<<22)
-
-#define CMD_3D (0x3<<29)
-#define STATE3D_CONST_BLEND_COLOR_CMD (CMD_3D|(0x1d<<24)|(0x88<<16))
-#define STATE3D_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16))
-
-#define BR00_BITBLT_CLIENT 0x40000000
-#define BR00_OP_COLOR_BLT 0x10000000
-#define BR00_OP_SRC_COPY_BLT 0x10C00000
-#define BR13_SOLID_PATTERN 0x80000000
-
-#define BUF_3D_ID_COLOR_BACK (0x3<<24)
-#define BUF_3D_ID_DEPTH (0x7<<24)
-#define BUF_3D_USE_FENCE (1<<23)
-#define BUF_3D_PITCH(x) (((x)/4)<<2)
-
-#define CMD_OP_MAP_PALETTE_LOAD ((3<<29)|(0x1d<<24)|(0x82<<16)|255)
-#define MAP_PALETTE_NUM(x) ((x<<8) & (1<<8))
-#define MAP_PALETTE_BOTH (1<<11)
-
-#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4)
-#define XY_COLOR_BLT_WRITE_ALPHA (1<<21)
-#define XY_COLOR_BLT_WRITE_RGB (1<<20)
-
-#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
-
-#define MI_BATCH_BUFFER ((0x30<<23)|1)
-#define MI_BATCH_BUFFER_START (0x31<<23)
-#define MI_BATCH_BUFFER_END (0xA<<23)
-#define MI_BATCH_NON_SECURE (1)
-
-#define MI_WAIT_FOR_EVENT ((0x3<<23))
-#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
-#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
-
-#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
-
-#endif
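
The three LP-ring macros above implement a simple producer protocol: BEGIN_LP_RING() reserves space (falling into i830_wait_ring() if the ring is too full), OUT_RING() writes one dword and wraps the offset through tail_mask, and ADVANCE_LP_RING() publishes the new tail to the hardware. A hedged example of their composition, modeled on the flush sequences in i830_dma.c; the trailing zero dword is there because commands must end on a quadword boundary:

static void emit_map_flush(struct drm_device *dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	BEGIN_LP_RING(2);	/* may block in i830_wait_ring() */
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);		/* pad to an even number of dwords */
	ADVANCE_LP_RING();	/* write the tail to LP_RING + RING_TAIL */
}
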
diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
deleted file mode 100644
index 91ec2bb497e..00000000000
--- a/drivers/gpu/drm/i830/i830_irq.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/* i830_irq.c -- IRQ support for the I830 -*- linux-c -*-
- *
- * Copyright 2002 Tungsten Graphics, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Keith Whitwell <keith@tungstengraphics.com>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i830_drm.h"
-#include "i830_drv.h"
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/delay.h>
-
-irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u16 temp;
-
- temp = I830_READ16(I830REG_INT_IDENTITY_R);
- DRM_DEBUG("%x\n", temp);
-
- if (!(temp & 2))
- return IRQ_NONE;
-
- I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
-
- atomic_inc(&dev_priv->irq_received);
- wake_up_interruptible(&dev_priv->irq_queue);
-
- return IRQ_HANDLED;
-}
-
-static int i830_emit_irq(struct drm_device * dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- DRM_DEBUG("%s\n", __func__);
-
- atomic_inc(&dev_priv->irq_emitted);
-
- BEGIN_LP_RING(2);
- OUT_RING(0);
- OUT_RING(GFX_OP_USER_INTERRUPT);
- ADVANCE_LP_RING();
-
- return atomic_read(&dev_priv->irq_emitted);
-}
-
-static int i830_wait_irq(struct drm_device * dev, int irq_nr)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- DECLARE_WAITQUEUE(entry, current);
- unsigned long end = jiffies + HZ * 3;
- int ret = 0;
-
- DRM_DEBUG("%s\n", __func__);
-
- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
- return 0;
-
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
-
- add_wait_queue(&dev_priv->irq_queue, &entry);
-
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
- break;
- if ((signed)(end - jiffies) <= 0) {
- DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
- I830_READ16(I830REG_INT_IDENTITY_R),
- I830_READ16(I830REG_INT_MASK_R),
- I830_READ16(I830REG_INT_ENABLE_R),
- I830_READ16(I830REG_HWSTAM));
-
- ret = -EBUSY; /* Lockup? Missed irq? */
- break;
- }
- schedule_timeout(HZ * 3);
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- }
-
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&dev_priv->irq_queue, &entry);
- return ret;
-}
-
-/* Needs the lock as it touches the ring.
- */
-int i830_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_irq_emit_t *emit = data;
- int result;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- result = i830_emit_irq(dev);
-
- if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/* Doesn't need the hardware lock.
- */
-int i830_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_irq_wait_t *irqwait = data;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- return i830_wait_irq(dev, irqwait->irq_seq);
-}
-
-/* drm_dma.h hooks
-*/
-void i830_driver_irq_preinstall(struct drm_device * dev)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
-
- I830_WRITE16(I830REG_HWSTAM, 0xffff);
- I830_WRITE16(I830REG_INT_MASK_R, 0x0);
- I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
- atomic_set(&dev_priv->irq_received, 0);
- atomic_set(&dev_priv->irq_emitted, 0);
- init_waitqueue_head(&dev_priv->irq_queue);
-}
-
-void i830_driver_irq_postinstall(struct drm_device * dev)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
-
- I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
-}
-
-void i830_driver_irq_uninstall(struct drm_device * dev)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- if (!dev_priv)
- return;
-
- I830_WRITE16(I830REG_INT_MASK_R, 0xffff);
- I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
-}
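
Taken together, the two ioctls above give userspace a crude fence: i830_emit_irq() queues a GFX_OP_USER_INTERRUPT and returns a sequence number, the handler bumps irq_received and wakes irq_queue, and i830_wait_irq() sleeps until the received count passes that sequence, with a three-second lockup timeout. A sketch of the intended pairing; do_render() is purely illustrative:

static int render_and_fence(struct drm_device *dev)
{
	int seq;

	do_render(dev);			/* queue drawing commands in the ring */
	seq = i830_emit_irq(dev);	/* user interrupt + emitted counter */
	return i830_wait_irq(dev, seq);	/* 0, or -EBUSY/-EINTR on timeout/signal */
}
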
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
new file mode 100644
index 00000000000..437e1824d0b
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -0,0 +1,83 @@
+config DRM_I915
+ tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+ depends on DRM
+ depends on X86 && PCI
+ depends on (AGP || AGP=n)
+ select INTEL_GTT
+ select AGP_INTEL if AGP
+ select INTERVAL_TREE
+ # we need shmfs for the swappable backing store, and in particular
+ # the shmem_readpage() which depends upon tmpfs
+ select SHMEM
+ select TMPFS
+ select DRM_KMS_HELPER
+ # i915 depends on ACPI_VIDEO when ACPI is enabled
+ # but for select to work, need to select ACPI_VIDEO's dependencies, ick
+ select BACKLIGHT_LCD_SUPPORT if ACPI
+ select BACKLIGHT_CLASS_DEVICE if ACPI
+ select INPUT if ACPI
+ select ACPI_VIDEO if ACPI
+ select ACPI_BUTTON if ACPI
+ help
+ Choose this option if you have a system that has "Intel Graphics
+ Media Accelerator" or "HD Graphics" integrated graphics,
+ including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
+ G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
+ Core i5, Core i7 as well as Atom CPUs with integrated graphics.
+ If M is selected, the module will be called i915. AGP support
+ is required for this driver to work. This driver is used by
+ the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
+ replaces the older i830 module that supported a subset of the
+ hardware in older X.org releases.
+
+ Note that the older i810/i815 chipsets require the use of the
+ i810 driver instead, and the Atom z5xx series has an entirely
+ different implementation.
+
+config DRM_I915_KMS
+ bool "Enable modesetting on intel by default"
+ depends on DRM_I915
+ default y
+ help
+ Choose this option if you want kernel modesetting enabled by default.
+
+ If in doubt, say "Y".
+
+config DRM_I915_FBDEV
+ bool "Enable legacy fbdev support for the modesetting intel driver"
+ depends on DRM_I915
+ select DRM_KMS_FB_HELPER
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ default y
+ help
+ Choose this option if you have a need for the legacy fbdev
+	  support. Note that this support also provides the linux console
+ support on top of the intel modesetting driver.
+
+ If in doubt, say "Y".
+
+config DRM_I915_PRELIMINARY_HW_SUPPORT
+ bool "Enable preliminary support for prerelease Intel hardware by default"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option if you have prerelease Intel hardware and want the
+ i915 driver to support it by default. You can enable such support at
+ runtime with the module option i915.preliminary_hw_support=1; this
+ option changes the default for that module option.
+
+ If in doubt, say "N".
+
+config DRM_I915_UMS
+ bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
+ depends on DRM_I915 && BROKEN
+ default n
+ help
+ Choose this option if you still need userspace modesetting.
+
+	  Userspace modesetting has been deprecated for quite some time now, so
+ enable this only if you have ancient versions of the DDX drivers.
+
+ If in doubt, say "N".
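
A Kconfig default such as DRM_I915_PRELIMINARY_HW_SUPPORT typically only seeds the initial value of the module option the help text mentions; the runtime switch remains i915.preliminary_hw_support=1. A sketch of the usual pattern (the variable name here is illustrative, not necessarily the driver's own):

static int preliminary_hw_support =
	IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
module_param(preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
		 "Enable preliminary support for prerelease hardware");
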
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9929f84ec3e..cad1683d8bb 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,33 +3,78 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
-i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
- i915_debugfs.o \
+
+# Please keep these build lists sorted!
+
+# core driver code
+i915-y := i915_drv.o \
+ i915_params.o \
i915_suspend.o \
- i915_gem.o \
+ i915_sysfs.o \
+ intel_pm.o
+i915-$(CONFIG_COMPAT) += i915_ioc32.o
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+
+# GEM code
+i915-y += i915_cmd_parser.o \
+ i915_gem_context.o \
+ i915_gem_render_state.o \
i915_gem_debug.o \
+ i915_gem_dmabuf.o \
+ i915_gem_evict.o \
+ i915_gem_execbuffer.o \
+ i915_gem_gtt.o \
+ i915_gem.o \
+ i915_gem_stolen.o \
i915_gem_tiling.o \
+ i915_gem_userptr.o \
+ i915_gpu_error.o \
+ i915_irq.o \
i915_trace_points.o \
+ intel_ringbuffer.o \
+ intel_uncore.o
+
+# autogenerated null render state
+i915-y += intel_renderstate_gen6.o \
+ intel_renderstate_gen7.o \
+ intel_renderstate_gen8.o
+
+# modesetting core code
+i915-y += intel_bios.o \
intel_display.o \
- intel_crt.o \
- intel_lvds.o \
- intel_bios.o \
- intel_dp.o \
- intel_hdmi.o \
- intel_sdvo.o \
intel_modes.o \
- intel_i2c.o \
- intel_fb.o \
- intel_tv.o \
- intel_dvo.o \
intel_overlay.o \
+ intel_sideband.o \
+ intel_sprite.o
+i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
+i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+
+# modesetting output/encoder code
+i915-y += dvo_ch7017.o \
dvo_ch7xxx.o \
- dvo_ch7017.o \
dvo_ivch.o \
+ dvo_ns2501.o \
+ dvo_sil164.o \
dvo_tfp410.o \
- dvo_sil164.o
+ intel_crt.o \
+ intel_ddi.o \
+ intel_dp.o \
+ intel_dsi_cmd.o \
+ intel_dsi.o \
+ intel_dsi_pll.o \
+ intel_dsi_panel_vbt.o \
+ intel_dvo.o \
+ intel_hdmi.o \
+ intel_i2c.o \
+ intel_lvds.o \
+ intel_panel.o \
+ intel_sdvo.o \
+ intel_tv.o
-i915-$(CONFIG_ACPI) += i915_opregion.o
-i915-$(CONFIG_COMPAT) += i915_ioc32.o
+# legacy horrors
+i915-y += i915_dma.o \
+ i915_ums.o
obj-$(CONFIG_DRM_I915) += i915.o
+
+CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 288fc50627e..312163379db 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -24,26 +24,22 @@
#define _INTEL_DVO_H
#include <linux/i2c.h>
-#include "drmP.h"
-#include "drm.h"
-#include "drm_crtc.h"
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
#include "intel_drv.h"
struct intel_dvo_device {
- char *name;
+ const char *name;
int type;
/* DVOA/B/C output register */
u32 dvo_reg;
/* GPIO register used for i2c bus to control this device */
u32 gpio;
int slave_addr;
- struct i2c_adapter *i2c_bus;
const struct intel_dvo_dev_ops *dev_ops;
void *dev_priv;
-
- struct drm_display_mode *panel_fixed_mode;
- bool panel_wants_dither;
+ struct i2c_adapter *i2c_bus;
};
struct intel_dvo_dev_ops {
@@ -61,23 +57,12 @@ struct intel_dvo_dev_ops {
void (*create_resources)(struct intel_dvo_device *dvo);
/*
- * Turn on/off output or set intermediate power levels if available.
+ * Turn on/off output.
*
- * Unsupported intermediate modes drop to the lower power setting.
- * If the mode is DPMSModeOff, the output must be disabled,
- * as the DPLL may be disabled afterwards.
- */
- void (*dpms)(struct intel_dvo_device *dvo, int mode);
-
- /*
- * Saves the output's state for restoration on VT switch.
- */
- void (*save)(struct intel_dvo_device *dvo);
-
- /*
- * Restore's the output's state at VT switch.
+	 * Because none of our dvo drivers support intermediate power levels,
+	 * we don't expose this in the interface.
*/
- void (*restore)(struct intel_dvo_device *dvo);
+ void (*dpms)(struct intel_dvo_device *dvo, bool enable);
/*
* Callback for testing a video mode for a given output.
@@ -92,17 +77,6 @@ struct intel_dvo_dev_ops {
struct drm_display_mode *mode);
/*
- * Callback to adjust the mode to be set in the CRTC.
- *
- * This allows an output to adjust the clock or even the entire set of
- * timings, which is used for panels with fixed timings or for
- * buses with clock limitations.
- */
- bool (*mode_fixup)(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
- /*
* Callback for preparing mode changes on an output
*/
void (*prepare)(struct intel_dvo_device *dvo);
@@ -128,6 +102,12 @@ struct intel_dvo_dev_ops {
*/
enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
+ /*
+ * Probe the current hw status, returning true if the connected output
+ * is active.
+ */
+ bool (*get_hw_state)(struct intel_dvo_device *dev);
+
/**
* Query the device for the modes it provides.
*
@@ -153,5 +133,6 @@ extern struct intel_dvo_dev_ops ch7xxx_ops;
extern struct intel_dvo_dev_ops ivch_ops;
extern struct intel_dvo_dev_ops tfp410_ops;
extern struct intel_dvo_dev_ops ch7017_ops;
+extern struct intel_dvo_dev_ops ns2501_ops;
#endif /* _INTEL_DVO_H */
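
The interface change above replaces the DPMS level with a plain bool and adds a readback hook, so the modeset core can verify what the encoder chip is actually doing instead of trusting cached software state. A minimal sketch of a conforming driver pair; the mydvo_* names and register helpers are hypothetical:

static void mydvo_dpms(struct intel_dvo_device *dvo, bool enable)
{
	mydvo_set_power(dvo, enable);	/* device-specific register write */
}

static bool mydvo_get_hw_state(struct intel_dvo_device *dvo)
{
	return mydvo_power_is_on(dvo);	/* probe the chip, not cached state */
}
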
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1184c14ba87..86b27d1d90c 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -159,82 +159,50 @@
#define CH7017_BANG_LIMIT_CONTROL 0x7f
struct ch7017_priv {
- uint8_t save_hapi;
- uint8_t save_vali;
- uint8_t save_valo;
- uint8_t save_ailo;
- uint8_t save_lvds_pll_vco;
- uint8_t save_feedback_div;
- uint8_t save_lvds_control_2;
- uint8_t save_outputs_enable;
- uint8_t save_lvds_power_down;
- uint8_t save_power_management;
+ uint8_t dummy;
};
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
-static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable);
-static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
+static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
{
- struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- u8 out_buf[2];
- u8 in_buf[2];
-
struct i2c_msg msgs[] = {
{
.addr = dvo->slave_addr,
.flags = 0,
.len = 1,
- .buf = out_buf,
+ .buf = &addr,
},
{
.addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
- .buf = in_buf,
+ .buf = val,
}
};
-
- out_buf[0] = addr;
- out_buf[1] = 0;
-
- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
- *val= in_buf[0];
- return true;
- };
-
- return false;
+ return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
}
-static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
+static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
{
- struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- uint8_t out_buf[2];
+ uint8_t buf[2] = { addr, val };
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
.len = 2,
- .buf = out_buf,
+ .buf = buf,
};
-
- out_buf[0] = addr;
- out_buf[1] = val;
-
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
- return true;
-
- return false;
+ return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
}
/** Probes for a CH7017 on the given bus and slave address. */
static bool ch7017_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
struct ch7017_priv *priv;
- uint8_t val;
+ const char *str;
+ u8 val;
priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
if (priv == NULL)
@@ -246,16 +214,27 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
goto fail;
- if (val != CH7017_DEVICE_ID_VALUE &&
- val != CH7018_DEVICE_ID_VALUE &&
- val != CH7019_DEVICE_ID_VALUE) {
+ switch (val) {
+ case CH7017_DEVICE_ID_VALUE:
+ str = "ch7017";
+ break;
+ case CH7018_DEVICE_ID_VALUE:
+ str = "ch7018";
+ break;
+ case CH7019_DEVICE_ID_VALUE:
+ str = "ch7019";
+ break;
+ default:
DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
- "Slave %d.\n",
- val, i2cbus->adapter.name,dvo->slave_addr);
+ "slave %d.\n",
+ val, adapter->name, dvo->slave_addr);
goto fail;
}
+ DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
+ str, adapter->name, dvo->slave_addr);
return true;
+
fail:
kfree(priv);
return false;
@@ -263,7 +242,7 @@ fail:
static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
{
- return connector_status_unknown;
+ return connector_status_connected;
}
static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
@@ -330,7 +309,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
(mode->hdisplay & 0x0700) >> 8;
- ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
+ ch7017_dpms(dvo, false);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
horizontal_active_pixel_input);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
@@ -352,7 +331,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
}
/* set the CH7017 power state */
-static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
{
uint8_t val;
@@ -366,7 +345,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
CH7017_DAC3_POWER_DOWN |
CH7017_TV_POWER_DOWN_EN);
- if (mode == DRM_MODE_DPMS_ON) {
+ if (enable) {
/* Turn on the LVDS */
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
val & ~CH7017_LVDS_POWER_DOWN_EN);
@@ -377,7 +356,19 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
}
/* XXX: Should actually wait for update power status somehow */
- udelay(20000);
+ msleep(20);
+}
+
+static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+ if (val & CH7017_LVDS_POWER_DOWN_EN)
+ return false;
+ else
+ return true;
}
static void ch7017_dump_regs(struct intel_dvo_device *dvo)
@@ -401,39 +392,6 @@ do { \
DUMP(CH7017_LVDS_POWER_DOWN);
}
-static void ch7017_save(struct intel_dvo_device *dvo)
-{
- struct ch7017_priv *priv = dvo->dev_priv;
-
- ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi);
- ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo);
- ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo);
- ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco);
- ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div);
- ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2);
- ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable);
- ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down);
- ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management);
-}
-
-static void ch7017_restore(struct intel_dvo_device *dvo)
-{
- struct ch7017_priv *priv = dvo->dev_priv;
-
- /* Power down before changing mode */
- ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
-
- ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi);
- ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo);
- ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo);
- ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco);
- ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div);
- ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2);
- ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable);
- ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down);
- ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management);
-}
-
static void ch7017_destroy(struct intel_dvo_device *dvo)
{
struct ch7017_priv *priv = dvo->dev_priv;
@@ -450,8 +408,7 @@ struct intel_dvo_dev_ops ch7017_ops = {
.mode_valid = ch7017_mode_valid,
.mode_set = ch7017_mode_set,
.dpms = ch7017_dpms,
+ .get_hw_state = ch7017_get_hw_state,
.dump_regs = ch7017_dump_regs,
- .save = ch7017_save,
- .restore = ch7017_restore,
.destroy = ch7017_destroy,
};
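
The rewritten ch7017_read()/ch7017_write() above drop the bogus container_of() detour and hand dvo->i2c_bus straight to i2c_transfer(); a register read is the standard two-message transaction (address write, then data read), and i2c_transfer() returns the number of messages completed. A sketch of that shape as a generic helper (dvo_readb() is illustrative):

static bool dvo_readb(struct i2c_adapter *bus, u8 slave, u8 addr, u8 *val)
{
	struct i2c_msg msgs[] = {
		{ .addr = slave, .flags = 0,        .len = 1, .buf = &addr },
		{ .addr = slave, .flags = I2C_M_RD, .len = 1, .buf = val  },
	};

	return i2c_transfer(bus, msgs, 2) == 2;	/* both messages must succeed */
}
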
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index d56ff5cc22b..80449f47596 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -32,12 +32,14 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#define CH7xxx_REG_DID 0x4b
#define CH7011_VID 0x83 /* 7010 as well */
+#define CH7010B_VID 0x05
#define CH7009A_VID 0x84
#define CH7009B_VID 0x85
#define CH7301_VID 0x95
#define CH7xxx_VID 0x84
#define CH7xxx_DID 0x17
+#define CH7010_DID 0x16
#define CH7xxx_NUM_REGS 0x4c
@@ -87,26 +89,24 @@ static struct ch7xxx_id_struct {
char *name;
} ch7xxx_ids[] = {
{ CH7011_VID, "CH7011" },
+ { CH7010B_VID, "CH7010B" },
{ CH7009A_VID, "CH7009A" },
{ CH7009B_VID, "CH7009B" },
{ CH7301_VID, "CH7301" },
};
-struct ch7xxx_reg_state {
- uint8_t regs[CH7xxx_NUM_REGS];
+static struct ch7xxx_did_struct {
+ uint8_t did;
+ char *name;
+} ch7xxx_dids[] = {
+ { CH7xxx_DID, "CH7XXX" },
+ { CH7010_DID, "CH7010B" },
};
struct ch7xxx_priv {
bool quiet;
-
- struct ch7xxx_reg_state save_reg;
- struct ch7xxx_reg_state mode_reg;
- uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT;
- uint8_t save_TLPF, save_TCT, save_PM, save_IDF;
};
-static void ch7xxx_save(struct intel_dvo_device *dvo);
-
static char *ch7xxx_get_id(uint8_t vid)
{
int i;
@@ -119,12 +119,23 @@ static char *ch7xxx_get_id(uint8_t vid)
return NULL;
}
+static char *ch7xxx_get_did(uint8_t did)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ch7xxx_dids); i++) {
+ if (ch7xxx_dids[i].did == did)
+ return ch7xxx_dids[i].name;
+ }
+
+ return NULL;
+}
+
/** Reads an 8 bit register */
static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
- struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
@@ -146,14 +157,14 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
out_buf[0] = addr;
out_buf[1] = 0;
- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
- };
+ }
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
}
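
The conversion pattern repeated throughout these DVO hunks: drop the container_of() back to intel_i2c_chan and issue the transfer on the generic i2c_adapter directly. A minimal sketch of the resulting read path, with an illustrative helper name (dvo_reg_read is not part of the patch):

#include <linux/i2c.h>

/* Sketch: write the register address, then read one byte back,
 * directly on the generic adapter, without the container_of() detour. */
static bool dvo_reg_read(struct i2c_adapter *adapter, u16 slave_addr,
			 u8 reg, u8 *val)
{
	u8 out_buf[1] = { reg };
	u8 in_buf[1];
	struct i2c_msg msgs[] = {
		{ .addr = slave_addr, .flags = 0,        .len = 1, .buf = out_buf },
		{ .addr = slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf },
	};

	if (i2c_transfer(adapter, msgs, 2) != 2)
		return false;	/* NAK or bus error */

	*val = in_buf[0];
	return true;
}
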
@@ -163,7 +174,6 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
@@ -175,12 +185,12 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
@@ -192,7 +202,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
/* this will detect the CH7xxx chip on the specified i2c bus */
struct ch7xxx_priv *ch7xxx;
uint8_t vendor, device;
- char *name;
+ char *name, *devid;
ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
if (ch7xxx == NULL)
@@ -217,7 +227,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
goto out;
- if (device != CH7xxx_DID) {
+ devid = ch7xxx_get_did(device);
+ if (!devid) {
DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
"slave %d.\n",
vendor, adapter->name, dvo->slave_addr);
@@ -296,56 +307,43 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
idf |= CH7xxx_IDF_HSP;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- idf |= CH7xxx_IDF_HSP;
+ idf |= CH7xxx_IDF_VSP;
ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
}
/* set the CH7xxx power state */
-static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
+static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
{
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
else
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
}
-static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
+static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
{
- struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
- int i;
+ u8 val;
- for (i = 0; i < CH7xxx_NUM_REGS; i++) {
- if ((i % 8) == 0 )
- DRM_LOG_KMS("\n %02X: ", i);
- DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
- }
-}
+ ch7xxx_readb(dvo, CH7xxx_PM, &val);
-static void ch7xxx_save(struct intel_dvo_device *dvo)
-{
- struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
-
- ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL);
- ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP);
- ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD);
- ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT);
- ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF);
- ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM);
- ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF);
+ if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP))
+ return true;
+ else
+ return false;
}
-static void ch7xxx_restore(struct intel_dvo_device *dvo)
+static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
{
- struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+ int i;
- ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL);
- ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP);
- ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD);
- ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT);
- ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF);
- ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF);
- ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM);
+ for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+ uint8_t val;
+ if ((i % 8) == 0)
+ DRM_DEBUG_KMS("\n %02X: ", i);
+ ch7xxx_readb(dvo, i, &val);
+ DRM_DEBUG_KMS("%02X ", val);
+ }
}
static void ch7xxx_destroy(struct intel_dvo_device *dvo)
@@ -364,8 +362,7 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
.mode_valid = ch7xxx_mode_valid,
.mode_set = ch7xxx_mode_set,
.dpms = ch7xxx_dpms,
+ .get_hw_state = ch7xxx_get_hw_state,
.dump_regs = ch7xxx_dump_regs,
- .save = ch7xxx_save,
- .restore = ch7xxx_restore,
.destroy = ch7xxx_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 24169e528f0..0f2587ff347 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -153,9 +153,6 @@ struct ivch_priv {
bool quiet;
uint16_t width, height;
-
- uint16_t save_VR01;
- uint16_t save_VR40;
};
@@ -170,7 +167,6 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[1];
u8 in_buf[2];
@@ -196,15 +192,15 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
out_buf[0] = addr;
- if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) {
+ if (i2c_transfer(adapter, msgs, 3) == 3) {
*data = (in_buf[1] << 8) | in_buf[0];
return true;
- };
+ }
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from "
"%s:%02x.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
}
@@ -214,7 +210,6 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[3];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
@@ -227,12 +222,12 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
out_buf[1] = data & 0xff;
out_buf[2] = data >> 8;
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
@@ -293,7 +288,7 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
}
/** Sets the power state of the panel connected to the ivch */
-static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
+static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
int i;
uint16_t vr01, vr30, backlight;
@@ -302,13 +297,13 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
if (!ivch_read(dvo, VR01, &vr01))
return;
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
backlight = 1;
else
backlight = 0;
ivch_write(dvo, VR80, backlight);
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
else
vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
@@ -320,7 +315,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
if (!ivch_read(dvo, VR30, &vr30))
break;
- if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON))
+ if (((vr30 & VR30_PANEL_ON) != 0) == enable)
break;
udelay(1000);
}
@@ -328,6 +323,20 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
udelay(16 * 1000);
}
+static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint16_t vr01;
+
+	/* Query the current power state of the panel. */
+ if (!ivch_read(dvo, VR01, &vr01))
+ return false;
+
+ if (vr01 & VR01_LCD_ENABLE)
+ return true;
+ else
+ return false;
+}
+
static void ivch_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -349,8 +358,8 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
(adjusted_mode->hdisplay - 1)) >> 2;
y_ratio = (((mode->vdisplay - 1) << 16) /
(adjusted_mode->vdisplay - 1)) >> 2;
- ivch_write (dvo, VR42, x_ratio);
- ivch_write (dvo, VR41, y_ratio);
+ ivch_write(dvo, VR42, x_ratio);
+ ivch_write(dvo, VR41, y_ratio);
} else {
vr01 &= ~VR01_PANEL_FIT_ENABLE;
vr40 &= ~VR40_CLOCK_GATING_ENABLE;
@@ -368,57 +377,41 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
uint16_t val;
ivch_read(dvo, VR00, &val);
- DRM_LOG_KMS("VR00: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
ivch_read(dvo, VR01, &val);
- DRM_LOG_KMS("VR01: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR01: 0x%04x\n", val);
ivch_read(dvo, VR30, &val);
- DRM_LOG_KMS("VR30: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR30: 0x%04x\n", val);
ivch_read(dvo, VR40, &val);
- DRM_LOG_KMS("VR40: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR40: 0x%04x\n", val);
/* GPIO registers */
ivch_read(dvo, VR80, &val);
- DRM_LOG_KMS("VR80: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR80: 0x%04x\n", val);
ivch_read(dvo, VR81, &val);
- DRM_LOG_KMS("VR81: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR81: 0x%04x\n", val);
ivch_read(dvo, VR82, &val);
- DRM_LOG_KMS("VR82: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR82: 0x%04x\n", val);
ivch_read(dvo, VR83, &val);
- DRM_LOG_KMS("VR83: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR83: 0x%04x\n", val);
ivch_read(dvo, VR84, &val);
- DRM_LOG_KMS("VR84: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR84: 0x%04x\n", val);
ivch_read(dvo, VR85, &val);
- DRM_LOG_KMS("VR85: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR85: 0x%04x\n", val);
ivch_read(dvo, VR86, &val);
- DRM_LOG_KMS("VR86: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR86: 0x%04x\n", val);
ivch_read(dvo, VR87, &val);
- DRM_LOG_KMS("VR87: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR87: 0x%04x\n", val);
ivch_read(dvo, VR88, &val);
- DRM_LOG_KMS("VR88: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR88: 0x%04x\n", val);
/* Scratch register 0 - AIM Panel type */
ivch_read(dvo, VR8E, &val);
- DRM_LOG_KMS("VR8E: 0x%04x\n", val);
+ DRM_DEBUG_KMS("VR8E: 0x%04x\n", val);
/* Scratch register 1 - Status register */
ivch_read(dvo, VR8F, &val);
- DRM_LOG_KMS("VR8F: 0x%04x\n", val);
-}
-
-static void ivch_save(struct intel_dvo_device *dvo)
-{
- struct ivch_priv *priv = dvo->dev_priv;
-
- ivch_read(dvo, VR01, &priv->save_VR01);
- ivch_read(dvo, VR40, &priv->save_VR40);
-}
-
-static void ivch_restore(struct intel_dvo_device *dvo)
-{
- struct ivch_priv *priv = dvo->dev_priv;
-
- ivch_write(dvo, VR01, priv->save_VR01);
- ivch_write(dvo, VR40, priv->save_VR40);
+ DRM_DEBUG_KMS("VR8F: 0x%04x\n", val);
}
static void ivch_destroy(struct intel_dvo_device *dvo)
@@ -431,11 +424,10 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
}
}
-struct intel_dvo_dev_ops ivch_ops= {
+struct intel_dvo_dev_ops ivch_ops = {
.init = ivch_init,
.dpms = ivch_dpms,
- .save = ivch_save,
- .restore = ivch_restore,
+ .get_hw_state = ivch_get_hw_state,
.mode_valid = ivch_mode_valid,
.mode_set = ivch_mode_set,
.detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
new file mode 100644
index 00000000000..74f2af7c2d3
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -0,0 +1,517 @@
+/*
+ *
+ * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "dvo.h"
+#include "i915_reg.h"
+#include "i915_drv.h"
+
+#define NS2501_VID 0x1305
+#define NS2501_DID 0x6726
+
+#define NS2501_VID_LO 0x00
+#define NS2501_VID_HI 0x01
+#define NS2501_DID_LO 0x02
+#define NS2501_DID_HI 0x03
+#define NS2501_REV 0x04
+#define NS2501_RSVD 0x05
+#define NS2501_FREQ_LO 0x06
+#define NS2501_FREQ_HI 0x07
+
+#define NS2501_REG8 0x08
+#define NS2501_8_VEN (1<<5)
+#define NS2501_8_HEN (1<<4)
+#define NS2501_8_DSEL (1<<3)
+#define NS2501_8_BPAS (1<<2)
+#define NS2501_8_RSVD (1<<1)
+#define NS2501_8_PD (1<<0)
+
+#define NS2501_REG9 0x09
+#define NS2501_9_VLOW (1<<7)
+#define NS2501_9_MSEL_MASK (0x7<<4)
+#define NS2501_9_TSEL (1<<3)
+#define NS2501_9_RSEN (1<<2)
+#define NS2501_9_RSVD (1<<1)
+#define NS2501_9_MDI (1<<0)
+
+#define NS2501_REGC 0x0c
+
+struct ns2501_priv {
+ //I2CDevRec d;
+ bool quiet;
+ int reg_8_shadow;
+ int reg_8_set;
+ // Shadow registers for i915
+ int dvoc;
+ int pll_a;
+ int srcdim;
+ int fw_blc;
+};
+
+#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
+
+/*
+ * For reasons unclear to me, the ns2501, at least on the Fujitsu/Siemens
+ * laptops, does not respond on the i2c bus unless both the PLL is
+ * running and the display is configured in its native resolution.
+ * This function forces the DVO on, and stores the registers it touches.
+ * Afterwards, the registers are restored to their regular values.
+ *
+ * This is pretty much a hack, though it works.
+ * Without it, ns2501_readb and ns2501_writeb fail
+ * when switching the resolution.
+ */
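
The dvoc/pll_a/srcdim/fw_blc fields above exist to shadow the i915 registers this hack touches. A purely hypothetical sketch of the helper the comment describes, since it is not part of this file; the register accessors, register names, and enable bits below are placeholders, not the i915 API:

/* Hypothetical sketch only: force the PLL and DVO port on so the
 * ns2501 answers on i2c, remembering what was clobbered so it can
 * be restored afterwards. i915_read/i915_write, DPLL_A_REG, DVO_C_REG
 * and the enable bits are placeholder names. */
static void ns2501_force_dvo_on(struct ns2501_priv *ns)
{
	ns->pll_a = i915_read(DPLL_A_REG);	/* save current values */
	ns->dvoc = i915_read(DVO_C_REG);

	i915_write(DPLL_A_REG, ns->pll_a | DPLL_ENABLE_BIT);
	i915_write(DVO_C_REG, ns->dvoc | DVO_ENABLE_BIT);

	/* ...perform the i2c transfers, then write back pll_a/dvoc... */
}
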
+
+/*
+** Read a register from the ns2501.
+** Returns true if successful, false otherwise.
+** If it returns false, it might be wise to enable the
+** DVO with the above function.
+*/
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!ns->quiet) {
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:0x%02x.\n",
+			      addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/*
+** Write a register to the ns2501.
+** Returns true if successful, false otherwise.
+** If it returns false, it might be wise to enable the
+** DVO with the above function.
+*/
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ uint8_t out_buf[2];
+
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(adapter, &msg, 1) == 1) {
+ return true;
+ }
+
+ if (!ns->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/* National Semiconductor 2501 driver for chip on i2c bus
+ * scan for the chip on the bus.
+ * Hope the VBIOS initialized the PLL correctly so we can
+ * talk to it. If not, it will not be seen and not detected.
+ * Bummer!
+ */
+static bool ns2501_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+{
+ /* this will detect the NS2501 chip on the specified i2c bus */
+ struct ns2501_priv *ns;
+ unsigned char ch;
+
+ ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
+ if (ns == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = ns;
+ ns->quiet = true;
+
+ if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
+ goto out;
+
+ if (ch != (NS2501_VID & 0xff)) {
+		DRM_DEBUG_KMS("ns2501 not detected, got %d from %s slave %d.\n",
+ ch, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+
+ if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
+ goto out;
+
+ if (ch != (NS2501_DID & 0xff)) {
+		DRM_DEBUG_KMS("ns2501 not detected, got %d from %s slave %d.\n",
+ ch, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+ ns->quiet = false;
+ ns->reg_8_set = 0;
+ ns->reg_8_shadow =
+ NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
+
+	DRM_DEBUG_KMS("initialized ns2501 dvo controller successfully!\n");
+ return true;
+
+out:
+ kfree(ns);
+ return false;
+}
+
+static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
+{
+ /*
+	 * This is a laptop display; it doesn't have hotplugging.
+	 * Even if it did, the detection bit of the 2501 is unreliable, as
+	 * it only works for some display types.
+	 * It is even more unreliable as the PLL must be active to
+	 * allow reading from the chip.
+ */
+ return connector_status_connected;
+}
+
+static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+	DRM_DEBUG_KMS("is mode valid (hdisplay=%d, htotal=%d, vdisplay=%d, vtotal=%d)\n",
+		      mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
+
+ /*
+	 * Currently, these are all the modes I have data for.
+	 * More might exist. It is unclear how to find the native
+	 * resolution of the panel from here, otherwise we could
+	 * always accept it by disabling the scaler.
+ */
+ if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
+ (mode->hdisplay == 640 && mode->vdisplay == 480) ||
+ (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
+ return MODE_OK;
+ } else {
+ return MODE_ONE_SIZE; /* Is this a reasonable error? */
+ }
+}
+
+static void ns2501_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ bool ok;
+ int retries = 10;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+	DRM_DEBUG_KMS("set mode (hdisplay=%d, htotal=%d, vdisplay=%d, vtotal=%d).\n",
+		      mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
+
+ /*
+	 * Where do I find the native resolution for which scaling is not required?
+	 *
+	 * First trigger the DVO on, as otherwise the chip does not appear on the
+	 * i2c bus.
+ */
+ do {
+ ok = true;
+
+ if (mode->hdisplay == 800 && mode->vdisplay == 600) {
+ /* mode 277 */
+ ns->reg_8_shadow &= ~NS2501_8_BPAS;
+ DRM_DEBUG_KMS("switching to 800x600\n");
+
+ /*
+ * No, I do not know where this data comes from.
+ * It is just what the video bios left in the DVO, so
+			 * I'm just copying it over here.
+ * This also means that I cannot support any other modes
+ * except the ones supported by the bios.
+ */
+ ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works.
+ ok &= ns2501_writeb(dvo, 0x1b, 0x19);
+ ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer
+ ok &= ns2501_writeb(dvo, 0x1d, 0x02);
+
+ ok &= ns2501_writeb(dvo, 0x34, 0x03);
+ ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+ ok &= ns2501_writeb(dvo, 0x80, 0x27);
+ ok &= ns2501_writeb(dvo, 0x81, 0x03);
+ ok &= ns2501_writeb(dvo, 0x82, 0x41);
+ ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+ ok &= ns2501_writeb(dvo, 0x8e, 0x04);
+ ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */
+ ok &= ns2501_writeb(dvo, 0x91, 0x07);
+ ok &= ns2501_writeb(dvo, 0x94, 0x00);
+ ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x96, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x99, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+ ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */
+ ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+ ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0xa4, 0x80);
+
+ ok &= ns2501_writeb(dvo, 0xb6, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
+
+ ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
+
+ ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
+
+ ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+ ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
+
+ ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc7, 0x73);
+ ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+ } else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
+ /* mode 274 */
+ DRM_DEBUG_KMS("switching to 640x480\n");
+ /*
+ * No, I do not know where this data comes from.
+ * It is just what the video bios left in the DVO, so
+			 * I'm just copying it over here.
+ * This also means that I cannot support any other modes
+ * except the ones supported by the bios.
+ */
+ ns->reg_8_shadow &= ~NS2501_8_BPAS;
+
+ ok &= ns2501_writeb(dvo, 0x11, 0xa0);
+ ok &= ns2501_writeb(dvo, 0x1b, 0x11);
+ ok &= ns2501_writeb(dvo, 0x1c, 0x54);
+ ok &= ns2501_writeb(dvo, 0x1d, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0x34, 0x03);
+ ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+ ok &= ns2501_writeb(dvo, 0x80, 0xff);
+ ok &= ns2501_writeb(dvo, 0x81, 0x07);
+ ok &= ns2501_writeb(dvo, 0x82, 0x3d);
+ ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+ ok &= ns2501_writeb(dvo, 0x8e, 0x10);
+ ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */
+ ok &= ns2501_writeb(dvo, 0x91, 0x07);
+ ok &= ns2501_writeb(dvo, 0x94, 0x00);
+ ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x96, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x99, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+ ok &= ns2501_writeb(dvo, 0x9c, 0x24);
+ ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+ ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0xa4, 0x84);
+
+ ok &= ns2501_writeb(dvo, 0xb6, 0x09);
+
+ ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
+
+ ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xc1, 0x90);
+
+ ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
+
+ ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+ ok &= ns2501_writeb(dvo, 0xc5, 0x16);
+
+ ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc7, 0x02);
+ ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+ } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
+ /* mode 280 */
+ DRM_DEBUG_KMS("switching to 1024x768\n");
+ /*
+ * This might or might not work, actually. I'm silently
+ * assuming here that the native panel resolution is
+			 * 1024x768. If not, then this leaves the scaler disabled,
+			 * generating a picture that is likely not the expected one.
+			 *
+			 * The problem is that I do not know where to take the
+			 * panel dimensions from.
+			 *
+			 * Enable the bypass, scaling not required.
+			 *
+			 * The scaler registers are irrelevant here...
+ *
+ */
+ ns->reg_8_shadow |= NS2501_8_BPAS;
+ ok &= ns2501_writeb(dvo, 0x37, 0x44);
+ } else {
+ /*
+ * Data not known. Bummer!
+			 * Hopefully, the code should never get here,
+			 * as ns2501_mode_valid accepted no other modes.
+ */
+ ns->reg_8_shadow |= NS2501_8_BPAS;
+ }
+ ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
+ } while (!ok && retries--);
+}
+
+/* read back the current NS2501 power state */
+static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
+{
+ unsigned char ch;
+
+ if (!ns2501_readb(dvo, NS2501_REG8, &ch))
+ return false;
+
+ if (ch & NS2501_8_PD)
+ return true;
+ else
+ return false;
+}
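
All five chips now expose the same pair: dpms() takes a bool and get_hw_state() reads the power bit back from the transmitter itself instead of trusting cached state. A hedged sketch of how the modeset side might consume the hook; the wrapper name is illustrative:

/* Illustrative caller: trust the chip's own answer about whether it
 * is powered, treating a missing readout hook as "off". */
static bool dvo_transmitter_active(struct intel_dvo_device *dvo,
				   const struct intel_dvo_dev_ops *ops)
{
	if (!ops->get_hw_state)
		return false;

	return ops->get_hw_state(dvo);
}
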
+
+/* set the NS2501 power state */
+static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ bool ok;
+ int retries = 10;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+ unsigned char ch;
+
+	DRM_DEBUG_KMS("Trying to set the dpms of the DVO to %i\n", enable);
+
+ ch = ns->reg_8_shadow;
+
+ if (enable)
+ ch |= NS2501_8_PD;
+ else
+ ch &= ~NS2501_8_PD;
+
+ if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
+ ns->reg_8_set = 1;
+ ns->reg_8_shadow = ch;
+
+ do {
+ ok = true;
+ ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
+			ok &= ns2501_writeb(dvo, 0x34, enable ? 0x03 : 0x00);
+			ok &= ns2501_writeb(dvo, 0x35, enable ? 0xff : 0x00);
+ } while (!ok && retries--);
+ }
+}
+
+static void ns2501_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ ns2501_readb(dvo, NS2501_FREQ_LO, &val);
+ DRM_DEBUG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_FREQ_HI, &val);
+ DRM_DEBUG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REG8, &val);
+ DRM_DEBUG_KMS("NS2501_REG8: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REG9, &val);
+ DRM_DEBUG_KMS("NS2501_REG9: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REGC, &val);
+ DRM_DEBUG_KMS("NS2501_REGC: 0x%02x\n", val);
+}
+
+static void ns2501_destroy(struct intel_dvo_device *dvo)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+
+ if (ns) {
+ kfree(ns);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ns2501_ops = {
+ .init = ns2501_init,
+ .detect = ns2501_detect,
+ .mode_valid = ns2501_mode_valid,
+ .mode_set = ns2501_mode_set,
+ .dpms = ns2501_dpms,
+ .get_hw_state = ns2501_get_hw_state,
+ .dump_regs = ns2501_dump_regs,
+ .destroy = ns2501_destroy,
+};
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 0001c13f0a8..fa011496707 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -58,17 +58,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#define SIL164_REGC 0x0c
-struct sil164_save_rec {
- uint8_t reg8;
- uint8_t reg9;
- uint8_t regc;
-};
-
struct sil164_priv {
//I2CDevRec d;
bool quiet;
- struct sil164_save_rec save_regs;
- struct sil164_save_rec mode_regs;
};
#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
@@ -77,7 +69,6 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct sil164_priv *sil = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
@@ -99,23 +90,22 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
out_buf[0] = addr;
out_buf[1] = 0;
- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
- };
+ }
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
}
static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
- struct sil164_priv *sil= dvo->dev_priv;
+ struct sil164_priv *sil = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
@@ -127,12 +117,12 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
@@ -218,7 +208,7 @@ static void sil164_mode_set(struct intel_dvo_device *dvo,
}
/* set the SIL164 power state */
-static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
+static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
{
int ret;
unsigned char ch;
@@ -227,7 +217,7 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
if (ret == false)
return;
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
ch |= SIL164_8_PD;
else
ch &= ~SIL164_8_PD;
@@ -236,48 +226,35 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
return;
}
+static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
+{
+ int ret;
+ unsigned char ch;
+
+ ret = sil164_readb(dvo, SIL164_REG8, &ch);
+ if (ret == false)
+ return false;
+
+ if (ch & SIL164_8_PD)
+ return true;
+ else
+ return false;
+}
+
static void sil164_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
sil164_readb(dvo, SIL164_FREQ_LO, &val);
- DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
+ DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
sil164_readb(dvo, SIL164_FREQ_HI, &val);
- DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
+ DRM_DEBUG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REG8, &val);
- DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
+ DRM_DEBUG_KMS("SIL164_REG8: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REG9, &val);
- DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
+ DRM_DEBUG_KMS("SIL164_REG9: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REGC, &val);
- DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
-}
-
-static void sil164_save(struct intel_dvo_device *dvo)
-{
- struct sil164_priv *sil= dvo->dev_priv;
-
- if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8))
- return;
-
- if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9))
- return;
-
- if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc))
- return;
-
- return;
-}
-
-static void sil164_restore(struct intel_dvo_device *dvo)
-{
- struct sil164_priv *sil = dvo->dev_priv;
-
- /* Restore it powered down initially */
- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1);
-
- sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9);
- sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc);
- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8);
+ DRM_DEBUG_KMS("SIL164_REGC: 0x%02x\n", val);
}
static void sil164_destroy(struct intel_dvo_device *dvo)
@@ -296,8 +273,7 @@ struct intel_dvo_dev_ops sil164_ops = {
.mode_valid = sil164_mode_valid,
.mode_set = sil164_mode_set,
.dpms = sil164_dpms,
+ .get_hw_state = sil164_get_hw_state,
.dump_regs = sil164_dump_regs,
- .save = sil164_save,
- .restore = sil164_restore,
.destroy = sil164_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index c7c391bc116..7853719a0e8 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -56,7 +56,7 @@
#define TFP410_CTL_2_MDI (1<<0)
#define TFP410_CTL_3 0x0A
-#define TFP410_CTL_3_DK_MASK (0x7<<5)
+#define TFP410_CTL_3_DK_MASK (0x7<<5)
#define TFP410_CTL_3_DK (1<<5)
#define TFP410_CTL_3_DKEN (1<<4)
#define TFP410_CTL_3_CTL_MASK (0x7<<1)
@@ -86,23 +86,14 @@
#define TFP410_V_RES_LO 0x3C
#define TFP410_V_RES_HI 0x3D
-struct tfp410_save_rec {
- uint8_t ctl1;
- uint8_t ctl2;
-};
-
struct tfp410_priv {
bool quiet;
-
- struct tfp410_save_rec saved_reg;
- struct tfp410_save_rec mode_reg;
};
static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
@@ -124,14 +115,14 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
out_buf[0] = addr;
out_buf[1] = 0;
- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
- };
+ }
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
}
@@ -140,7 +131,6 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
@@ -152,12 +142,12 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
@@ -216,7 +206,7 @@ static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
uint8_t ctl2;
if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
- if (ctl2 & TFP410_CTL_2_HTPLG)
+ if (ctl2 & TFP410_CTL_2_RSEN)
ret = connector_status_connected;
else
ret = connector_status_disconnected;
@@ -235,23 +225,23 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- /* As long as the basics are set up, since we don't have clock dependencies
- * in the mode setup, we can just leave the registers alone and everything
- * will work fine.
- */
- /* don't do much */
- return;
+ /* As long as the basics are set up, since we don't have clock dependencies
+ * in the mode setup, we can just leave the registers alone and everything
+ * will work fine.
+ */
+ /* don't do much */
+ return;
}
/* set the tfp410 power state */
-static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
+static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
{
uint8_t ctl1;
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return;
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
ctl1 |= TFP410_CTL_1_PD;
else
ctl1 &= ~TFP410_CTL_1_PD;
@@ -259,60 +249,51 @@ static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
}
+static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint8_t ctl1;
+
+ if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+ return false;
+
+ if (ctl1 & TFP410_CTL_1_PD)
+ return true;
+ else
+ return false;
+}
+
static void tfp410_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val, val2;
tfp410_readb(dvo, TFP410_REV, &val);
- DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
+ DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_1, &val);
- DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
+ DRM_DEBUG_KMS("TFP410_CTL1: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_2, &val);
- DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
+ DRM_DEBUG_KMS("TFP410_CTL2: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_3, &val);
- DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
+ DRM_DEBUG_KMS("TFP410_CTL3: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_USERCFG, &val);
- DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
+ DRM_DEBUG_KMS("TFP410_USERCFG: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_DLY, &val);
- DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
+ DRM_DEBUG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_CTL, &val);
- DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
+ DRM_DEBUG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_TOP, &val);
- DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
+ DRM_DEBUG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
- DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+ DRM_DEBUG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
- DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+ DRM_DEBUG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_H_RES_LO, &val);
tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
- DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+ DRM_DEBUG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_V_RES_LO, &val);
tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
- DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
-}
-
-static void tfp410_save(struct intel_dvo_device *dvo)
-{
- struct tfp410_priv *tfp = dvo->dev_priv;
-
- if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1))
- return;
-
- if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2))
- return;
-}
-
-static void tfp410_restore(struct intel_dvo_device *dvo)
-{
- struct tfp410_priv *tfp = dvo->dev_priv;
-
- /* Restore it powered down initially */
- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1);
-
- tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2);
- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1);
+ DRM_DEBUG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
}
static void tfp410_destroy(struct intel_dvo_device *dvo)
@@ -331,8 +312,7 @@ struct intel_dvo_dev_ops tfp410_ops = {
.mode_valid = tfp410_mode_valid,
.mode_set = tfp410_mode_set,
.dpms = tfp410_dpms,
+ .get_hw_state = tfp410_get_hw_state,
.dump_regs = tfp410_dump_regs,
- .save = tfp410_save,
- .restore = tfp410_restore,
.destroy = tfp410_destroy,
};
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
new file mode 100644
index 00000000000..9d7954366bd
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -0,0 +1,1061 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Brad Volkin <bradley.d.volkin@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+
+/**
+ * DOC: batch buffer command parser
+ *
+ * Motivation:
+ * Certain OpenGL features (e.g. transform feedback, performance monitoring)
+ * require userspace code to submit batches containing commands such as
+ * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
+ * generations of the hardware will noop these commands in "unsecure" batches
+ * (which includes all userspace batches submitted via i915) even though the
+ * commands may be safe and represent the intended programming model of the
+ * device.
+ *
+ * The software command parser is similar in operation to the command parsing
+ * done in hardware for unsecure batches. However, the software parser allows
+ * some operations that would be noop'd by hardware, if the parser determines
+ * the operation is safe, and submits the batch as "secure" to prevent hardware
+ * parsing.
+ *
+ * Threats:
+ * At a high level, the hardware (and software) checks attempt to prevent
+ * granting userspace undue privileges. There are three categories of privilege.
+ *
+ * First, commands which are explicitly defined as privileged or which should
+ * only be used by the kernel driver. The parser generally rejects such
+ * commands, though it may allow some from the drm master process.
+ *
+ * Second, commands which access registers. To support correct/enhanced
+ * userspace functionality, particularly certain OpenGL extensions, the parser
+ * provides a whitelist of registers which userspace may safely access (for both
+ * normal and drm master processes).
+ *
+ * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
+ * The parser always rejects such commands.
+ *
+ * The majority of the problematic commands fall in the MI_* range, with only a
+ * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
+ *
+ * Implementation:
+ * Each ring maintains tables of commands and registers which the parser uses in
+ * scanning batch buffers submitted to that ring.
+ *
+ * Since the set of commands that the parser must check for is significantly
+ * smaller than the number of commands supported, the parser tables contain only
+ * those commands required by the parser. This generally works because command
+ * opcode ranges have standard command length encodings. So for commands that
+ * the parser does not need to check, it can easily skip them. This is
+ * implemented via a per-ring length decoding vfunc.
+ *
+ * Unfortunately, there are a number of commands that do not follow the standard
+ * length encoding for their opcode range, primarily amongst the MI_* commands.
+ * To handle this, the parser provides a way to define explicit "skip" entries
+ * in the per-ring command tables.
+ *
+ * Other command table entries map fairly directly to high level categories
+ * mentioned above: rejected, master-only, register whitelist. The parser
+ * implements a number of checks, including the privileged memory checks, via a
+ * general bitmasking mechanism.
+ */
+
+#define STD_MI_OPCODE_MASK 0xFF800000
+#define STD_3D_OPCODE_MASK 0xFFFF0000
+#define STD_2D_OPCODE_MASK 0xFFC00000
+#define STD_MFX_OPCODE_MASK 0xFFFF0000
+
+#define CMD(op, opm, f, lm, fl, ...) \
+ { \
+ .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
+ .cmd = { (op), (opm) }, \
+ .length = { (lm) }, \
+ __VA_ARGS__ \
+ }
+
+/* Convenience macros to compress the tables */
+#define SMI STD_MI_OPCODE_MASK
+#define S3D STD_3D_OPCODE_MASK
+#define S2D STD_2D_OPCODE_MASK
+#define SMFX STD_MFX_OPCODE_MASK
+#define F true
+#define S CMD_DESC_SKIP
+#define R CMD_DESC_REJECT
+#define W CMD_DESC_REGISTER
+#define B CMD_DESC_BITMASK
+#define M CMD_DESC_MASTER
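
A quick expansion check of CMD() with these shorthands; this is mechanical substitution of the defines above:

/*
 * CMD( MI_NOOP, SMI, F, 1, S ) expands to:
 *
 *	{
 *		.flags = CMD_DESC_SKIP | CMD_DESC_FIXED,
 *		.cmd = { MI_NOOP, STD_MI_OPCODE_MASK },
 *		.length = { 1 },
 *	}
 *
 * i.e. a fixed-length, one-dword MI command that the parser skips.
 */
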
+
+/* Command Mask Fixed Len Action
+ ---------------------------------------------------------- */
+static const struct drm_i915_cmd_descriptor common_cmds[] = {
+ CMD( MI_NOOP, SMI, F, 1, S ),
+ CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
+ CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ),
+ CMD( MI_ARB_CHECK, SMI, F, 1, S ),
+ CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
+ CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
+ CMD( MI_SEMAPHORE_MBOX, SMI, !F, 0xFF, R ),
+ CMD( MI_STORE_DWORD_INDEX, SMI, !F, 0xFF, R ),
+ CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+ CMD( MI_STORE_REGISTER_MEM(1), SMI, !F, 0xFF, W | B,
+ .reg = { .offset = 1, .mask = 0x007FFFFC },
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ CMD( MI_LOAD_REGISTER_MEM, SMI, !F, 0xFF, W | B,
+ .reg = { .offset = 1, .mask = 0x007FFFFC },
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
+};
+
+static const struct drm_i915_cmd_descriptor render_cmds[] = {
+ CMD( MI_FLUSH, SMI, F, 1, S ),
+ CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+ CMD( MI_PREDICATE, SMI, F, 1, S ),
+ CMD( MI_TOPOLOGY_FILTER, SMI, F, 1, S ),
+ CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
+ CMD( MI_SET_CONTEXT, SMI, !F, 0xFF, R ),
+ CMD( MI_URB_CLEAR, SMI, !F, 0xFF, S ),
+ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3F, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ CMD( MI_UPDATE_GTT, SMI, !F, 0xFF, R ),
+ CMD( MI_CLFLUSH, SMI, !F, 0x3FF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ CMD( MI_REPORT_PERF_COUNT, SMI, !F, 0x3F, B,
+ .bits = {{
+ .offset = 1,
+ .mask = MI_REPORT_PERF_COUNT_GGTT,
+ .expected = 0,
+ }}, ),
+ CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ CMD( GFX_OP_3DSTATE_VF_STATISTICS, S3D, F, 1, S ),
+ CMD( PIPELINE_SELECT, S3D, F, 1, S ),
+ CMD( MEDIA_VFE_STATE, S3D, !F, 0xFFFF, B,
+ .bits = {{
+ .offset = 2,
+ .mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
+ .expected = 0,
+ }}, ),
+ CMD( GPGPU_OBJECT, S3D, !F, 0xFF, S ),
+ CMD( GPGPU_WALKER, S3D, !F, 0xFF, S ),
+ CMD( GFX_OP_3DSTATE_SO_DECL_LIST, S3D, !F, 0x1FF, S ),
+ CMD( GFX_OP_PIPE_CONTROL(5), S3D, !F, 0xFF, B,
+ .bits = {{
+ .offset = 1,
+ .mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
+ .expected = 0,
+ },
+ {
+ .offset = 1,
+ .mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_STORE_DATA_INDEX),
+ .expected = 0,
+ .condition_offset = 1,
+ .condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
+ }}, ),
+};
+
+static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
+ CMD( MI_SET_PREDICATE, SMI, F, 1, S ),
+ CMD( MI_RS_CONTROL, SMI, F, 1, S ),
+ CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
+ CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+ CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
+ CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ),
+ CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ),
+ CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ),
+ CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ),
+ CMD( GFX_OP_3DSTATE_DX9_CONSTANTF_VS, S3D, !F, 0x7FF, S ),
+ CMD( GFX_OP_3DSTATE_DX9_CONSTANTF_PS, S3D, !F, 0x7FF, S ),
+
+ CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS, S3D, !F, 0x1FF, S ),
+ CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS, S3D, !F, 0x1FF, S ),
+ CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS, S3D, !F, 0x1FF, S ),
+ CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS, S3D, !F, 0x1FF, S ),
+ CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
+};
+
+static const struct drm_i915_cmd_descriptor video_cmds[] = {
+ CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
+ CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_FLUSH_DW_NOTIFY,
+ .expected = 0,
+ },
+ {
+ .offset = 1,
+ .mask = MI_FLUSH_DW_USE_GTT,
+ .expected = 0,
+ .condition_offset = 0,
+ .condition_mask = MI_FLUSH_DW_OP_MASK,
+ },
+ {
+ .offset = 0,
+ .mask = MI_FLUSH_DW_STORE_INDEX,
+ .expected = 0,
+ .condition_offset = 0,
+ .condition_mask = MI_FLUSH_DW_OP_MASK,
+ }}, ),
+ CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ /*
+ * MFX_WAIT doesn't fit the way we handle length for most commands.
+ * It has a length field but it uses a non-standard length bias.
+ * It is always 1 dword though, so just treat it as fixed length.
+ */
+ CMD( MFX_WAIT, SMFX, F, 1, S ),
+};
+
+static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
+ CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
+ CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_FLUSH_DW_NOTIFY,
+ .expected = 0,
+ },
+ {
+ .offset = 1,
+ .mask = MI_FLUSH_DW_USE_GTT,
+ .expected = 0,
+ .condition_offset = 0,
+ .condition_mask = MI_FLUSH_DW_OP_MASK,
+ },
+ {
+ .offset = 0,
+ .mask = MI_FLUSH_DW_STORE_INDEX,
+ .expected = 0,
+ .condition_offset = 0,
+ .condition_mask = MI_FLUSH_DW_OP_MASK,
+ }}, ),
+ CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+};
+
+static const struct drm_i915_cmd_descriptor blt_cmds[] = {
+ CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
+ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
+ CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_FLUSH_DW_NOTIFY,
+ .expected = 0,
+ },
+ {
+ .offset = 1,
+ .mask = MI_FLUSH_DW_USE_GTT,
+ .expected = 0,
+ .condition_offset = 0,
+ .condition_mask = MI_FLUSH_DW_OP_MASK,
+ },
+ {
+ .offset = 0,
+ .mask = MI_FLUSH_DW_STORE_INDEX,
+ .expected = 0,
+ .condition_offset = 0,
+ .condition_mask = MI_FLUSH_DW_OP_MASK,
+ }}, ),
+ CMD( COLOR_BLT, S2D, !F, 0x3F, S ),
+ CMD( SRC_COPY_BLT, S2D, !F, 0x3F, S ),
+};
+
+static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+ CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
+};
+
+#undef CMD
+#undef SMI
+#undef S3D
+#undef S2D
+#undef SMFX
+#undef F
+#undef S
+#undef R
+#undef W
+#undef B
+#undef M
+
+static const struct drm_i915_cmd_table gen7_render_cmds[] = {
+ { common_cmds, ARRAY_SIZE(common_cmds) },
+ { render_cmds, ARRAY_SIZE(render_cmds) },
+};
+
+static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
+ { common_cmds, ARRAY_SIZE(common_cmds) },
+ { render_cmds, ARRAY_SIZE(render_cmds) },
+ { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
+};
+
+static const struct drm_i915_cmd_table gen7_video_cmds[] = {
+ { common_cmds, ARRAY_SIZE(common_cmds) },
+ { video_cmds, ARRAY_SIZE(video_cmds) },
+};
+
+static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
+ { common_cmds, ARRAY_SIZE(common_cmds) },
+ { vecs_cmds, ARRAY_SIZE(vecs_cmds) },
+};
+
+static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
+ { common_cmds, ARRAY_SIZE(common_cmds) },
+ { blt_cmds, ARRAY_SIZE(blt_cmds) },
+};
+
+static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
+ { common_cmds, ARRAY_SIZE(common_cmds) },
+ { blt_cmds, ARRAY_SIZE(blt_cmds) },
+ { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
+};
+
+/*
+ * Register whitelists, sorted by increasing register offset.
+ *
+ * Some registers that userspace accesses are 64 bits. The register
+ * access commands only allow 32-bit accesses. Hence, we have to include
+ * entries for both halves of the 64-bit registers.
+ */
+
+/* Convenience macro for adding 64-bit registers */
+#define REG64(addr) (addr), (addr + sizeof(u32))
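
Each REG64() use therefore contributes two consecutive dword entries:

/* For example, REG64(0x2300) expands to 0x2300, (0x2300 + sizeof(u32)),
 * i.e. whitelist entries for both the low and high halves. */
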
+
+static const u32 gen7_render_regs[] = {
+ REG64(HS_INVOCATION_COUNT),
+ REG64(DS_INVOCATION_COUNT),
+ REG64(IA_VERTICES_COUNT),
+ REG64(IA_PRIMITIVES_COUNT),
+ REG64(VS_INVOCATION_COUNT),
+ REG64(GS_INVOCATION_COUNT),
+ REG64(GS_PRIMITIVES_COUNT),
+ REG64(CL_INVOCATION_COUNT),
+ REG64(CL_PRIMITIVES_COUNT),
+ REG64(PS_INVOCATION_COUNT),
+ REG64(PS_DEPTH_COUNT),
+ OACONTROL, /* Only allowed for LRI and SRM. See below. */
+ GEN7_3DPRIM_END_OFFSET,
+ GEN7_3DPRIM_START_VERTEX,
+ GEN7_3DPRIM_VERTEX_COUNT,
+ GEN7_3DPRIM_INSTANCE_COUNT,
+ GEN7_3DPRIM_START_INSTANCE,
+ GEN7_3DPRIM_BASE_VERTEX,
+ REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
+ REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
+ REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
+ REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)),
+ REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)),
+ REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
+ REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
+ REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
+ GEN7_SO_WRITE_OFFSET(0),
+ GEN7_SO_WRITE_OFFSET(1),
+ GEN7_SO_WRITE_OFFSET(2),
+ GEN7_SO_WRITE_OFFSET(3),
+};
+
+static const u32 gen7_blt_regs[] = {
+ BCS_SWCTRL,
+};
+
+static const u32 ivb_master_regs[] = {
+ FORCEWAKE_MT,
+ DERRMR,
+ GEN7_PIPE_DE_LOAD_SL(PIPE_A),
+ GEN7_PIPE_DE_LOAD_SL(PIPE_B),
+ GEN7_PIPE_DE_LOAD_SL(PIPE_C),
+};
+
+static const u32 hsw_master_regs[] = {
+ FORCEWAKE_MT,
+ DERRMR,
+};
+
+#undef REG64
+
+static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
+{
+ u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+ u32 subclient =
+ (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+
+ if (client == INSTR_MI_CLIENT)
+ return 0x3F;
+ else if (client == INSTR_RC_CLIENT) {
+ if (subclient == INSTR_MEDIA_SUBCLIENT)
+ return 0xFFFF;
+ else
+ return 0xFF;
+ }
+
+ DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
+ return 0;
+}
+
+static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
+{
+ u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+ u32 subclient =
+ (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+
+ if (client == INSTR_MI_CLIENT)
+ return 0x3F;
+ else if (client == INSTR_RC_CLIENT) {
+ if (subclient == INSTR_MEDIA_SUBCLIENT)
+ return 0xFFF;
+ else
+ return 0xFF;
+ }
+
+ DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
+ return 0;
+}
+
+static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
+{
+ u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+
+ if (client == INSTR_MI_CLIENT)
+ return 0x3F;
+ else if (client == INSTR_BC_CLIENT)
+ return 0xFF;
+
+ DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+ return 0;
+}
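
These masks feed the default-descriptor path further down: when a command has no table entry, its dword count is derived from the header. A sketch, where the bias of 2 (header plus one implied dword) is an assumption about the standard length encoding rather than a constant shown here:

/* Sketch: total length in dwords of a standard variable-length
 * command; the +2 bias is assumed, not taken from this file. */
static u32 cmd_length_dwords(u32 cmd_header, u32 length_mask)
{
	return (cmd_header & length_mask) + 2;
}
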
+
+static bool validate_cmds_sorted(struct intel_engine_cs *ring,
+ const struct drm_i915_cmd_table *cmd_tables,
+ int cmd_table_count)
+{
+ int i;
+ bool ret = true;
+
+ if (!cmd_tables || cmd_table_count == 0)
+ return true;
+
+ for (i = 0; i < cmd_table_count; i++) {
+ const struct drm_i915_cmd_table *table = &cmd_tables[i];
+ u32 previous = 0;
+ int j;
+
+ for (j = 0; j < table->count; j++) {
+ const struct drm_i915_cmd_descriptor *desc =
+				&table->table[j];
+ u32 curr = desc->cmd.value & desc->cmd.mask;
+
+ if (curr < previous) {
+ DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
+ ring->id, i, j, curr, previous);
+ ret = false;
+ }
+
+ previous = curr;
+ }
+ }
+
+ return ret;
+}
+
+static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count)
+{
+ int i;
+ u32 previous = 0;
+ bool ret = true;
+
+ for (i = 0; i < reg_count; i++) {
+ u32 curr = reg_table[i];
+
+ if (curr < previous) {
+ DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
+ ring_id, i, curr, previous);
+ ret = false;
+ }
+
+ previous = curr;
+ }
+
+ return ret;
+}
+
+static bool validate_regs_sorted(struct intel_engine_cs *ring)
+{
+ return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
+ check_sorted(ring->id, ring->master_reg_table,
+ ring->master_reg_count);
+}
+
+struct cmd_node {
+ const struct drm_i915_cmd_descriptor *desc;
+ struct hlist_node node;
+};
+
+/*
+ * Different command ranges have different numbers of bits for the opcode. For
+ * example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
+ * problem is that, for example, MI commands use bits 22:16 for other fields
+ * such as GGTT vs PPGTT bits. If we include those bits in the mask then when
+ * we mask a command from a batch it could hash to the wrong bucket due to
+ * non-opcode bits being set. But if we don't include those bits, some 3D
+ * commands may hash to the same bucket due to not including opcode bits that
+ * make the command unique. For now, we will risk hashing to the same bucket.
+ *
+ * If we attempt to generate a perfect hash, we should be able to look at bits
+ * 31:29 of a command from a batch buffer and use the full mask for that
+ * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
+ */
+#define CMD_HASH_MASK STD_MI_OPCODE_MASK
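
Concretely, with the MI mask chosen here:

/*
 * CMD_HASH_MASK keeps only bits 31:23, so two MI commands that differ
 * only in low-order flag bits (e.g. the GGTT vs PPGTT selector in the
 * 22:16 range noted above) hash to the same bucket. The bucket walk in
 * find_cmd_in_table() below disambiguates by re-applying each
 * candidate descriptor's full cmd.mask.
 */
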
+
+static int init_hash_table(struct intel_engine_cs *ring,
+ const struct drm_i915_cmd_table *cmd_tables,
+ int cmd_table_count)
+{
+ int i, j;
+
+ hash_init(ring->cmd_hash);
+
+ for (i = 0; i < cmd_table_count; i++) {
+ const struct drm_i915_cmd_table *table = &cmd_tables[i];
+
+ for (j = 0; j < table->count; j++) {
+ const struct drm_i915_cmd_descriptor *desc =
+ &table->table[j];
+ struct cmd_node *desc_node =
+ kmalloc(sizeof(*desc_node), GFP_KERNEL);
+
+ if (!desc_node)
+ return -ENOMEM;
+
+ desc_node->desc = desc;
+ hash_add(ring->cmd_hash, &desc_node->node,
+ desc->cmd.value & CMD_HASH_MASK);
+ }
+ }
+
+ return 0;
+}
+
+static void fini_hash_table(struct intel_engine_cs *ring)
+{
+ struct hlist_node *tmp;
+ struct cmd_node *desc_node;
+ int i;
+
+ hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
+ hash_del(&desc_node->node);
+ kfree(desc_node);
+ }
+}
+
+/**
+ * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
+ * @ring: the ringbuffer to initialize
+ *
+ * Optionally initializes fields related to batch buffer command parsing in the
+ * struct intel_engine_cs based on whether the platform requires software
+ * command parsing.
+ *
+ * Return: non-zero if initialization fails
+ */
+int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
+{
+ const struct drm_i915_cmd_table *cmd_tables;
+ int cmd_table_count;
+ int ret;
+
+ if (!IS_GEN7(ring->dev))
+ return 0;
+
+ switch (ring->id) {
+ case RCS:
+ if (IS_HASWELL(ring->dev)) {
+ cmd_tables = hsw_render_ring_cmds;
+ cmd_table_count =
+ ARRAY_SIZE(hsw_render_ring_cmds);
+ } else {
+ cmd_tables = gen7_render_cmds;
+ cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
+ }
+
+ ring->reg_table = gen7_render_regs;
+ ring->reg_count = ARRAY_SIZE(gen7_render_regs);
+
+ if (IS_HASWELL(ring->dev)) {
+ ring->master_reg_table = hsw_master_regs;
+ ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+ } else {
+ ring->master_reg_table = ivb_master_regs;
+ ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+ }
+
+ ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+ break;
+ case VCS:
+ cmd_tables = gen7_video_cmds;
+ cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
+ ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+ break;
+ case BCS:
+ if (IS_HASWELL(ring->dev)) {
+ cmd_tables = hsw_blt_ring_cmds;
+ cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
+ } else {
+ cmd_tables = gen7_blt_cmds;
+ cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
+ }
+
+ ring->reg_table = gen7_blt_regs;
+ ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
+
+ if (IS_HASWELL(ring->dev)) {
+ ring->master_reg_table = hsw_master_regs;
+ ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+ } else {
+ ring->master_reg_table = ivb_master_regs;
+ ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+ }
+
+ ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+ break;
+ case VECS:
+ cmd_tables = hsw_vebox_cmds;
+ cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
+ /* VECS can use the same length_mask function as VCS */
+ ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+ break;
+ default:
+ DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
+ ring->id);
+ BUG();
+ }
+
+ BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
+ BUG_ON(!validate_regs_sorted(ring));
+
+ ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+ if (ret) {
+ DRM_ERROR("CMD: cmd_parser_init failed!\n");
+ fini_hash_table(ring);
+ return ret;
+ }
+
+ ring->needs_cmd_parser = true;
+
+ return 0;
+}
+
+/**
+ * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
+ * @ring: the ringbuffer to clean up
+ *
+ * Releases any resources related to command parsing that may have been
+ * initialized for the specified ring.
+ */
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
+{
+ if (!ring->needs_cmd_parser)
+ return;
+
+ fini_hash_table(ring);
+}
+
+static const struct drm_i915_cmd_descriptor *
+find_cmd_in_table(struct intel_engine_cs *ring,
+ u32 cmd_header)
+{
+ struct cmd_node *desc_node;
+
+ hash_for_each_possible(ring->cmd_hash, desc_node, node,
+ cmd_header & CMD_HASH_MASK) {
+ const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
+ u32 masked_cmd = desc->cmd.mask & cmd_header;
+ u32 masked_value = desc->cmd.value & desc->cmd.mask;
+
+ if (masked_cmd == masked_value)
+ return desc;
+ }
+
+ return NULL;
+}
+
+/*
+ * Returns a pointer to a descriptor for the command specified by cmd_header.
+ *
+ * The caller must supply space for a default descriptor via the default_desc
+ * parameter. If no descriptor for the specified command exists in the ring's
+ * command parser tables, this function fills in default_desc based on the
+ * ring's default length encoding and returns default_desc.
+ */
+static const struct drm_i915_cmd_descriptor *
+find_cmd(struct intel_engine_cs *ring,
+ u32 cmd_header,
+ struct drm_i915_cmd_descriptor *default_desc)
+{
+ const struct drm_i915_cmd_descriptor *desc;
+ u32 mask;
+
+ desc = find_cmd_in_table(ring, cmd_header);
+ if (desc)
+ return desc;
+
+ mask = ring->get_cmd_length_mask(cmd_header);
+ if (!mask)
+ return NULL;
+
+ BUG_ON(!default_desc);
+ default_desc->flags = CMD_DESC_SKIP;
+ default_desc->length.mask = mask;
+
+ return default_desc;
+}
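+
+/*
+ * A minimal usage sketch for find_cmd(); a NULL return means neither a
+ * descriptor nor a default length mask exists, which the parse loop in
+ * i915_parse_cmds() below treats as an unrecognized command:
+ *
+ *	struct drm_i915_cmd_descriptor default_desc = { 0 };
+ *	const struct drm_i915_cmd_descriptor *desc;
+ *
+ *	desc = find_cmd(ring, *cmd, &default_desc);
+ *	if (!desc)
+ *		return -EINVAL;
+ */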
+
+static bool valid_reg(const u32 *table, int count, u32 addr)
+{
+ if (table && count != 0) {
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (table[i] == addr)
+ return true;
+ }
+ }
+
+ return false;
+}
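+
+/*
+ * valid_reg() uses a linear scan; a minimal sketch of the binary
+ * search that the sorted invariant from check_sorted() would permit
+ * (reg_in_table_sorted() is an illustrative name, not a helper this
+ * patch adds):
+ *
+ *	static bool reg_in_table_sorted(const u32 *table, int count, u32 addr)
+ *	{
+ *		int lo = 0, hi = count - 1;
+ *
+ *		while (lo <= hi) {
+ *			int mid = lo + (hi - lo) / 2;
+ *
+ *			if (table[mid] == addr)
+ *				return true;
+ *			if (table[mid] < addr)
+ *				lo = mid + 1;
+ *			else
+ *				hi = mid - 1;
+ *		}
+ *
+ *		return false;
+ *	}
+ */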
+
+static u32 *vmap_batch(struct drm_i915_gem_object *obj)
+{
+ int i;
+ void *addr = NULL;
+ struct sg_page_iter sg_iter;
+ struct page **pages;
+
+ pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+ if (pages == NULL) {
+ DRM_DEBUG_DRIVER("Failed to get space for pages\n");
+ goto finish;
+ }
+
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ pages[i] = sg_page_iter_page(&sg_iter);
+ i++;
+ }
+
+ addr = vmap(pages, i, 0, PAGE_KERNEL);
+ if (addr == NULL) {
+ DRM_DEBUG_DRIVER("Failed to vmap pages\n");
+ goto finish;
+ }
+
+finish:
+ if (pages)
+ drm_free_large(pages);
+ return (u32 *)addr;
+}
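+
+/*
+ * The temporary pages[] array is freed on every path out of
+ * vmap_batch(); only the vmap()ed range itself survives, and the
+ * caller owns the matching vunmap().
+ */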
+
+/**
+ * i915_needs_cmd_parser() - should a given ring use software command parsing?
+ * @ring: the ring in question
+ *
+ * Only certain platforms require software batch buffer command parsing, and
+ * only when enabled via module parameter.
+ *
+ * Return: true if the ring requires software command parsing
+ */
+bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (!ring->needs_cmd_parser)
+ return false;
+
+ /*
+ * XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
+ * disabled. That will cause all of the parser's PPGTT checks to
+ * fail. For now, disable parsing when PPGTT is off.
+ */
+ if (!dev_priv->mm.aliasing_ppgtt)
+ return false;
+
+ return (i915.enable_cmd_parser == 1);
+}
+
+static bool check_cmd(const struct intel_engine_cs *ring,
+ const struct drm_i915_cmd_descriptor *desc,
+ const u32 *cmd,
+ const bool is_master,
+ bool *oacontrol_set)
+{
+ if (desc->flags & CMD_DESC_REJECT) {
+ DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
+ return false;
+ }
+
+ if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
+ DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
+ *cmd);
+ return false;
+ }
+
+ if (desc->flags & CMD_DESC_REGISTER) {
+ u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
+
+ /*
+ * OACONTROL requires some special handling for writes. We
+ * want to make sure that any batch which enables OA also
+ * disables it before the end of the batch. The goal is to
+ * prevent one process from snooping on the perf data from
+ * another process. To do that, we need to check the value
+ * that will be written to the register. Hence, limit
+ * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
+ */
+ if (reg_addr == OACONTROL) {
+ if (desc->cmd.value == MI_LOAD_REGISTER_MEM)
+ return false;
+
+ if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
+ *oacontrol_set = (cmd[2] != 0);
+ }
+
+ if (!valid_reg(ring->reg_table,
+ ring->reg_count, reg_addr)) {
+ if (!is_master ||
+ !valid_reg(ring->master_reg_table,
+ ring->master_reg_count,
+ reg_addr)) {
+ DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
+ reg_addr,
+ *cmd,
+ ring->id);
+ return false;
+ }
+ }
+ }
+
+ if (desc->flags & CMD_DESC_BITMASK) {
+ int i;
+
+ for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
+ u32 dword;
+
+ if (desc->bits[i].mask == 0)
+ break;
+
+ if (desc->bits[i].condition_mask != 0) {
+ u32 offset =
+ desc->bits[i].condition_offset;
+ u32 condition = cmd[offset] &
+ desc->bits[i].condition_mask;
+
+ if (condition == 0)
+ continue;
+ }
+
+ dword = cmd[desc->bits[i].offset] &
+ desc->bits[i].mask;
+
+ if (dword != desc->bits[i].expected) {
+ DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
+ *cmd,
+ desc->bits[i].mask,
+ desc->bits[i].expected,
+ dword, ring->id);
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+#define LENGTH_BIAS 2
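+
+/*
+ * Gen7 command length fields generally encode the total size in dwords
+ * minus two, so adding LENGTH_BIAS to the masked value recovers the
+ * full command length.
+ */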
+
+/**
+ * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
+ * @ring: the ring on which the batch is to execute
+ * @batch_obj: the batch buffer in question
+ * @batch_start_offset: byte offset in the batch at which execution starts
+ * @is_master: is the submitting process the drm master?
+ *
+ * Parses the specified batch buffer looking for privilege violations as
+ * described in the overview.
+ *
+ * Return: non-zero if the parser finds violations or otherwise fails
+ */
+int i915_parse_cmds(struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *batch_obj,
+ u32 batch_start_offset,
+ bool is_master)
+{
+ int ret = 0;
+ u32 *cmd, *batch_base, *batch_end;
+ struct drm_i915_cmd_descriptor default_desc = { 0 };
+ int needs_clflush = 0;
+ bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
+
+ ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
+ if (ret) {
+ DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+ return ret;
+ }
+
+ batch_base = vmap_batch(batch_obj);
+ if (!batch_base) {
+ DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
+ i915_gem_object_unpin_pages(batch_obj);
+ return -ENOMEM;
+ }
+
+ if (needs_clflush)
+ drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);
+
+ cmd = batch_base + (batch_start_offset / sizeof(*cmd));
+ batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));
+
+ while (cmd < batch_end) {
+ const struct drm_i915_cmd_descriptor *desc;
+ u32 length;
+
+ if (*cmd == MI_BATCH_BUFFER_END)
+ break;
+
+ desc = find_cmd(ring, *cmd, &default_desc);
+ if (!desc) {
+ DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
+ *cmd);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (desc->flags & CMD_DESC_FIXED)
+ length = desc->length.fixed;
+ else
+ length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
+
+ if ((batch_end - cmd) < length) {
+ DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
+ *cmd,
+ length,
+ batch_end - cmd);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!check_cmd(ring, desc, cmd, is_master, &oacontrol_set)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ cmd += length;
+ }
+
+ if (oacontrol_set) {
+ DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
+ ret = -EINVAL;
+ }
+
+ if (cmd >= batch_end) {
+ DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
+ ret = -EINVAL;
+ }
+
+ vunmap(batch_base);
+
+ i915_gem_object_unpin_pages(batch_obj);
+
+ return ret;
+}
+
+/**
+ * i915_cmd_parser_get_version() - get the cmd parser version number
+ *
+ * The cmd parser maintains a simple increasing integer version number suitable
+ * for passing to userspace clients to determine what operations are permitted.
+ *
+ * Return: the current version number of the cmd parser
+ */
+int i915_cmd_parser_get_version(void)
+{
+ /*
+ * Command parser version history
+ *
+ * 1. Initial version. Checks batches and reports violations, but leaves
+ * hardware parsing enabled (so does not allow new use cases).
+ */
+ return 1;
+}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9c9998c4dce..b8c689202c4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,152 +27,753 @@
*/
#include <linux/seq_file.h>
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
#include <linux/debugfs.h>
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/list_sort.h>
+#include <asm/msr-index.h>
+#include <drm/drmP.h>
+#include "intel_drv.h"
+#include "intel_ringbuffer.h"
+#include <drm/i915_drm.h>
#include "i915_drv.h"
-#define DRM_I915_RING_DEBUG 1
+enum {
+ ACTIVE_LIST,
+ INACTIVE_LIST,
+ PINNED_LIST,
+};
+static const char *yesno(int v)
+{
+ return v ? "yes" : "no";
+}
-#if defined(CONFIG_DEBUG_FS)
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated, we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+ struct dentry *ent,
+ const void *key)
+{
+ struct drm_info_node *node;
-#define ACTIVE_LIST 1
-#define FLUSHING_LIST 2
-#define INACTIVE_LIST 3
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL) {
+ debugfs_remove(ent);
+ return -ENOMEM;
+ }
+
+ node->minor = minor;
+ node->dent = ent;
+ node->info_ent = (void *) key;
-static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&node->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
+
+ return 0;
+}
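+
+/*
+ * A minimal usage sketch, assuming a file created directly with
+ * debugfs_create_file() rather than via drm_debugfs_create_files()
+ * (name and fops stand in for the caller's values):
+ *
+ *	ent = debugfs_create_file(name, S_IRUGO, minor->debugfs_root,
+ *				  dev, fops);
+ *	if (!ent)
+ *		return -ENOMEM;
+ *
+ *	return drm_add_fake_info_node(minor, ent, fops);
+ */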
+
+static int i915_capabilities(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ const struct intel_device_info *info = INTEL_INFO(dev);
+
+ seq_printf(m, "gen: %d\n", info->gen);
+ seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
+#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+#define SEP_SEMICOLON ;
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+#undef PRINT_FLAG
+#undef SEP_SEMICOLON
+
+ return 0;
+}
+
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
- if (obj_priv->user_pin_count > 0)
+ if (obj->user_pin_count > 0)
return "P";
- else if (obj_priv->pin_count > 0)
+ else if (i915_gem_obj_is_pinned(obj))
return "p";
else
return " ";
}
-static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
+{
+ switch (obj->tiling_mode) {
+ default:
+ case I915_TILING_NONE: return " ";
+ case I915_TILING_X: return "X";
+ case I915_TILING_Y: return "Y";
+ }
+}
+
+static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
+{
+ return obj->has_global_gtt_mapping ? "g" : " ";
+}
+
+static void
+describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ int pin_count = 0;
+
+ seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
+ &obj->base,
+ get_pin_flag(obj),
+ get_tiling_flag(obj),
+ get_global_flag(obj),
+ obj->base.size / 1024,
+ obj->base.read_domains,
+ obj->base.write_domain,
+ obj->last_read_seqno,
+ obj->last_write_seqno,
+ obj->last_fenced_seqno,
+ i915_cache_level_str(obj->cache_level),
+ obj->dirty ? " dirty" : "",
+ obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+ if (obj->base.name)
+ seq_printf(m, " (name: %d)", obj->base.name);
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->pin_count > 0)
+ pin_count++;
+ seq_printf(m, " (pinned x %d)", pin_count);
+ if (obj->pin_display)
+ seq_printf(m, " (display)");
+ if (obj->fence_reg != I915_FENCE_REG_NONE)
+ seq_printf(m, " (fence: %d)", obj->fence_reg);
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (!i915_is_ggtt(vma->vm))
+ seq_puts(m, " (pp");
+ else
+ seq_puts(m, " (g");
+ seq_printf(m, "gtt offset: %08lx, size: %08lx)",
+ vma->node.start, vma->node.size);
+ }
+ if (obj->stolen)
+ seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
+ if (obj->pin_mappable || obj->fault_mappable) {
+ char s[3], *t = s;
+ if (obj->pin_mappable)
+ *t++ = 'p';
+ if (obj->fault_mappable)
+ *t++ = 'f';
+ *t = '\0';
+ seq_printf(m, " (%s mappable)", s);
+ }
+ if (obj->ring != NULL)
+ seq_printf(m, " (%s)", obj->ring->name);
+}
+
+static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
- switch (obj_priv->tiling_mode) {
- default:
- case I915_TILING_NONE: return " ";
- case I915_TILING_X: return "X";
- case I915_TILING_Y: return "Y";
- }
+ seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+ seq_putc(m, ctx->remap_slice ? 'R' : 'r');
+ seq_putc(m, ' ');
}
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
uintptr_t list = (uintptr_t) node->info_ent->data;
struct list_head *head;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- spinlock_t *lock = NULL;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct i915_vma *vma;
+ size_t total_obj_size, total_gtt_size;
+ int count, ret;
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ /* FIXME: the user of this interface might want more than just GGTT */
switch (list) {
case ACTIVE_LIST:
- seq_printf(m, "Active:\n");
- lock = &dev_priv->mm.active_list_lock;
- head = &dev_priv->mm.active_list;
+ seq_puts(m, "Active:\n");
+ head = &vm->active_list;
break;
case INACTIVE_LIST:
- seq_printf(m, "Inactive:\n");
- head = &dev_priv->mm.inactive_list;
- break;
- case FLUSHING_LIST:
- seq_printf(m, "Flushing:\n");
- head = &dev_priv->mm.flushing_list;
+ seq_puts(m, "Inactive:\n");
+ head = &vm->inactive_list;
break;
default:
- DRM_INFO("Ooops, unexpected list\n");
- return 0;
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
}
- if (lock)
- spin_lock(lock);
- list_for_each_entry(obj_priv, head, list)
- {
- struct drm_gem_object *obj = obj_priv->obj;
-
- seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
- obj,
- get_pin_flag(obj_priv),
- obj->size,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno,
- obj_priv->dirty ? " dirty" : "",
- obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
-
- if (obj->name)
- seq_printf(m, " (name: %d)", obj->name);
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
- if (obj_priv->gtt_space != NULL)
- seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
-
+ total_obj_size = total_gtt_size = count = 0;
+ list_for_each_entry(vma, head, mm_list) {
+ seq_printf(m, " ");
+ describe_obj(m, vma->obj);
seq_printf(m, "\n");
+ total_obj_size += vma->obj->base.size;
+ total_gtt_size += vma->node.size;
+ count++;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ count, total_obj_size, total_gtt_size);
+ return 0;
+}
+
+static int obj_rank_by_stolen(void *priv,
+ struct list_head *A, struct list_head *B)
+{
+ struct drm_i915_gem_object *a =
+ container_of(A, struct drm_i915_gem_object, obj_exec_link);
+ struct drm_i915_gem_object *b =
+ container_of(B, struct drm_i915_gem_object, obj_exec_link);
+
+ return a->stolen->start - b->stolen->start;
+}
+
+static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ size_t total_obj_size, total_gtt_size;
+ LIST_HEAD(stolen);
+ int count, ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ total_obj_size = total_gtt_size = count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (obj->stolen == NULL)
+ continue;
+
+ list_add(&obj->obj_exec_link, &stolen);
+
+ total_obj_size += obj->base.size;
+ total_gtt_size += i915_gem_obj_ggtt_size(obj);
+ count++;
+ }
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ if (obj->stolen == NULL)
+ continue;
+
+ list_add(&obj->obj_exec_link, &stolen);
+
+ total_obj_size += obj->base.size;
+ count++;
+ }
+ list_sort(NULL, &stolen, obj_rank_by_stolen);
+ seq_puts(m, "Stolen:\n");
+ while (!list_empty(&stolen)) {
+ obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
+ seq_puts(m, " ");
+ describe_obj(m, obj);
+ seq_putc(m, '\n');
+ list_del_init(&obj->obj_exec_link);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ count, total_obj_size, total_gtt_size);
+ return 0;
+}
+
+#define count_objects(list, member) do { \
+ list_for_each_entry(obj, list, member) { \
+ size += i915_gem_obj_ggtt_size(obj); \
+ ++count; \
+ if (obj->map_and_fenceable) { \
+ mappable_size += i915_gem_obj_ggtt_size(obj); \
+ ++mappable_count; \
+ } \
+ } \
+} while (0)
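+
+/*
+ * count_objects() relies on locals declared by its caller; a minimal
+ * sketch of that contract (obj, size, count, mappable_size and
+ * mappable_count must all be in scope):
+ *
+ *	size = count = mappable_size = mappable_count = 0;
+ *	count_objects(&dev_priv->mm.bound_list, global_list);
+ */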
+
+struct file_stats {
+ struct drm_i915_file_private *file_priv;
+ int count;
+ size_t total, unbound;
+ size_t global, shared;
+ size_t active, inactive;
+};
+
+static int per_file_stats(int id, void *ptr, void *data)
+{
+ struct drm_i915_gem_object *obj = ptr;
+ struct file_stats *stats = data;
+ struct i915_vma *vma;
+
+ stats->count++;
+ stats->total += obj->base.size;
+
+ if (obj->base.name || obj->base.dma_buf)
+ stats->shared += obj->base.size;
+
+ if (USES_FULL_PPGTT(obj->base.dev)) {
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ struct i915_hw_ppgtt *ppgtt;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ continue;
+
+ if (i915_is_ggtt(vma->vm)) {
+ stats->global += obj->base.size;
+ continue;
+ }
+
+ ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
+ if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv)
+ continue;
+
+ if (obj->ring) /* XXX per-vma statistic */
+ stats->active += obj->base.size;
+ else
+ stats->inactive += obj->base.size;
+
+ return 0;
+ }
+ } else {
+ if (i915_gem_obj_ggtt_bound(obj)) {
+ stats->global += obj->base.size;
+ if (obj->ring)
+ stats->active += obj->base.size;
+ else
+ stats->inactive += obj->base.size;
+ return 0;
+ }
+ }
+
+ if (!list_empty(&obj->global_list))
+ stats->unbound += obj->base.size;
+
+ return 0;
+}
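+
+/*
+ * per_file_stats() runs as an idr_for_each() callback; a non-zero
+ * return would abort the walk, so it always returns 0 and accumulates
+ * results into the stats argument instead.
+ */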
+
+#define count_vmas(list, member) do { \
+ list_for_each_entry(vma, list, member) { \
+ size += i915_gem_obj_ggtt_size(vma->obj); \
+ ++count; \
+ if (vma->obj->map_and_fenceable) { \
+ mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
+ ++mappable_count; \
+ } \
+ } \
+} while (0)
+
+static int i915_gem_object_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 count, mappable_count, purgeable_count;
+ size_t size, mappable_size, purgeable_size;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct drm_file *file;
+ struct i915_vma *vma;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(m, "%u objects, %zu bytes\n",
+ dev_priv->mm.object_count,
+ dev_priv->mm.object_memory);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.bound_list, global_list);
+ seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_vmas(&vm->active_list, mm_list);
+ seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_vmas(&vm->inactive_list, mm_list);
+ seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = purgeable_size = purgeable_count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ size += obj->base.size, ++count;
+ if (obj->madv == I915_MADV_DONTNEED)
+ purgeable_size += obj->base.size, ++purgeable_count;
+ }
+ seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
+
+ size = count = mappable_size = mappable_count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (obj->fault_mappable) {
+ size += i915_gem_obj_ggtt_size(obj);
+ ++count;
+ }
+ if (obj->pin_mappable) {
+ mappable_size += i915_gem_obj_ggtt_size(obj);
+ ++mappable_count;
+ }
+ if (obj->madv == I915_MADV_DONTNEED) {
+ purgeable_size += obj->base.size;
+ ++purgeable_count;
+ }
+ }
+ seq_printf(m, "%u purgeable objects, %zu bytes\n",
+ purgeable_count, purgeable_size);
+ seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+ mappable_count, mappable_size);
+ seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+ count, size);
+
+ seq_printf(m, "%zu [%lu] gtt total\n",
+ dev_priv->gtt.base.total,
+ dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+
+ seq_putc(m, '\n');
+ list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+ struct file_stats stats;
+ struct task_struct *task;
+
+ memset(&stats, 0, sizeof(stats));
+ stats.file_priv = file->driver_priv;
+ spin_lock(&file->table_lock);
+ idr_for_each(&file->object_idr, per_file_stats, &stats);
+ spin_unlock(&file->table_lock);
+ /*
+ * Although we have a valid reference on file->pid, that does
+ * not guarantee that the task_struct who called get_pid() is
+ * still alive (e.g. get_pid(current) => fork() => exit()).
+ * Therefore, we need to protect this ->comm access using RCU.
+ */
+ rcu_read_lock();
+ task = pid_task(file->pid, PIDTYPE_PID);
+ seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
+ task ? task->comm : "<unknown>",
+ stats.count,
+ stats.total,
+ stats.active,
+ stats.inactive,
+ stats.global,
+ stats.shared,
+ stats.unbound);
+ rcu_read_unlock();
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int i915_gem_gtt_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ uintptr_t list = (uintptr_t) node->info_ent->data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ size_t total_obj_size, total_gtt_size;
+ int count, ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ total_obj_size = total_gtt_size = count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
+ continue;
+
+ seq_puts(m, " ");
+ describe_obj(m, obj);
+ seq_putc(m, '\n');
+ total_obj_size += obj->base.size;
+ total_gtt_size += i915_gem_obj_ggtt_size(obj);
+ count++;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ count, total_obj_size, total_gtt_size);
+
+ return 0;
+}
+
+static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ unsigned long flags;
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(dev, crtc) {
+ const char pipe = pipe_name(crtc->pipe);
+ const char plane = plane_name(crtc->plane);
+ struct intel_unpin_work *work;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = crtc->unpin_work;
+ if (work == NULL) {
+ seq_printf(m, "No flip due on pipe %c (plane %c)\n",
+ pipe, plane);
+ } else {
+ if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
+ seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
+ pipe, plane);
+ } else {
+ seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
+ pipe, plane);
+ }
+ if (work->enable_stall_check)
+ seq_puts(m, "Stall check enabled, ");
+ else
+ seq_puts(m, "Stall check waiting for page flip ioctl, ");
+ seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
+
+ if (work->old_fb_obj) {
+ struct drm_i915_gem_object *obj = work->old_fb_obj;
+ if (obj)
+ seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
+ i915_gem_obj_ggtt_offset(obj));
+ }
+ if (work->pending_flip_obj) {
+ struct drm_i915_gem_object *obj = work->pending_flip_obj;
+ if (obj)
+ seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
+ i915_gem_obj_ggtt_offset(obj));
+ }
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
- if (lock)
- spin_unlock(lock);
return 0;
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
struct drm_i915_gem_request *gem_request;
+ int ret, count, i;
- seq_printf(m, "Request:\n");
- list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
- seq_printf(m, " %d @ %d\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies));
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ count = 0;
+ for_each_ring(ring, dev_priv, i) {
+ if (list_empty(&ring->request_list))
+ continue;
+
+ seq_printf(m, "%s requests:\n", ring->name);
+ list_for_each_entry(gem_request,
+ &ring->request_list,
+ list) {
+ seq_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ count++;
}
+ mutex_unlock(&dev->struct_mutex);
+
+ if (count == 0)
+ seq_puts(m, "No requests\n");
+
return 0;
}
+static void i915_ring_seqno_info(struct seq_file *m,
+ struct intel_engine_cs *ring)
+{
+ if (ring->get_seqno) {
+ seq_printf(m, "Current sequence (%s): %u\n",
+ ring->name, ring->get_seqno(ring, false));
+ }
+}
+
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int ret, i;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+
+ for_each_ring(ring, dev_priv, i)
+ i915_ring_seqno_info(m, ring);
+
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
- if (dev_priv->hw_status_page != NULL) {
- seq_printf(m, "Current sequence: %d\n",
- i915_get_gem_seqno(dev));
- } else {
- seq_printf(m, "Current sequence: hws uninitialized\n");
- }
- seq_printf(m, "Waiter sequence: %d\n",
- dev_priv->mm.waiting_gem_seqno);
- seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
return 0;
}
static int i915_interrupt_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int ret, i, pipe;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+
+ if (IS_CHERRYVIEW(dev)) {
+
+ seq_printf(m, "Master Interrupt Control:\t%08x\n",
+ I915_READ(GEN8_MASTER_IRQ));
+
+ seq_printf(m, "Display IER:\t%08x\n",
+ I915_READ(VLV_IER));
+ seq_printf(m, "Display IIR:\t%08x\n",
+ I915_READ(VLV_IIR));
+ seq_printf(m, "Display IIR_RW:\t%08x\n",
+ I915_READ(VLV_IIR_RW));
+ seq_printf(m, "Display IMR:\t%08x\n",
+ I915_READ(VLV_IMR));
+ for_each_pipe(pipe)
+ seq_printf(m, "Pipe %c stat:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(PIPESTAT(pipe)));
+
+ seq_printf(m, "Port hotplug:\t%08x\n",
+ I915_READ(PORT_HOTPLUG_EN));
+ seq_printf(m, "DPFLIPSTAT:\t%08x\n",
+ I915_READ(VLV_DPFLIPSTAT));
+ seq_printf(m, "DPINVGTT:\t%08x\n",
+ I915_READ(DPINVGTT));
+
+ for (i = 0; i < 4; i++) {
+ seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
+ i, I915_READ(GEN8_GT_IMR(i)));
+ seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
+ i, I915_READ(GEN8_GT_IIR(i)));
+ seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
+ i, I915_READ(GEN8_GT_IER(i)));
+ }
- if (!IS_IRONLAKE(dev)) {
+ seq_printf(m, "PCU interrupt mask:\t%08x\n",
+ I915_READ(GEN8_PCU_IMR));
+ seq_printf(m, "PCU interrupt identity:\t%08x\n",
+ I915_READ(GEN8_PCU_IIR));
+ seq_printf(m, "PCU interrupt enable:\t%08x\n",
+ I915_READ(GEN8_PCU_IER));
+ } else if (INTEL_INFO(dev)->gen >= 8) {
+ seq_printf(m, "Master Interrupt Control:\t%08x\n",
+ I915_READ(GEN8_MASTER_IRQ));
+
+ for (i = 0; i < 4; i++) {
+ seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
+ i, I915_READ(GEN8_GT_IMR(i)));
+ seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
+ i, I915_READ(GEN8_GT_IIR(i)));
+ seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
+ i, I915_READ(GEN8_GT_IER(i)));
+ }
+
+ for_each_pipe(pipe) {
+ seq_printf(m, "Pipe %c IMR:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(GEN8_DE_PIPE_IMR(pipe)));
+ seq_printf(m, "Pipe %c IIR:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(GEN8_DE_PIPE_IIR(pipe)));
+ seq_printf(m, "Pipe %c IER:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(GEN8_DE_PIPE_IER(pipe)));
+ }
+
+ seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
+ I915_READ(GEN8_DE_PORT_IMR));
+ seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
+ I915_READ(GEN8_DE_PORT_IIR));
+ seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
+ I915_READ(GEN8_DE_PORT_IER));
+
+ seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
+ I915_READ(GEN8_DE_MISC_IMR));
+ seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
+ I915_READ(GEN8_DE_MISC_IIR));
+ seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
+ I915_READ(GEN8_DE_MISC_IER));
+
+ seq_printf(m, "PCU interrupt mask:\t%08x\n",
+ I915_READ(GEN8_PCU_IMR));
+ seq_printf(m, "PCU interrupt identity:\t%08x\n",
+ I915_READ(GEN8_PCU_IIR));
+ seq_printf(m, "PCU interrupt enable:\t%08x\n",
+ I915_READ(GEN8_PCU_IER));
+ } else if (IS_VALLEYVIEW(dev)) {
+ seq_printf(m, "Display IER:\t%08x\n",
+ I915_READ(VLV_IER));
+ seq_printf(m, "Display IIR:\t%08x\n",
+ I915_READ(VLV_IIR));
+ seq_printf(m, "Display IIR_RW:\t%08x\n",
+ I915_READ(VLV_IIR_RW));
+ seq_printf(m, "Display IMR:\t%08x\n",
+ I915_READ(VLV_IMR));
+ for_each_pipe(pipe)
+ seq_printf(m, "Pipe %c stat:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(PIPESTAT(pipe)));
+
+ seq_printf(m, "Master IER:\t%08x\n",
+ I915_READ(VLV_MASTER_IER));
+
+ seq_printf(m, "Render IER:\t%08x\n",
+ I915_READ(GTIER));
+ seq_printf(m, "Render IIR:\t%08x\n",
+ I915_READ(GTIIR));
+ seq_printf(m, "Render IMR:\t%08x\n",
+ I915_READ(GTIMR));
+
+ seq_printf(m, "PM IER:\t\t%08x\n",
+ I915_READ(GEN6_PMIER));
+ seq_printf(m, "PM IIR:\t\t%08x\n",
+ I915_READ(GEN6_PMIIR));
+ seq_printf(m, "PM IMR:\t\t%08x\n",
+ I915_READ(GEN6_PMIMR));
+
+ seq_printf(m, "Port hotplug:\t%08x\n",
+ I915_READ(PORT_HOTPLUG_EN));
+ seq_printf(m, "DPFLIPSTAT:\t%08x\n",
+ I915_READ(VLV_DPFLIPSTAT));
+ seq_printf(m, "DPINVGTT:\t%08x\n",
+ I915_READ(DPINVGTT));
+
+ } else if (!HAS_PCH_SPLIT(dev)) {
seq_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
seq_printf(m, "Interrupt identity: %08x\n",
I915_READ(IIR));
seq_printf(m, "Interrupt mask: %08x\n",
I915_READ(IMR));
- seq_printf(m, "Pipe A stat: %08x\n",
- I915_READ(PIPEASTAT));
- seq_printf(m, "Pipe B stat: %08x\n",
- I915_READ(PIPEBSTAT));
+ for_each_pipe(pipe)
+ seq_printf(m, "Pipe %c stat: %08x\n",
+ pipe_name(pipe),
+ I915_READ(PIPESTAT(pipe)));
} else {
seq_printf(m, "North Display Interrupt enable: %08x\n",
I915_READ(DEIER));
@@ -193,65 +794,60 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
- seq_printf(m, "Interrupts received: %d\n",
- atomic_read(&dev_priv->irq_received));
- if (dev_priv->hw_status_page != NULL) {
- seq_printf(m, "Current sequence: %d\n",
- i915_get_gem_seqno(dev));
- } else {
- seq_printf(m, "Current sequence: hws uninitialized\n");
+ for_each_ring(ring, dev_priv, i) {
+ if (INTEL_INFO(dev)->gen >= 6) {
+ seq_printf(m,
+ "Graphics Interrupt mask (%s): %08x\n",
+ ring->name, I915_READ_IMR(ring));
+ }
+ i915_ring_seqno_info(m, ring);
}
- seq_printf(m, "Waiter sequence: %d\n",
- dev_priv->mm.waiting_gem_seqno);
- seq_printf(m, "IRQ sequence: %d\n",
- dev_priv->mm.irq_gem_seqno);
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int i;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i, ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
-
- if (obj == NULL) {
- seq_printf(m, "Fenced object[%2d] = unused\n", i);
- } else {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = obj->driver_private;
- seq_printf(m, "Fenced object[%2d] = %p: %s "
- "%08x %08zx %08x %s %08x %08x %d",
- i, obj, get_pin_flag(obj_priv),
- obj_priv->gtt_offset,
- obj->size, obj_priv->stride,
- get_tiling_flag(obj_priv),
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- if (obj->name)
- seq_printf(m, " (name: %d)", obj->name);
- seq_printf(m, "\n");
- }
+ struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
+
+ seq_printf(m, "Fence %d, pin count = %d, object = ",
+ i, dev_priv->fence_regs[i].pin_count);
+ if (obj == NULL)
+ seq_puts(m, "unused");
+ else
+ describe_obj(m, obj);
+ seq_putc(m, '\n');
}
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
static int i915_hws_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ const u32 *hws;
int i;
- volatile u32 *hws;
- hws = (volatile u32 *)dev_priv->hw_status_page;
+ ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+ hws = ring->status_page.page_addr;
if (hws == NULL)
return 0;
@@ -263,257 +859,3029 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
-static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
+static ssize_t
+i915_error_state_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct i915_error_state_file_priv *error_priv = filp->private_data;
+ struct drm_device *dev = error_priv->dev;
+ int ret;
+
+ DRM_DEBUG_DRIVER("Resetting error state\n");
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ i915_destroy_error_state(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return cnt;
+}
+
+static int i915_error_state_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+ struct i915_error_state_file_priv *error_priv;
+
+ error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
+ if (!error_priv)
+ return -ENOMEM;
+
+ error_priv->dev = dev;
+
+ i915_error_state_get(dev, error_priv);
+
+ file->private_data = error_priv;
+
+ return 0;
+}
+
+static int i915_error_state_release(struct inode *inode, struct file *file)
+{
+ struct i915_error_state_file_priv *error_priv = file->private_data;
+
+ i915_error_state_put(error_priv);
+ kfree(error_priv);
+
+ return 0;
+}
+
+static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *pos)
+{
+ struct i915_error_state_file_priv *error_priv = file->private_data;
+ struct drm_i915_error_state_buf error_str;
+ loff_t tmp_pos = 0;
+ ssize_t ret_count = 0;
+ int ret;
+
+ ret = i915_error_state_buf_init(&error_str, count, *pos);
+ if (ret)
+ return ret;
+
+ ret = i915_error_state_to_str(&error_str, error_priv);
+ if (ret)
+ goto out;
+
+ ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
+ error_str.buf,
+ error_str.bytes);
+
+ if (ret_count < 0)
+ ret = ret_count;
+ else
+ *pos = error_str.start + ret_count;
+out:
+ i915_error_state_buf_release(&error_str);
+ return ret ?: ret_count;
+}
+
+static const struct file_operations i915_error_state_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_error_state_open,
+ .read = i915_error_state_read,
+ .write = i915_error_state_write,
+ .llseek = default_llseek,
+ .release = i915_error_state_release,
+};
+
+static int
+i915_next_seqno_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ *val = dev_priv->next_seqno;
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int
+i915_next_seqno_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_set_seqno(dev, val);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
+ i915_next_seqno_get, i915_next_seqno_set,
+ "0x%llx\n");
+
+static int i915_rstdby_delays(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 crstanddelay;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+
+ crstanddelay = I915_READ16(CRSTANDVID);
+
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
+
+ return 0;
+}
+
+static int i915_frequency_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ intel_runtime_pm_get(dev_priv);
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ if (IS_GEN5(dev)) {
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
+ u16 rgvstat = I915_READ16(MEMSTAT_ILK);
+
+ seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+ seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+ seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+ MEMSTAT_VID_SHIFT);
+ seq_printf(m, "Current P-state: %d\n",
+ (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+ } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+ u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+ u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ u32 rpmodectl, rpinclimit, rpdeclimit;
+ u32 rpstat, cagf, reqf;
+ u32 rpupei, rpcurup, rpprevup;
+ u32 rpdownei, rpcurdown, rpprevdown;
+ int max_freq;
+
+ /* RPSTAT1 is in the GT power well */
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ goto out;
+
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ reqf = I915_READ(GEN6_RPNSWREQ);
+ reqf &= ~GEN6_TURBO_DISABLE;
+ if (IS_HASWELL(dev))
+ reqf >>= 24;
+ else
+ reqf >>= 25;
+ reqf *= GT_FREQUENCY_MULTIPLIER;
+
+ rpmodectl = I915_READ(GEN6_RP_CONTROL);
+ rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
+ rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
+
+ rpstat = I915_READ(GEN6_RPSTAT1);
+ rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
+ rpcurup = I915_READ(GEN6_RP_CUR_UP);
+ rpprevup = I915_READ(GEN6_RP_PREV_UP);
+ rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
+ rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
+ rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+ if (IS_HASWELL(dev))
+ cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+ else
+ cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+ cagf *= GT_FREQUENCY_MULTIPLIER;
+
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ mutex_unlock(&dev->struct_mutex);
+
+ seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
+ I915_READ(GEN6_PMIER),
+ I915_READ(GEN6_PMIMR),
+ I915_READ(GEN6_PMISR),
+ I915_READ(GEN6_PMIIR),
+ I915_READ(GEN6_PMINTRMSK));
+ seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+ seq_printf(m, "Render p-state ratio: %d\n",
+ (gt_perf_status & 0xff00) >> 8);
+ seq_printf(m, "Render p-state VID: %d\n",
+ gt_perf_status & 0xff);
+ seq_printf(m, "Render p-state limit: %d\n",
+ rp_state_limits & 0xff);
+ seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
+ seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
+ seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
+ seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
+ seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
+ seq_printf(m, "CAGF: %dMHz\n", cagf);
+ seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
+ GEN6_CURICONT_MASK);
+ seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
+ GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
+ GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
+ GEN6_CURIAVG_MASK);
+ seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
+ GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
+ GEN6_CURBSYTAVG_MASK);
+
+ max_freq = (rp_state_cap & 0xff0000) >> 16;
+ seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+ max_freq * GT_FREQUENCY_MULTIPLIER);
+
+ max_freq = (rp_state_cap & 0xff00) >> 8;
+ seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+ max_freq * GT_FREQUENCY_MULTIPLIER);
+
+ max_freq = rp_state_cap & 0xff;
+ seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+ max_freq * GT_FREQUENCY_MULTIPLIER);
+
+ seq_printf(m, "Max overclocked frequency: %dMHz\n",
+ dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
+ } else if (IS_VALLEYVIEW(dev)) {
+ u32 freq_sts, val;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
+ seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
+
+ val = valleyview_rps_max_freq(dev_priv);
+ seq_printf(m, "max GPU freq: %d MHz\n",
+ vlv_gpu_freq(dev_priv, val));
+
+ val = valleyview_rps_min_freq(dev_priv);
+ seq_printf(m, "min GPU freq: %d MHz\n",
+ vlv_gpu_freq(dev_priv, val));
+
+ seq_printf(m, "current GPU freq: %d MHz\n",
+ vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ } else {
+ seq_puts(m, "no P-state info available\n");
+ }
+
+out:
+ intel_runtime_pm_put(dev_priv);
+ return ret;
+}
+
+static int i915_delayfreq_table(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 delayfreq;
+ int ret, i;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+
+ for (i = 0; i < 16; i++) {
+ delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
+ seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
+ (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
+ }
+
+ intel_runtime_pm_put(dev_priv);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static inline int MAP_TO_MV(int map)
{
- int page, i;
- uint32_t *mem;
+ return 1250 - (map * 25);
+}
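+
+/*
+ * MAP_TO_MV() decodes a 25mV-per-step voltage ID: map 0 is 1250mV,
+ * map 1 is 1225mV, and map 50 would bottom out at 0mV.
+ */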
+
+static int i915_inttoext_table(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 inttoext;
+ int ret, i;
- for (page = 0; page < page_count; page++) {
- mem = kmap_atomic(pages[page], KM_USER0);
- for (i = 0; i < PAGE_SIZE; i += 4)
- seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
- kunmap_atomic(mem, KM_USER0);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+
+ for (i = 1; i <= 32; i++) {
+ inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
+ seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
}
+
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
}
-static int i915_batchbuffer_info(struct seq_file *m, void *data)
+static int ironlake_drpc_info(struct seq_file *m)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rgvmodectl, rstdbyctl;
+ u16 crstandvid;
int ret;
- spin_lock(&dev_priv->mm.active_list_lock);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+
+ rgvmodectl = I915_READ(MEMMODECTL);
+ rstdbyctl = I915_READ(RSTDBYCTL);
+ crstandvid = I915_READ16(CRSTANDVID);
+
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
+ "yes" : "no");
+ seq_printf(m, "Boost freq: %d\n",
+ (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
+ MEMMODE_BOOST_FREQ_SHIFT);
+ seq_printf(m, "HW control enabled: %s\n",
+ rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
+ seq_printf(m, "SW control enabled: %s\n",
+ rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
+ seq_printf(m, "Gated voltage change: %s\n",
+ rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
+ seq_printf(m, "Starting frequency: P%d\n",
+ (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
+ seq_printf(m, "Max P-state: P%d\n",
+ (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
+ seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+ seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+ seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+ seq_printf(m, "Render standby enabled: %s\n",
+ (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
+ seq_puts(m, "Current RS state: ");
+ switch (rstdbyctl & RSX_STATUS_MASK) {
+ case RSX_STATUS_ON:
+ seq_puts(m, "on\n");
+ break;
+ case RSX_STATUS_RC1:
+ seq_puts(m, "RC1\n");
+ break;
+ case RSX_STATUS_RC1E:
+ seq_puts(m, "RC1E\n");
+ break;
+ case RSX_STATUS_RS1:
+ seq_puts(m, "RS1\n");
+ break;
+ case RSX_STATUS_RS2:
+ seq_puts(m, "RS2 (RC6)\n");
+ break;
+ case RSX_STATUS_RS3:
+ seq_puts(m, "RC3 (RC6+)\n");
+ break;
+ default:
+ seq_puts(m, "unknown\n");
+ break;
+ }
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
- obj = obj_priv->obj;
- if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
- ret = i915_gem_object_get_pages(obj);
- if (ret) {
- DRM_ERROR("Failed to get pages: %d\n", ret);
- spin_unlock(&dev_priv->mm.active_list_lock);
- return ret;
- }
+ return 0;
+}
- seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
- i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
+static int vlv_drpc_info(struct seq_file *m)
+{
- i915_gem_object_put_pages(obj);
- }
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rpmodectl1, rcctl1;
+ unsigned fw_rendercount = 0, fw_mediacount = 0;
+
+ intel_runtime_pm_get(dev_priv);
+
+ rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
+ rcctl1 = I915_READ(GEN6_RC_CONTROL);
+
+ intel_runtime_pm_put(dev_priv);
+
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "Turbo enabled: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_ENABLE));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
+ seq_printf(m, "RC6 Enabled: %s\n",
+ yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
+ GEN6_RC_CTL_EI_MODE(1))));
+ seq_printf(m, "Render Power Well: %s\n",
+ (I915_READ(VLV_GTLC_PW_STATUS) &
+ VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+ seq_printf(m, "Media Power Well: %s\n",
+ (I915_READ(VLV_GTLC_PW_STATUS) &
+ VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+
+ seq_printf(m, "Render RC6 residency since boot: %u\n",
+ I915_READ(VLV_GT_RENDER_RC6));
+ seq_printf(m, "Media RC6 residency since boot: %u\n",
+ I915_READ(VLV_GT_MEDIA_RC6));
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ fw_rendercount = dev_priv->uncore.fw_rendercount;
+ fw_mediacount = dev_priv->uncore.fw_mediacount;
+ spin_unlock_irq(&dev_priv->uncore.lock);
+
+ seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
+ seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);
+
+ return 0;
+}
+
+static int gen6_drpc_info(struct seq_file *m)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
+ unsigned forcewake_count;
+ int count = 0, ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ forcewake_count = dev_priv->uncore.forcewake_count;
+ spin_unlock_irq(&dev_priv->uncore.lock);
+
+ if (forcewake_count) {
+ seq_puts(m, "RC information inaccurate because somebody "
+ "holds a forcewake reference \n");
+ } else {
+ /* NB: we cannot use forcewake, else we read the wrong values */
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+ udelay(10);
+ seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
}
- spin_unlock(&dev_priv->mm.active_list_lock);
+ gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
+ trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
+
+ rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
+ rcctl1 = I915_READ(GEN6_RC_CONTROL);
+ mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->rps.hw_lock);
+ sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ intel_runtime_pm_put(dev_priv);
+
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
+ seq_printf(m, "RC1e Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
+ seq_printf(m, "RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+ seq_printf(m, "Deep RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
+ seq_printf(m, "Deepest RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
+ seq_puts(m, "Current RC state: ");
+ switch (gt_core_status & GEN6_RCn_MASK) {
+ case GEN6_RC0:
+ if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
+ seq_puts(m, "Core Power Down\n");
+ else
+ seq_puts(m, "on\n");
+ break;
+ case GEN6_RC3:
+ seq_puts(m, "RC3\n");
+ break;
+ case GEN6_RC6:
+ seq_puts(m, "RC6\n");
+ break;
+ case GEN6_RC7:
+ seq_puts(m, "RC7\n");
+ break;
+ default:
+ seq_puts(m, "Unknown\n");
+ break;
+ }
+ seq_printf(m, "Core Power Down: %s\n",
+ yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+
+ /* Not exactly sure what this is */
+ seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6_LOCKED));
+ seq_printf(m, "RC6 residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6));
+ seq_printf(m, "RC6+ residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6p));
+ seq_printf(m, "RC6++ residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6pp));
+
+ seq_printf(m, "RC6 voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+ seq_printf(m, "RC6+ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+ seq_printf(m, "RC6++ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
return 0;
}
-static int i915_ringbuffer_data(struct seq_file *m, void *data)
+static int i915_drpc_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+
+ if (IS_VALLEYVIEW(dev))
+ return vlv_drpc_info(m);
+ else if (IS_GEN6(dev) || IS_GEN7(dev))
+ return gen6_drpc_info(m);
+ else
+ return ironlake_drpc_info(m);
+}
+
+static int i915_fbc_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- u8 *virt;
- uint32_t *ptr, off;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- if (!dev_priv->ring.ring_obj) {
- seq_printf(m, "No ringbuffer setup\n");
+ if (!HAS_FBC(dev)) {
+ seq_puts(m, "FBC unsupported on this chipset\n");
return 0;
}
- virt = dev_priv->ring.virtual_start;
+ intel_runtime_pm_get(dev_priv);
+
+ if (intel_fbc_enabled(dev)) {
+ seq_puts(m, "FBC enabled\n");
+ } else {
+ seq_puts(m, "FBC disabled: ");
+ switch (dev_priv->fbc.no_fbc_reason) {
+ case FBC_OK:
+ seq_puts(m, "FBC actived, but currently disabled in hardware");
+ break;
+ case FBC_UNSUPPORTED:
+ seq_puts(m, "unsupported by this chipset");
+ break;
+ case FBC_NO_OUTPUT:
+ seq_puts(m, "no outputs");
+ break;
+ case FBC_STOLEN_TOO_SMALL:
+ seq_puts(m, "not enough stolen memory");
+ break;
+ case FBC_UNSUPPORTED_MODE:
+ seq_puts(m, "mode not supported");
+ break;
+ case FBC_MODE_TOO_LARGE:
+ seq_puts(m, "mode too large");
+ break;
+ case FBC_BAD_PLANE:
+ seq_puts(m, "FBC unsupported on plane");
+ break;
+ case FBC_NOT_TILED:
+ seq_puts(m, "scanout buffer not tiled");
+ break;
+ case FBC_MULTIPLE_PIPES:
+ seq_puts(m, "multiple pipes are enabled");
+ break;
+ case FBC_MODULE_PARAM:
+ seq_puts(m, "disabled per module param (default off)");
+ break;
+ case FBC_CHIP_DEFAULT:
+ seq_puts(m, "disabled per chip default");
+ break;
+ default:
+ seq_puts(m, "unknown reason");
+ }
+ seq_putc(m, '\n');
+ }
+
+ intel_runtime_pm_put(dev_priv);
+
+ return 0;
+}
+
+static int i915_ips_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- for (off = 0; off < dev_priv->ring.Size; off += 4) {
- ptr = (uint32_t *)(virt + off);
- seq_printf(m, "%08x : %08x\n", off, *ptr);
+ if (!HAS_IPS(dev)) {
+ seq_puts(m, "not supported\n");
+ return 0;
}
+ intel_runtime_pm_get(dev_priv);
+
+ if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
+ seq_puts(m, "enabled\n");
+ else
+ seq_puts(m, "disabled\n");
+
+ intel_runtime_pm_put(dev_priv);
+
return 0;
}
-static int i915_ringbuffer_info(struct seq_file *m, void *data)
+static int i915_sr_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- unsigned int head, tail;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool sr_enabled = false;
+
+ intel_runtime_pm_get(dev_priv);
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ if (HAS_PCH_SPLIT(dev))
+ sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
+ else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
+ sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+ else if (IS_I915GM(dev))
+ sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+ else if (IS_PINEVIEW(dev))
+ sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
- seq_printf(m, "RingHead : %08x\n", head);
- seq_printf(m, "RingTail : %08x\n", tail);
- seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
- seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+ intel_runtime_pm_put(dev_priv);
+
+ seq_printf(m, "self-refresh: %s\n",
+ sr_enabled ? "enabled" : "disabled");
return 0;
}
-static int i915_error_state(struct seq_file *m, void *unused)
+static int i915_emon_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_error_state *error;
- unsigned long flags;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long temp, chipset, gfx;
+ int ret;
+
+ if (!IS_GEN5(dev))
+ return -ENODEV;
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- if (!dev_priv->first_error) {
- seq_printf(m, "no error state collected\n");
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ temp = i915_mch_val(dev_priv);
+ chipset = i915_chipset_val(dev_priv);
+ gfx = i915_gfx_val(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ seq_printf(m, "GMCH temp: %ld\n", temp);
+ seq_printf(m, "Chipset power: %ld\n", chipset);
+ seq_printf(m, "GFX power: %ld\n", gfx);
+ seq_printf(m, "Total power: %ld\n", chipset + gfx);
+
+ return 0;
+}
+
+static int i915_ring_freq_table(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
+ int gpu_freq, ia_freq;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+ seq_puts(m, "unsupported on this chipset\n");
+ return 0;
+ }
+
+ intel_runtime_pm_get(dev_priv);
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+ if (ret)
goto out;
+
+ seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+
+ for (gpu_freq = dev_priv->rps.min_freq_softlimit;
+ gpu_freq <= dev_priv->rps.max_freq_softlimit;
+ gpu_freq++) {
+ ia_freq = gpu_freq;
+ sandybridge_pcode_read(dev_priv,
+ GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &ia_freq);
+ seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
+ gpu_freq * GT_FREQUENCY_MULTIPLIER,
+ ((ia_freq >> 0) & 0xff) * 100,
+ ((ia_freq >> 8) & 0xff) * 100);
}
- error = dev_priv->first_error;
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+out:
+ intel_runtime_pm_put(dev_priv);
+ return ret;
+}
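Each row decodes the pcode reply in place: the low byte of ia_freq scaled by 100 is the effective CPU frequency and the next byte scaled by 100 is the effective ring frequency, while the GPU column is gpu_freq times GT_FREQUENCY_MULTIPLIER. With an illustrative multiplier of 50, gpu_freq = 20 and a reply of 0x1812 would print as 1000, 1800 and 2400 MHz.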
+
+static int i915_gfxec(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+
+ seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+ intel_runtime_pm_put(dev_priv);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int i915_opregion(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
+ int ret;
+
+ if (data == NULL)
+ return -ENOMEM;
- seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
- error->time.tv_usec);
- seq_printf(m, "EIR: 0x%08x\n", error->eir);
- seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
- seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
- seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
- seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
- seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
- seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
- if (IS_I965G(dev)) {
- seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
- seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ goto out;
+
+ if (opregion->header) {
+ memcpy_fromio(data, opregion->header, OPREGION_SIZE);
+ seq_write(m, data, OPREGION_SIZE);
}
+ mutex_unlock(&dev->struct_mutex);
+
out:
- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ kfree(data);
+ return ret;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct intel_fbdev *ifbdev = NULL;
+ struct intel_framebuffer *fb;
+
+#ifdef CONFIG_DRM_I915_FBDEV
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+ ifbdev = dev_priv->fbdev;
+ fb = to_intel_framebuffer(ifbdev->helper.fb);
+
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
+ fb->base.width,
+ fb->base.height,
+ fb->base.depth,
+ fb->base.bits_per_pixel,
+ atomic_read(&fb->base.refcount.refcount));
+ describe_obj(m, fb->obj);
+ seq_putc(m, '\n');
+ mutex_unlock(&dev->mode_config.mutex);
+#endif
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+ if (ifbdev && &fb->base == ifbdev->helper.fb)
+ continue;
+
+ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
+ fb->base.width,
+ fb->base.height,
+ fb->base.depth,
+ fb->base.bits_per_pixel,
+ atomic_read(&fb->base.refcount.refcount));
+ describe_obj(m, fb->obj);
+ seq_putc(m, '\n');
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
return 0;
}
-static int
-i915_wedged_open(struct inode *inode,
- struct file *filp)
+static int i915_context_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ struct intel_context *ctx;
+ int ret, i;
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+ if (dev_priv->ips.pwrctx) {
+ seq_puts(m, "power context ");
+ describe_obj(m, dev_priv->ips.pwrctx);
+ seq_putc(m, '\n');
+ }
+
+ if (dev_priv->ips.renderctx) {
+ seq_puts(m, "render context ");
+ describe_obj(m, dev_priv->ips.renderctx);
+ seq_putc(m, '\n');
+ }
+
+ list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ if (ctx->obj == NULL)
+ continue;
+
+ seq_puts(m, "HW context ");
+ describe_ctx(m, ctx);
+ for_each_ring(ring, dev_priv, i)
+ if (ring->default_context == ctx)
+ seq_printf(m, "(default context %s) ", ring->name);
+
+ describe_obj(m, ctx->obj);
+ seq_putc(m, '\n');
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+
+static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
- filp->private_data = inode->i_private;
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ if (IS_VALLEYVIEW(dev)) {
+ fw_rendercount = dev_priv->uncore.fw_rendercount;
+ fw_mediacount = dev_priv->uncore.fw_mediacount;
+ } else
+ forcewake_count = dev_priv->uncore.forcewake_count;
+ spin_unlock_irq(&dev_priv->uncore.lock);
+
+ if (IS_VALLEYVIEW(dev)) {
+ seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
+ seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
+ } else
+ seq_printf(m, "forcewake count = %u\n", forcewake_count);
+
return 0;
}
-static ssize_t
-i915_wedged_read(struct file *filp,
- char __user *ubuf,
- size_t max,
- loff_t *ppos)
+static const char *swizzle_string(unsigned swizzle)
+{
+ switch (swizzle) {
+ case I915_BIT_6_SWIZZLE_NONE:
+ return "none";
+ case I915_BIT_6_SWIZZLE_9:
+ return "bit9";
+ case I915_BIT_6_SWIZZLE_9_10:
+ return "bit9/bit10";
+ case I915_BIT_6_SWIZZLE_9_11:
+ return "bit9/bit11";
+ case I915_BIT_6_SWIZZLE_9_10_11:
+ return "bit9/bit10/bit11";
+ case I915_BIT_6_SWIZZLE_9_17:
+ return "bit9/bit17";
+ case I915_BIT_6_SWIZZLE_9_10_17:
+ return "bit9/bit10/bit17";
+ case I915_BIT_6_SWIZZLE_UNKNOWN:
+ return "unknown";
+ }
+
+ return "bug";
+}
+
+static int i915_swizzle_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+
+ seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
+ swizzle_string(dev_priv->mm.bit_6_swizzle_x));
+ seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
+ swizzle_string(dev_priv->mm.bit_6_swizzle_y));
+
+ if (IS_GEN3(dev) || IS_GEN4(dev)) {
+ seq_printf(m, "DDC = 0x%08x\n",
+ I915_READ(DCC));
+ seq_printf(m, "C0DRB3 = 0x%04x\n",
+ I915_READ16(C0DRB3));
+ seq_printf(m, "C1DRB3 = 0x%04x\n",
+ I915_READ16(C1DRB3));
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
+ I915_READ(MAD_DIMM_C0));
+ seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
+ I915_READ(MAD_DIMM_C1));
+ seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
+ I915_READ(MAD_DIMM_C2));
+ seq_printf(m, "TILECTL = 0x%08x\n",
+ I915_READ(TILECTL));
+ if (IS_GEN8(dev))
+ seq_printf(m, "GAMTARBMODE = 0x%08x\n",
+ I915_READ(GAMTARBMODE));
+ else
+ seq_printf(m, "ARB_MODE = 0x%08x\n",
+ I915_READ(ARB_MODE));
+ seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
+ I915_READ(DISP_ARB_CTL));
+ }
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int per_file_ctx(int id, void *ptr, void *data)
+{
+ struct intel_context *ctx = ptr;
+ struct seq_file *m = data;
+ struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
+
+ if (i915_gem_context_is_default(ctx))
+ seq_puts(m, " default context:\n");
+ else
+ seq_printf(m, " context %d:\n", ctx->id);
+ ppgtt->debug_dump(ppgtt, m);
+
+ return 0;
+}
+
+static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ int unused, i;
+
+ if (!ppgtt)
+ return;
+
+ seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
+ seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
+ for_each_ring(ring, dev_priv, unused) {
+ seq_printf(m, "%s\n", ring->name);
+ for (i = 0; i < 4; i++) {
+ u32 offset = 0x270 + i * 8;
+ u64 pdp = I915_READ(ring->mmio_base + offset + 4);
+ pdp <<= 32;
+ pdp |= I915_READ(ring->mmio_base + offset);
+ seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
+ }
+ }
+}
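The PDP registers are 64 bits wide but MMIO here is accessed 32 bits at a time, which is why the loop above reads the high dword at offset + 4, shifts it up, and ORs in the low dword. Distilled into a standalone sketch (read32() is a hypothetical stand-in for I915_READ(); the two halves are not read atomically, which is acceptable for a debugfs dump):

static u64 read_reg64_split(u32 (*read32)(u32 reg), u32 base)
{
	u64 val = read32(base + 4);	/* high dword first */

	val <<= 32;
	val |= read32(base);		/* then the low dword */
	return val;
}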
+
+static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ struct drm_file *file;
+ int i;
+
+ if (INTEL_INFO(dev)->gen == 6)
+ seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
+
+ for_each_ring(ring, dev_priv, i) {
+ seq_printf(m, "%s\n", ring->name);
+ if (INTEL_INFO(dev)->gen == 7)
+ seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
+ seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
+ seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
+ seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
+ }
+ if (dev_priv->mm.aliasing_ppgtt) {
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+ seq_puts(m, "aliasing PPGTT:\n");
+ seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+
+ ppgtt->debug_dump(ppgtt, m);
+ } else
+ return;
+
+ list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ seq_printf(m, "proc: %s\n",
+ get_pid_task(file->pid, PIDTYPE_PID)->comm);
+ idr_for_each(&file_priv->context_idr, per_file_ctx, m);
+ }
+ seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
+}
+
+static int i915_ppgtt_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ int ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+
+ if (INTEL_INFO(dev)->gen >= 8)
+ gen8_ppgtt_info(m, dev);
+ else if (INTEL_INFO(dev)->gen >= 6)
+ gen6_ppgtt_info(m, dev);
+
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int i915_llc(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Size calculation for LLC is a bit of a pain. Ignore for now. */
+ seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
+ seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
+
+ return 0;
+}
+
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 psrperf = 0;
+ bool enabled = false;
+
+ intel_runtime_pm_get(dev_priv);
+
+ seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
+ seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
+
+ enabled = HAS_PSR(dev) &&
+ (I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+ seq_printf(m, "Enabled: %s\n", yesno(enabled));
+
+ if (HAS_PSR(dev))
+ psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
+ EDP_PSR_PERF_CNT_MASK;
+ seq_printf(m, "Performance_Counter: %u\n", psrperf);
+
+ intel_runtime_pm_put(dev_priv);
+ return 0;
+}
+
+static int i915_sink_crc(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct intel_dp *intel_dp = NULL;
+ int ret;
+ u8 crc[6];
+
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+
+ if (connector->base.dpms != DRM_MODE_DPMS_ON)
+ continue;
+
+ if (!connector->base.encoder)
+ continue;
+
+ encoder = to_intel_encoder(connector->base.encoder);
+ if (encoder->type != INTEL_OUTPUT_EDP)
+ continue;
+
+ intel_dp = enc_to_intel_dp(&encoder->base);
+
+ ret = intel_dp_sink_crc(intel_dp, crc);
+ if (ret)
+ goto out;
+
+ seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
+ crc[0], crc[1], crc[2],
+ crc[3], crc[4], crc[5]);
+ goto out;
+ }
+ ret = -ENODEV;
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+static int i915_energy_uJ(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u64 power;
+ u32 units;
+
+ if (INTEL_INFO(dev)->gen < 6)
+ return -ENODEV;
+
+ intel_runtime_pm_get(dev_priv);
+
+ rdmsrl(MSR_RAPL_POWER_UNIT, power);
+ power = (power & 0x1f00) >> 8;
+ units = 1000000 / (1 << power); /* convert to uJ */
+ power = I915_READ(MCH_SECP_NRG_STTS);
+ power *= units;
+
+ intel_runtime_pm_put(dev_priv);
+
+ seq_printf(m, "%llu", (unsigned long long)power);
+
+ return 0;
+}
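The decode above follows the RAPL convention: bits 12:8 of MSR_RAPL_POWER_UNIT encode the energy status unit as 1/2^n joules. With an illustrative n of 14, units = 1000000 / (1 << 14) = 61 uJ per counter tick, so a raw MCH_SECP_NRG_STTS reading of 1000 is reported as 61000 uJ.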
+
+static int i915_pc8_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
+ seq_puts(m, "not supported\n");
+ return 0;
+ }
+
+ seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
+ seq_printf(m, "IRQs disabled: %s\n",
+ yesno(dev_priv->pm.irqs_disabled));
+
+ return 0;
+}
+
+static const char *power_domain_str(enum intel_display_power_domain domain)
+{
+ switch (domain) {
+ case POWER_DOMAIN_PIPE_A:
+ return "PIPE_A";
+ case POWER_DOMAIN_PIPE_B:
+ return "PIPE_B";
+ case POWER_DOMAIN_PIPE_C:
+ return "PIPE_C";
+ case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+ return "PIPE_A_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+ return "PIPE_B_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+ return "PIPE_C_PANEL_FITTER";
+ case POWER_DOMAIN_TRANSCODER_A:
+ return "TRANSCODER_A";
+ case POWER_DOMAIN_TRANSCODER_B:
+ return "TRANSCODER_B";
+ case POWER_DOMAIN_TRANSCODER_C:
+ return "TRANSCODER_C";
+ case POWER_DOMAIN_TRANSCODER_EDP:
+ return "TRANSCODER_EDP";
+ case POWER_DOMAIN_PORT_DDI_A_2_LANES:
+ return "PORT_DDI_A_2_LANES";
+ case POWER_DOMAIN_PORT_DDI_A_4_LANES:
+ return "PORT_DDI_A_4_LANES";
+ case POWER_DOMAIN_PORT_DDI_B_2_LANES:
+ return "PORT_DDI_B_2_LANES";
+ case POWER_DOMAIN_PORT_DDI_B_4_LANES:
+ return "PORT_DDI_B_4_LANES";
+ case POWER_DOMAIN_PORT_DDI_C_2_LANES:
+ return "PORT_DDI_C_2_LANES";
+ case POWER_DOMAIN_PORT_DDI_C_4_LANES:
+ return "PORT_DDI_C_4_LANES";
+ case POWER_DOMAIN_PORT_DDI_D_2_LANES:
+ return "PORT_DDI_D_2_LANES";
+ case POWER_DOMAIN_PORT_DDI_D_4_LANES:
+ return "PORT_DDI_D_4_LANES";
+ case POWER_DOMAIN_PORT_DSI:
+ return "PORT_DSI";
+ case POWER_DOMAIN_PORT_CRT:
+ return "PORT_CRT";
+ case POWER_DOMAIN_PORT_OTHER:
+ return "PORT_OTHER";
+ case POWER_DOMAIN_VGA:
+ return "VGA";
+ case POWER_DOMAIN_AUDIO:
+ return "AUDIO";
+ case POWER_DOMAIN_INIT:
+ return "INIT";
+ default:
+ WARN_ON(1);
+ return "?";
+ }
+}
+
+static int i915_power_domain_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ int i;
+
+ mutex_lock(&power_domains->lock);
+
+ seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
+ for (i = 0; i < power_domains->power_well_count; i++) {
+ struct i915_power_well *power_well;
+ enum intel_display_power_domain power_domain;
+
+ power_well = &power_domains->power_wells[i];
+ seq_printf(m, "%-25s %d\n", power_well->name,
+ power_well->count);
+
+ for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
+ power_domain++) {
+ if (!(BIT(power_domain) & power_well->domains))
+ continue;
+
+ seq_printf(m, " %-23s %d\n",
+ power_domain_str(power_domain),
+ power_domains->domain_use_count[power_domain]);
+ }
+ }
+
+ mutex_unlock(&power_domains->lock);
+
+ return 0;
+}
+
+static void intel_seq_print_mode(struct seq_file *m, int tabs,
+ struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < tabs; i++)
+ seq_putc(m, '\t');
+
+ seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+}
+
+static void intel_encoder_info(struct seq_file *m,
+ struct intel_crtc *intel_crtc,
+ struct intel_encoder *intel_encoder)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct intel_connector *intel_connector;
+ struct drm_encoder *encoder;
+
+ encoder = &intel_encoder->base;
+ seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
+ encoder->base.id, encoder->name);
+ for_each_connector_on_encoder(dev, encoder, intel_connector) {
+ struct drm_connector *connector = &intel_connector->base;
+ seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
+ connector->base.id,
+ connector->name,
+ drm_get_connector_status_name(connector->status));
+ if (connector->status == connector_status_connected) {
+ struct drm_display_mode *mode = &crtc->mode;
+ seq_puts(m, ", mode:\n");
+ intel_seq_print_mode(m, 2, mode);
+ } else {
+ seq_putc(m, '\n');
+ }
+ }
+}
+
+static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct intel_encoder *intel_encoder;
+
+ seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
+ crtc->primary->fb->base.id, crtc->x, crtc->y,
+ crtc->primary->fb->width, crtc->primary->fb->height);
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+ intel_encoder_info(m, intel_crtc, intel_encoder);
+}
+
+static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
+{
+ struct drm_display_mode *mode = panel->fixed_mode;
+
+ seq_puts(m, "\tfixed mode:\n");
+ intel_seq_print_mode(m, 2, mode);
+}
+
+static void intel_dp_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_connector->encoder;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+ seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
+ seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
+ "no");
+ if (intel_encoder->type == INTEL_OUTPUT_EDP)
+ intel_panel_info(m, &intel_connector->panel);
+}
+
+static void intel_hdmi_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_connector->encoder;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
+
+ seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
+ "no");
+}
+
+static void intel_lvds_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ intel_panel_info(m, &intel_connector->panel);
+}
+
+static void intel_connector_info(struct seq_file *m,
+ struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_encoder *intel_encoder = intel_connector->encoder;
+ struct drm_display_mode *mode;
+
+ seq_printf(m, "connector %d: type %s, status: %s\n",
+ connector->base.id, connector->name,
+ drm_get_connector_status_name(connector->status));
+ if (connector->status == connector_status_connected) {
+ seq_printf(m, "\tname: %s\n", connector->display_info.name);
+ seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+ connector->display_info.width_mm,
+ connector->display_info.height_mm);
+ seq_printf(m, "\tsubpixel order: %s\n",
+ drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+ seq_printf(m, "\tCEA rev: %d\n",
+ connector->display_info.cea_rev);
+ }
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
+ intel_dp_info(m, intel_connector);
+ else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
+ intel_hdmi_info(m, intel_connector);
+ else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+ intel_lvds_info(m, intel_connector);
+
+ seq_puts(m, "\tmodes:\n");
+ list_for_each_entry(mode, &connector->modes, head)
+ intel_seq_print_mode(m, 2, mode);
+}
+
+static bool cursor_active(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 state;
+
+ if (IS_845G(dev) || IS_I865G(dev))
+ state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
+ else
+ state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+
+ return state;
+}
+
+static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pos;
+
+ pos = I915_READ(CURPOS(pipe));
+
+ *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
+ if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
+ *x = -*x;
+
+ *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
+ if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
+ *y = -*y;
+
+ return cursor_active(dev, pipe);
+}
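Note the CURPOS fields are sign-magnitude rather than two's complement: each axis carries a CURSOR_POS_MASK magnitude plus a separate CURSOR_POS_SIGN bit. For example, a y field holding magnitude 5 with its sign bit set, next to an x field of 16 with the sign clear, decodes to x = 16, y = -5.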
+
+static int i915_display_info(struct seq_file *m, void *unused)
{
- struct drm_device *dev = filp->private_data;
- drm_i915_private_t *dev_priv = dev->dev_private;
- char buf[80];
- int len;
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ struct drm_connector *connector;
+
+ intel_runtime_pm_get(dev_priv);
+ drm_modeset_lock_all(dev);
+ seq_puts(m, "CRTC info\n");
+ seq_puts(m, "---------\n");
+ for_each_intel_crtc(dev, crtc) {
+ bool active;
+ int x, y;
+
+ seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
+ crtc->base.base.id, pipe_name(crtc->pipe),
+ yesno(crtc->active));
+ if (crtc->active) {
+ intel_crtc_info(m, crtc);
+
+ active = cursor_position(dev, crtc->pipe, &x, &y);
+ seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
+ yesno(crtc->cursor_base),
+ x, y, crtc->cursor_addr,
+ yesno(active));
+ }
- len = snprintf(buf, sizeof (buf),
- "wedged : %d\n",
- atomic_read(&dev_priv->mm.wedged));
+ seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
+ yesno(!crtc->cpu_fifo_underrun_disabled),
+ yesno(!crtc->pch_fifo_underrun_disabled));
+ }
+
+ seq_putc(m, '\n');
+ seq_puts(m, "Connector info\n");
+ seq_puts(m, "--------------\n");
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ intel_connector_info(m, connector);
+ }
+ drm_modeset_unlock_all(dev);
+ intel_runtime_pm_put(dev_priv);
- return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+ return 0;
+}
+
+struct pipe_crc_info {
+ const char *name;
+ struct drm_device *dev;
+ enum pipe pipe;
+};
+
+static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
+{
+ struct pipe_crc_info *info = inode->i_private;
+ struct drm_i915_private *dev_priv = info->dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+ if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
+ return -ENODEV;
+
+ spin_lock_irq(&pipe_crc->lock);
+
+ if (pipe_crc->opened) {
+ spin_unlock_irq(&pipe_crc->lock);
+ return -EBUSY; /* already open */
+ }
+
+ pipe_crc->opened = true;
+ filep->private_data = inode->i_private;
+
+ spin_unlock_irq(&pipe_crc->lock);
+
+ return 0;
+}
+
+static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
+{
+ struct pipe_crc_info *info = inode->i_private;
+ struct drm_i915_private *dev_priv = info->dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+ spin_lock_irq(&pipe_crc->lock);
+ pipe_crc->opened = false;
+ spin_unlock_irq(&pipe_crc->lock);
+
+ return 0;
+}
+
+/* (6 fields, 8 chars each, space separated (5) + '\n') */
+#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
+/* account for '\0' */
+#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
+
+static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
+{
+ assert_spin_locked(&pipe_crc->lock);
+ return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+ INTEL_PIPE_CRC_ENTRIES_NR);
}
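CIRC_CNT() on a power-of-two ring reduces to (head - tail) & (size - 1), which stays correct across wrap-around: with an illustrative ring of 128 entries, head = 3 and tail = 126 give (3 - 126) & 127 = 5 pending entries.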
static ssize_t
-i915_wedged_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- struct drm_device *dev = filp->private_data;
- drm_i915_private_t *dev_priv = dev->dev_private;
- char buf[20];
- int val = 1;
-
- if (cnt > 0) {
- if (cnt > sizeof (buf) - 1)
- return -EINVAL;
+i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
+ loff_t *pos)
+{
+ struct pipe_crc_info *info = filep->private_data;
+ struct drm_device *dev = info->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+ char buf[PIPE_CRC_BUFFER_LEN];
+ int head, tail, n_entries, n;
+ ssize_t bytes_read;
+
+ /*
+ * Don't allow user space to provide buffers not big enough to hold
+ * a line of data.
+ */
+ if (count < PIPE_CRC_LINE_LEN)
+ return -EINVAL;
+
+ if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
+ return 0;
- if (copy_from_user(buf, ubuf, cnt))
+ /* nothing to read */
+ spin_lock_irq(&pipe_crc->lock);
+ while (pipe_crc_data_count(pipe_crc) == 0) {
+ int ret;
+
+ if (filep->f_flags & O_NONBLOCK) {
+ spin_unlock_irq(&pipe_crc->lock);
+ return -EAGAIN;
+ }
+
+ ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
+ pipe_crc_data_count(pipe_crc), pipe_crc->lock);
+ if (ret) {
+ spin_unlock_irq(&pipe_crc->lock);
+ return ret;
+ }
+ }
+
+ /* We now have one or more entries to read */
+ head = pipe_crc->head;
+ tail = pipe_crc->tail;
+ n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
+ count / PIPE_CRC_LINE_LEN);
+ spin_unlock_irq(&pipe_crc->lock);
+
+ bytes_read = 0;
+ n = 0;
+ do {
+ struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
+ int ret;
+
+ bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
+ "%8u %8x %8x %8x %8x %8x\n",
+ entry->frame, entry->crc[0],
+ entry->crc[1], entry->crc[2],
+ entry->crc[3], entry->crc[4]);
+
+ ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
+ buf, PIPE_CRC_LINE_LEN);
+ /* copy_to_user() returns the number of bytes not copied */
+ if (ret)
return -EFAULT;
- buf[cnt] = 0;
- val = simple_strtoul(buf, NULL, 0);
+ BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
+ tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+ n++;
+ } while (--n_entries);
+
+ spin_lock_irq(&pipe_crc->lock);
+ pipe_crc->tail = tail;
+ spin_unlock_irq(&pipe_crc->lock);
+
+ return bytes_read;
+}
+
+static const struct file_operations i915_pipe_crc_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_pipe_crc_open,
+ .read = i915_pipe_crc_read,
+ .release = i915_pipe_crc_release,
+};
+
+static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
+ {
+ .name = "i915_pipe_A_crc",
+ .pipe = PIPE_A,
+ },
+ {
+ .name = "i915_pipe_B_crc",
+ .pipe = PIPE_B,
+ },
+ {
+ .name = "i915_pipe_C_crc",
+ .pipe = PIPE_C,
+ },
+};
+
+static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
+ enum pipe pipe)
+{
+ struct drm_device *dev = minor->dev;
+ struct dentry *ent;
+ struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
+
+ info->dev = dev;
+ ent = debugfs_create_file(info->name, S_IRUGO, root, info,
+ &i915_pipe_crc_fops);
+ if (!ent)
+ return -ENOMEM;
+
+ return drm_add_fake_info_node(minor, ent, info);
+}
+
+static const char * const pipe_crc_sources[] = {
+ "none",
+ "plane1",
+ "plane2",
+ "pf",
+ "pipe",
+ "TV",
+ "DP-B",
+ "DP-C",
+ "DP-D",
+ "auto",
+};
+
+static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
+ return pipe_crc_sources[source];
+}
+
+static int display_crc_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < I915_MAX_PIPES; i++)
+ seq_printf(m, "%c %s\n", pipe_name(i),
+ pipe_crc_source_name(dev_priv->pipe_crc[i].source));
+
+ return 0;
+}
+
+static int display_crc_ctl_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ return single_open(file, display_crc_ctl_show, dev);
+}
+
+static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
}
- DRM_INFO("Manually setting wedged to %d\n", val);
+ return 0;
+}
- atomic_set(&dev_priv->mm.wedged, val);
- if (val) {
- DRM_WAKEUP(&dev_priv->irq_queue);
- queue_work(dev_priv->wq, &dev_priv->error_work);
+static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
+ enum intel_pipe_crc_source *source)
+{
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+ struct intel_digital_port *dig_port;
+ int ret = 0;
+
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (!encoder->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(encoder->base.crtc);
+
+ if (crtc->pipe != pipe)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_TVOUT:
+ *source = INTEL_PIPE_CRC_SOURCE_TV;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_EDP:
+ dig_port = enc_to_dig_port(&encoder->base);
+ switch (dig_port->port) {
+ case PORT_B:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_B;
+ break;
+ case PORT_C:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_C;
+ break;
+ case PORT_D:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_D;
+ break;
+ default:
+ WARN(1, "nonexistent DP port %c\n",
+ port_name(dig_port->port));
+ break;
+ }
+ break;
+ }
}
+ drm_modeset_unlock_all(dev);
- return cnt;
+ return ret;
}
-static const struct file_operations i915_wedged_fops = {
- .owner = THIS_MODULE,
- .open = i915_wedged_open,
- .read = i915_wedged_read,
- .write = i915_wedged_write,
+static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool need_stable_symbols = false;
+
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+ int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+ if (ret)
+ return ret;
+ }
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_B:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_C:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * When the pipe CRC tap point is after the transcoders we need
+ * to tweak symbol-level features to produce a deterministic series of
+ * symbols for a given frame. We need to reset those features only once
+ * a frame (instead of every nth symbol):
+ * - DC-balance: used to ensure a better clock recovery from the data
+ * link (SDVO)
+ * - DisplayPort scrambling: used for EMI reduction
+ */
+ if (need_stable_symbols) {
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ tmp |= DC_BALANCE_RESET_VLV;
+ if (pipe == PIPE_A)
+ tmp |= PIPE_A_SCRAMBLE_RESET;
+ else
+ tmp |= PIPE_B_SCRAMBLE_RESET;
+
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+ }
+
+ return 0;
+}
+
+static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool need_stable_symbols = false;
+
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+ int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+ if (ret)
+ return ret;
+ }
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_TV:
+ if (!SUPPORTS_TV(dev))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_B:
+ if (!IS_G4X(dev))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_C:
+ if (!IS_G4X(dev))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_D:
+ if (!IS_G4X(dev))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * When the pipe CRC tap point is after the transcoders we need
+ * to tweak symbol-level features to produce a deterministic series of
+ * symbols for a given frame. We need to reset those features only once
+ * a frame (instead of every nth symbol):
+ * - DC-balance: used to ensure a better clock recovery from the data
+ * link (SDVO)
+ * - DisplayPort scrambling: used for EMI reduction
+ */
+ if (need_stable_symbols) {
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ WARN_ON(!IS_G4X(dev));
+
+ I915_WRITE(PORT_DFT_I9XX,
+ I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
+
+ if (pipe == PIPE_A)
+ tmp |= PIPE_A_SCRAMBLE_RESET;
+ else
+ tmp |= PIPE_B_SCRAMBLE_RESET;
+
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+ }
+
+ return 0;
+}
+
+static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
+ enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ if (pipe == PIPE_A)
+ tmp &= ~PIPE_A_SCRAMBLE_RESET;
+ else
+ tmp &= ~PIPE_B_SCRAMBLE_RESET;
+ if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
+ tmp &= ~DC_BALANCE_RESET_VLV;
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+}
+
+static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
+ enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ if (pipe == PIPE_A)
+ tmp &= ~PIPE_A_SCRAMBLE_RESET;
+ else
+ tmp &= ~PIPE_B_SCRAMBLE_RESET;
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+
+ if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
+ I915_WRITE(PORT_DFT_I9XX,
+ I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
+ }
+}
+
+static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PF;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PF:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
+ enum intel_pipe_crc_source source)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+ u32 val = 0; /* shut up gcc */
+ int ret;
+
+ if (pipe_crc->source == source)
+ return 0;
+
+ /* forbid changing the source without going back to 'none' */
+ if (pipe_crc->source && source)
+ return -EINVAL;
+
+ if (IS_GEN2(dev))
+ ret = i8xx_pipe_crc_ctl_reg(&source, &val);
+ else if (INTEL_INFO(dev)->gen < 5)
+ ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+ else if (IS_VALLEYVIEW(dev))
+ ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+ else if (IS_GEN5(dev) || IS_GEN6(dev))
+ ret = ilk_pipe_crc_ctl_reg(&source, &val);
+ else
+ ret = ivb_pipe_crc_ctl_reg(&source, &val);
+
+ if (ret != 0)
+ return ret;
+
+ /* none -> real source transition */
+ if (source) {
+ DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
+ pipe_name(pipe), pipe_crc_source_name(source));
+
+ pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
+ INTEL_PIPE_CRC_ENTRIES_NR,
+ GFP_KERNEL);
+ if (!pipe_crc->entries)
+ return -ENOMEM;
+
+ spin_lock_irq(&pipe_crc->lock);
+ pipe_crc->head = 0;
+ pipe_crc->tail = 0;
+ spin_unlock_irq(&pipe_crc->lock);
+ }
+
+ pipe_crc->source = source;
+
+ I915_WRITE(PIPE_CRC_CTL(pipe), val);
+ POSTING_READ(PIPE_CRC_CTL(pipe));
+
+ /* real source -> none transition */
+ if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
+ struct intel_pipe_crc_entry *entries;
+
+ DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
+ pipe_name(pipe));
+
+ intel_wait_for_vblank(dev, pipe);
+
+ spin_lock_irq(&pipe_crc->lock);
+ entries = pipe_crc->entries;
+ pipe_crc->entries = NULL;
+ spin_unlock_irq(&pipe_crc->lock);
+
+ kfree(entries);
+
+ if (IS_G4X(dev))
+ g4x_undo_pipe_scramble_reset(dev, pipe);
+ else if (IS_VALLEYVIEW(dev))
+ vlv_undo_pipe_scramble_reset(dev, pipe);
+ }
+
+ return 0;
+}
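Because of the source-change check above, a capture session always runs none -> source -> none: it is armed with e.g. "pipe A pipe", and switching straight to another source such as "pipe A DP-B" returns -EINVAL until "pipe A none" has torn the session down.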
+
+/*
+ * Parse pipe CRC command strings:
+ * command: wsp* object wsp+ name wsp+ source wsp*
+ * object: 'pipe'
+ * name: (A | B | C)
+ * source: (none | plane1 | plane2 | pf)
+ * wsp: (#0x20 | #0x9 | #0xA)+
+ *
+ * eg.:
+ * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
+ * "pipe A none" -> Stop CRC
+ */
+static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
+{
+ int n_words = 0;
+
+ while (*buf) {
+ char *end;
+
+ /* skip leading white space */
+ buf = skip_spaces(buf);
+ if (!*buf)
+ break; /* end of buffer */
+
+ /* find end of word */
+ for (end = buf; *end && !isspace(*end); end++)
+ ;
+
+ if (n_words == max_words) {
+ DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
+ max_words);
+ return -EINVAL; /* ran out of words[] before bytes */
+ }
+
+ if (*end)
+ *end++ = '\0';
+ words[n_words++] = buf;
+ buf = end;
+ }
+
+ return n_words;
+}
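For example, the buffer "pipe A plane1" is NUL-terminated in place and the function returns 3 with words[] = { "pipe", "A", "plane1" }.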
+
+enum intel_pipe_crc_object {
+ PIPE_CRC_OBJECT_PIPE,
+};
+
+static const char * const pipe_crc_objects[] = {
+ "pipe",
};
-/* As the drm_debugfs_init() routines are called before dev->dev_private is
- * allocated we need to hook into the minor for release. */
static int
-drm_add_fake_info_node(struct drm_minor *minor,
- struct dentry *ent,
- const void *key)
+display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
{
- struct drm_info_node *node;
+ int i;
- node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (node == NULL) {
- debugfs_remove(ent);
+ for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
+ if (!strcmp(buf, pipe_crc_objects[i])) {
+ *o = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
+{
+ const char name = buf[0];
+
+ if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
+ return -EINVAL;
+
+ *pipe = name - 'A';
+
+ return 0;
+}
+
+static int
+display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
+ if (!strcmp(buf, pipe_crc_sources[i])) {
+ *s = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
+{
+#define N_WORDS 3
+ int n_words;
+ char *words[N_WORDS];
+ enum pipe pipe;
+ enum intel_pipe_crc_object object;
+ enum intel_pipe_crc_source source;
+
+ n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
+ if (n_words != N_WORDS) {
+ DRM_DEBUG_DRIVER("tokenize failed, a command must be %d words\n",
+ N_WORDS);
+ return -EINVAL;
+ }
+
+ if (display_crc_ctl_parse_object(words[0], &object) < 0) {
+ DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
+ return -EINVAL;
+ }
+
+ if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
+ DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
+ return -EINVAL;
+ }
+
+ if (display_crc_ctl_parse_source(words[2], &source) < 0) {
+ DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
+ return -EINVAL;
+ }
+
+ return pipe_crc_set_source(dev, pipe, source);
+}
+
+static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *dev = m->private;
+ char *tmpbuf;
+ int ret;
+
+ if (len == 0)
+ return 0;
+
+ if (len > PAGE_SIZE - 1) {
+ DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
+ PAGE_SIZE);
+ return -E2BIG;
+ }
+
+ tmpbuf = kmalloc(len + 1, GFP_KERNEL);
+ if (!tmpbuf)
return -ENOMEM;
+
+ if (copy_from_user(tmpbuf, ubuf, len)) {
+ ret = -EFAULT;
+ goto out;
}
+ tmpbuf[len] = '\0';
- node->minor = minor;
- node->dent = ent;
- node->info_ent = (void *) key;
- list_add(&node->list, &minor->debugfs_nodes.list);
+ ret = display_crc_ctl_parse(dev, tmpbuf, len);
+
+out:
+ kfree(tmpbuf);
+ if (ret < 0)
+ return ret;
+
+ *offp += len;
+ return len;
+}
+
+static const struct file_operations i915_display_crc_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = display_crc_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = display_crc_ctl_write
+};
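Putting the control file and the per-pipe CRC files together, a userspace capture might look like the sketch below; the debugfs paths are assumptions and error handling is trimmed to the essentials:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char line[64];	/* must be >= PIPE_CRC_LINE_LEN or the read fails */
	ssize_t n;
	int ctl, crc;

	/* Arm CRC capture on pipe A. */
	ctl = open("/sys/kernel/debug/dri/0/i915_display_crc_ctl", O_WRONLY);
	if (ctl < 0)
		return 1;
	write(ctl, "pipe A pipe", strlen("pipe A pipe"));

	/* Each line is "frame crc0 crc1 crc2 crc3 crc4"; the read blocks
	 * until a vblank has produced an entry. */
	crc = open("/sys/kernel/debug/dri/0/i915_pipe_A_crc", O_RDONLY);
	if (crc >= 0 && (n = read(crc, line, sizeof(line) - 1)) > 0) {
		line[n] = '\0';
		printf("%s", line);
	}

	/* Disarm before exiting. */
	write(ctl, "pipe A none", strlen("pipe A none"));
	if (crc >= 0)
		close(crc);
	close(ctl);
	return 0;
}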
+
+static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
+{
+ struct drm_device *dev = m->private;
+ int num_levels = ilk_wm_max_level(dev) + 1;
+ int level;
+
+ drm_modeset_lock_all(dev);
+
+ for (level = 0; level < num_levels; level++) {
+ unsigned int latency = wm[level];
+
+ /* WM1+ latency values in 0.5us units */
+ if (level > 0)
+ latency *= 5;
+
+ seq_printf(m, "WM%d %u (%u.%u usec)\n",
+ level, wm[level],
+ latency / 10, latency % 10);
+ }
+
+ drm_modeset_unlock_all(dev);
+}
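To make the unit handling concrete: WM1+ values are stored in 0.5 usec units and the print divides by 10, so a stored wm[1] of 12 becomes latency 60 and prints as "WM1 12 (6.0 usec)", while a WM0 value of 12 prints unscaled as "WM0 12 (1.2 usec)".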
+
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+
+ wm_latency_show(m, to_i915(dev)->wm.pri_latency);
return 0;
}
-static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+
+ wm_latency_show(m, to_i915(dev)->wm.spr_latency);
+
+ return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+
+ wm_latency_show(m, to_i915(dev)->wm.cur_latency);
+
+ return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ if (!HAS_PCH_SPLIT(dev))
+ return -ENODEV;
+
+ return single_open(file, pri_wm_latency_show, dev);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ if (!HAS_PCH_SPLIT(dev))
+ return -ENODEV;
+
+ return single_open(file, spr_wm_latency_show, dev);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ if (!HAS_PCH_SPLIT(dev))
+ return -ENODEV;
+
+ return single_open(file, cur_wm_latency_show, dev);
+}
+
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp, uint16_t wm[5])
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *dev = m->private;
+ uint16_t new[5] = { 0 };
+ int num_levels = ilk_wm_max_level(dev) + 1;
+ int level;
+ int ret;
+ char tmp[32];
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
+ if (ret != num_levels)
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ for (level = 0; level < num_levels; level++)
+ wm[level] = new[level];
+
+ drm_modeset_unlock_all(dev);
+
+ return len;
+}
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *dev = m->private;
+
+ return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *dev = m->private;
+
+ return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *dev = m->private;
+
+ return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = pri_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = spr_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = cur_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = cur_wm_latency_write
+};
+
+static int
+i915_wedged_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ *val = atomic_read(&dev_priv->gpu_error.reset_counter);
+
+ return 0;
+}
+
+static int
+i915_wedged_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_runtime_pm_get(dev_priv);
+
+ i915_handle_error(dev, val,
+ "Manually setting wedged to %llu", val);
+
+ intel_runtime_pm_put(dev_priv);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
+ i915_wedged_get, i915_wedged_set,
+ "%llu\n");
+
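DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations boilerplate around the get/set pair, parsing writes with the given format string. Assuming the usual debugfs layout, echo 1 > /sys/kernel/debug/dri/0/i915_wedged therefore lands in i915_wedged_set() and injects a hang via i915_handle_error(); the ring_stop, missed_irq and test_irq knobs below reuse the same pattern.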
+static int
+i915_ring_stop_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ *val = dev_priv->gpu_error.stop_rings;
+
+ return 0;
+}
+
+static int
+i915_ring_stop_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ dev_priv->gpu_error.stop_rings = val;
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
+ i915_ring_stop_get, i915_ring_stop_set,
+ "0x%08llx\n");
+
+static int
+i915_ring_missed_irq_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ *val = dev_priv->gpu_error.missed_irq_rings;
+ return 0;
+}
+
+static int
+i915_ring_missed_irq_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* Lock against concurrent debugfs callers */
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ dev_priv->gpu_error.missed_irq_rings = val;
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
+ i915_ring_missed_irq_get, i915_ring_missed_irq_set,
+ "0x%08llx\n");
+
+static int
+i915_ring_test_irq_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ *val = dev_priv->gpu_error.test_irq_rings;
+
+ return 0;
+}
+
+static int
+i915_ring_test_irq_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
+
+ /* Lock against concurrent debugfs callers */
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ dev_priv->gpu_error.test_irq_rings = val;
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
+ i915_ring_test_irq_get, i915_ring_test_irq_set,
+ "0x%08llx\n");
+
+#define DROP_UNBOUND 0x1
+#define DROP_BOUND 0x2
+#define DROP_RETIRE 0x4
+#define DROP_ACTIVE 0x8
+#define DROP_ALL (DROP_UNBOUND | \
+ DROP_BOUND | \
+ DROP_RETIRE | \
+ DROP_ACTIVE)
+static int
+i915_drop_caches_get(void *data, u64 *val)
+{
+ *val = DROP_ALL;
+
+ return 0;
+}
+
+static int
+i915_drop_caches_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj, *next;
+ struct i915_address_space *vm;
+ struct i915_vma *vma, *x;
+ int ret;
+
+ DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
+
+ /* No need to check and wait for gpu resets, only libdrm auto-restarts
+ * on ioctls on -EAGAIN. */
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ if (val & DROP_ACTIVE) {
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ goto unlock;
+ }
+
+ if (val & (DROP_RETIRE | DROP_ACTIVE))
+ i915_gem_retire_requests(dev);
+
+ if (val & DROP_BOUND) {
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ list_for_each_entry_safe(vma, x, &vm->inactive_list,
+ mm_list) {
+ if (vma->pin_count)
+ continue;
+
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ goto unlock;
+ }
+ }
+ }
+
+ if (val & DROP_UNBOUND) {
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
+ global_list)
+ if (obj->pages_pin_count == 0) {
+ ret = i915_gem_object_put_pages(obj);
+ if (ret)
+ goto unlock;
+ }
+ }
+
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
+ i915_drop_caches_get, i915_drop_caches_set,
+ "0x%08llx\n");
+
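The DROP_* flags compose bitwise: writing 0xf (DROP_ALL) idles the GPU, retires outstanding requests, then unbinds inactive VMAs and frees unbound pages, while e.g. 0x5 (DROP_RETIRE | DROP_UNBOUND) skips the idle and unbind steps.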
+static int
+i915_max_freq_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+ if (ret)
+ return ret;
+
+ if (IS_VALLEYVIEW(dev))
+ *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
+ else
+ *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return 0;
+}
+
+static int
+i915_max_freq_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rp_state_cap, hw_max, hw_min;
+ int ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
+
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+ if (ret)
+ return ret;
+
+ /*
+ * Turbo will still be enabled, but won't go above the set value.
+ */
+ if (IS_VALLEYVIEW(dev)) {
+ val = vlv_freq_opcode(dev_priv, val);
+
+ hw_max = valleyview_rps_max_freq(dev_priv);
+ hw_min = valleyview_rps_min_freq(dev_priv);
+ } else {
+ do_div(val, GT_FREQUENCY_MULTIPLIER);
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = (rp_state_cap >> 16) & 0xff;
+ }
+
+ if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ return -EINVAL;
+ }
+
+ dev_priv->rps.max_freq_softlimit = val;
+
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
+ i915_max_freq_get, i915_max_freq_set,
+ "%llu\n");
+
+static int
+i915_min_freq_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+ if (ret)
+ return ret;
+
+ if (IS_VALLEYVIEW(dev))
+ *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
+ else
+ *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return 0;
+}
+
+static int
+i915_min_freq_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rp_state_cap, hw_max, hw_min;
+ int ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
+
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+ if (ret)
+ return ret;
+
+ /*
+ * Turbo will still be enabled, but won't go below the set value.
+ */
+ if (IS_VALLEYVIEW(dev)) {
+ val = vlv_freq_opcode(dev_priv, val);
+
+ hw_max = valleyview_rps_max_freq(dev_priv);
+ hw_min = valleyview_rps_min_freq(dev_priv);
+ } else {
+ do_div(val, GT_FREQUENCY_MULTIPLIER);
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = (rp_state_cap >> 16) & 0xff;
+ }
+
+ if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ return -EINVAL;
+ }
+
+ dev_priv->rps.min_freq_softlimit = val;
+
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
+ i915_min_freq_get, i915_min_freq_set,
+ "%llu\n");
+
+static int
+i915_cache_sharing_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 snpcr;
+ int ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
+
+ return 0;
+}
+
+static int
+i915_cache_sharing_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 snpcr;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ if (val > 3)
+ return -EINVAL;
+
+ intel_runtime_pm_get(dev_priv);
+ DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
+
+ /* Update the cache sharing policy here as well */
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr &= ~GEN6_MBC_SNPCR_MASK;
+ snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
+ I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+ intel_runtime_pm_put(dev_priv);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
+ i915_cache_sharing_get, i915_cache_sharing_set,
+ "%llu\n");
+
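Usage sketch: the three files above parse and print plain "%llu\n" strings,
so they can be driven with ordinary userspace I/O. The debugfs mount point
and the dri/0 minor below are assumptions, as is the MHz unit (inferred from
the GT_FREQUENCY_MULTIPLIER scaling in the Gen6/7 paths above).

/* Hypothetical consumer of the i915_max_freq knob defined above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[32] = "";
	int fd = open("/sys/kernel/debug/dri/0/i915_max_freq", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (read(fd, buf, sizeof(buf) - 1) > 0)
		printf("current max freq cap: %s", buf);	/* e.g. "1100\n" */

	/* Cap turbo at 600 (MHz on Gen6/7); the set hook range-checks it. */
	if (write(fd, "600", strlen("600")) < 0)
		perror("write");
	close(fd);
	return 0;
}
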
+static int i915_forcewake_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen < 6)
+ return 0;
+
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ return 0;
+}
+
+static int i915_forcewake_release(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen < 6)
+ return 0;
+
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+
+ return 0;
+}
+
+static const struct file_operations i915_forcewake_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_forcewake_open,
+ .release = i915_forcewake_release,
+};
+
+static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct dentry *ent;
+
+ ent = debugfs_create_file("i915_forcewake_user",
+ S_IRUSR,
+ root, dev,
+ &i915_forcewake_fops);
+ if (!ent)
+ return -ENOMEM;
+
+ return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
+}
+
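Usage sketch for i915_forcewake_user (the debugfs path is assumed, as above):
since the open and release hooks take and drop the forcewake reference,
merely holding the file descriptor open keeps the GT awake, e.g. while a
debugging tool inspects registers.

/* Hypothetical: pin the GT awake for the lifetime of the fd. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* open() maps to gen6_gt_force_wake_get() in the driver */
	int fd = open("/sys/kernel/debug/dri/0/i915_forcewake_user", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	sleep(10);	/* the GT is held awake while the fd stays open */
	close(fd);	/* release() drops the forcewake reference again */
	return 0;
}
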
+static int i915_debugfs_create(struct dentry *root,
+ struct drm_minor *minor,
+ const char *name,
+ const struct file_operations *fops)
{
struct drm_device *dev = minor->dev;
struct dentry *ent;
- ent = debugfs_create_file("i915_wedged",
+ ent = debugfs_create_file(name,
S_IRUGO | S_IWUSR,
root, dev,
- &i915_wedged_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
+ fops);
+ if (!ent)
+ return -ENOMEM;
- return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
+ return drm_add_fake_info_node(minor, ent, fops);
}
-static struct drm_info_list i915_debugfs_list[] = {
+static const struct drm_info_list i915_debugfs_list[] = {
+ {"i915_capabilities", i915_capabilities, 0},
+ {"i915_gem_objects", i915_gem_object_info, 0},
+ {"i915_gem_gtt", i915_gem_gtt_info, 0},
+ {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
- {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+ {"i915_gem_stolen", i915_gem_stolen_list_info },
+ {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
- {"i915_gem_hws", i915_hws_info, 0},
- {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
- {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
- {"i915_batchbuffers", i915_batchbuffer_info, 0},
- {"i915_error_state", i915_error_state, 0},
+ {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
+ {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
+ {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
+ {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
+ {"i915_rstdby_delays", i915_rstdby_delays, 0},
+ {"i915_frequency_info", i915_frequency_info, 0},
+ {"i915_delayfreq_table", i915_delayfreq_table, 0},
+ {"i915_inttoext_table", i915_inttoext_table, 0},
+ {"i915_drpc_info", i915_drpc_info, 0},
+ {"i915_emon_status", i915_emon_status, 0},
+ {"i915_ring_freq_table", i915_ring_freq_table, 0},
+ {"i915_gfxec", i915_gfxec, 0},
+ {"i915_fbc_status", i915_fbc_status, 0},
+ {"i915_ips_status", i915_ips_status, 0},
+ {"i915_sr_status", i915_sr_status, 0},
+ {"i915_opregion", i915_opregion, 0},
+ {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
+ {"i915_context_status", i915_context_status, 0},
+ {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
+ {"i915_swizzle_info", i915_swizzle_info, 0},
+ {"i915_ppgtt_info", i915_ppgtt_info, 0},
+ {"i915_llc", i915_llc, 0},
+ {"i915_edp_psr_status", i915_edp_psr_status, 0},
+ {"i915_sink_crc_eDP1", i915_sink_crc, 0},
+ {"i915_energy_uJ", i915_energy_uJ, 0},
+ {"i915_pc8_status", i915_pc8_status, 0},
+ {"i915_power_domain_info", i915_power_domain_info, 0},
+ {"i915_display_info", i915_display_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
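Each drm_info_list entry above is a read-only seq_file node; drm_debugfs
wires the listed function up as the single_open() show callback with the
drm_info_node as the seq_file's private data. A minimal sketch of the shape
such an entry takes (i915_example_info is invented for illustration):

/* Hypothetical read-only entry following the table's pattern. */
static int i915_example_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;	/* set up by drm_debugfs */
	struct drm_device *dev = node->minor->dev;

	seq_printf(m, "gen: %d\n", INTEL_INFO(dev)->gen);
	return 0;
}

/* ...and its table entry: {"i915_example_info", i915_example_info, 0}, */
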
+static const struct i915_debugfs_files {
+ const char *name;
+ const struct file_operations *fops;
+} i915_debugfs_files[] = {
+ {"i915_wedged", &i915_wedged_fops},
+ {"i915_max_freq", &i915_max_freq_fops},
+ {"i915_min_freq", &i915_min_freq_fops},
+ {"i915_cache_sharing", &i915_cache_sharing_fops},
+ {"i915_ring_stop", &i915_ring_stop_fops},
+ {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
+ {"i915_ring_test_irq", &i915_ring_test_irq_fops},
+ {"i915_gem_drop_caches", &i915_drop_caches_fops},
+ {"i915_error_state", &i915_error_state_fops},
+ {"i915_next_seqno", &i915_next_seqno_fops},
+ {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
+ {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
+ {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
+ {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
+};
+
+void intel_display_crc_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+
+ for_each_pipe(pipe) {
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+
+ pipe_crc->opened = false;
+ spin_lock_init(&pipe_crc->lock);
+ init_waitqueue_head(&pipe_crc->wq);
+ }
+}
+
int i915_debugfs_init(struct drm_minor *minor)
{
- int ret;
+ int ret, i;
- ret = i915_wedged_create(minor->debugfs_root, minor);
+ ret = i915_forcewake_create(minor->debugfs_root, minor);
if (ret)
return ret;
+ for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+ ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ i915_debugfs_files[i].name,
+ i915_debugfs_files[i].fops);
+ if (ret)
+ return ret;
+ }
+
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
@@ -521,10 +3889,25 @@ int i915_debugfs_init(struct drm_minor *minor)
void i915_debugfs_cleanup(struct drm_minor *minor)
{
+ int i;
+
drm_debugfs_remove_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
+
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
1, minor);
-}
-#endif /* CONFIG_DEBUG_FS */
+ for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+ struct drm_info_list *info_list =
+ (struct drm_info_list *)&i915_pipe_crc_data[i];
+
+ drm_debugfs_remove_files(info_list, 1, minor);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+ struct drm_info_list *info_list =
+ (struct drm_info_list *) i915_debugfs_files[i].fops;
+
+ drm_debugfs_remove_files(info_list, 1, minor);
+ }
+}
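For context on why the casts in the cleanup loops above are safe: the
drm_add_fake_info_node() helper (defined earlier in this file, outside the
hunks shown) registers a drm_info_node keyed on the pointer it is given, so
removal can later match on that very pointer. A sketch of the idea, under the
assumption that it mirrors the helper's actual layout:

/* Sketch: register a fake info node so generic removal can find it later. */
static int drm_add_fake_info_node(struct drm_minor *minor,
				  struct dentry *ent, const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *)key;	/* the fops pointer doubles as the key */

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->debugfs_list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}
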
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index bbe47812e4b..d4434414062 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -26,117 +26,93 @@
*
*/
-#include "drmP.h"
-#include "drm.h"
-#include "drm_crtc_helper.h"
-#include "drm_fb_helper.h"
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
-#include "i915_drm.h"
+#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
+#include <linux/pci.h>
+#include <linux/console.h>
+#include <linux/vt.h>
#include <linux/vgaarb.h>
+#include <linux/acpi.h>
+#include <linux/pnp.h>
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
+#include <acpi/video.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/oom.h>
-/* Really want an OS-independent resettable timer. Would like to have
- * this loop run for (eg) 3 sec, but have the timer reset every time
- * the head pointer changes, so that EBUSY only happens if the ring
- * actually stalls for (eg) 3 seconds.
- */
-int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
- u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
- u32 last_acthd = I915_READ(acthd_reg);
- u32 acthd;
- u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- int i;
-
- trace_i915_ring_wait_begin (dev);
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
- for (i = 0; i < 100000; i++) {
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- acthd = I915_READ(acthd_reg);
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
- if (ring->space >= n) {
- trace_i915_ring_wait_end (dev);
- return 0;
- }
-
- if (dev->primary->master) {
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- }
+#define BEGIN_LP_RING(n) \
+ intel_ring_begin(LP_RING(dev_priv), (n))
+#define OUT_RING(x) \
+ intel_ring_emit(LP_RING(dev_priv), x)
- if (ring->head != last_head)
- i = 0;
- if (acthd != last_acthd)
- i = 0;
+#define ADVANCE_LP_RING() \
+ __intel_ring_advance(LP_RING(dev_priv))
- last_head = ring->head;
- last_acthd = acthd;
- msleep_interruptible(10);
-
- }
-
- trace_i915_ring_wait_end (dev);
- return -EBUSY;
-}
-
-/* As a ringbuffer is only allowed to wrap between instructions, fill
- * the tail with NOOPs.
+/**
+ * Lock test used when the lock matters only for synchronizing ring access.
+ *
+ * In that case, we can skip it once GEM is initialized, since nobody else
+ * has access to the ring.
*/
-int i915_wrap_ring(struct drm_device *dev)
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
+ if (LP_RING(dev->dev_private)->buffer->obj == NULL) \
+ LOCK_TEST_WITH_RETURN(dev, file); \
+} while (0)
+
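The macros above reduce legacy ring emission to a fixed reserve/emit/advance
sequence; every call site converted in this patch follows it. A minimal
sketch of that shape (the two MI_NOOP payload dwords are illustrative only):

/* Illustrative emission pattern: BEGIN_LP_RING() reserves ring space and
 * can fail, so converted call sites check its return value before emitting. */
static int example_emit_two_noops(struct drm_i915_private *dev_priv)
{
	int ret;

	ret = BEGIN_LP_RING(2);	/* reserve two dwords on the render ring */
	if (ret)
		return ret;	/* e.g. wedged GPU or interrupted wait */

	OUT_RING(MI_NOOP);	/* first dword */
	OUT_RING(MI_NOOP);	/* second dword */
	ADVANCE_LP_RING();	/* publish the new tail to the hardware */

	return 0;
}
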
+static inline u32
+intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- volatile unsigned int *virt;
- int rem;
+ if (I915_NEED_GFX_HWS(dev_priv->dev))
+ return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
+ else
+ return intel_read_status_page(LP_RING(dev_priv), reg);
+}
- rem = dev_priv->ring.Size - dev_priv->ring.tail;
- if (dev_priv->ring.space < rem) {
- int ret = i915_wait_ring(dev, rem, __func__);
- if (ret)
- return ret;
- }
- dev_priv->ring.space -= rem;
+#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
+#define I915_BREADCRUMB_INDEX 0x21
- virt = (unsigned int *)
- (dev_priv->ring.virtual_start + dev_priv->ring.tail);
- rem /= 4;
- while (rem--)
- *virt++ = MI_NOOP;
+void i915_update_dri1_breadcrumb(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv;
- dev_priv->ring.tail = 0;
+ /*
+ * The dri breadcrumb update races against the drm master disappearing.
+ * Instead of trying to fix this (it is far from the only UMS issue),
+ * just don't do the update in kms mode.
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
- return 0;
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
}
-/**
- * Sets up the hardware status page for devices that need a physical address
- * in the register.
- */
-static int i915_init_phys_hws(struct drm_device *dev)
+static void i915_write_hws_pga(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- /* Program Hardware Status Page */
- dev_priv->status_page_dmah =
- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
-
- if (!dev_priv->status_page_dmah) {
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
- dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
- dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 addr;
- I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
- DRM_DEBUG_DRIVER("Enabled hardware status page\n");
- return 0;
+ addr = dev_priv->status_page_dmah->busaddr;
+ if (INTEL_INFO(dev)->gen >= 4)
+ addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+ I915_WRITE(HWS_PGA, addr);
}
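Worked example of the gen4+ packing above (the bus address is hypothetical):
since the status page is 4K-aligned, the low bits of the register are free,
and the helper folds address bits 35:32 into bits 7:4.

/* Illustrative arithmetic for i915_write_hws_pga(), runnable as plain C. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t busaddr = 0x323456000ULL;	/* hypothetical 36-bit address */
	uint32_t addr = (uint32_t)busaddr;	/* low 32 bits: 0x23456000 */

	addr |= (busaddr >> 28) & 0xf0;		/* bits 35:32 -> 7:4, here 0x30 */
	printf("HWS_PGA value: 0x%08x\n", addr);	/* prints 0x23456030 */
	return 0;
}
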
/**
@@ -145,15 +121,17 @@ static int i915_init_phys_hws(struct drm_device *dev)
*/
static void i915_free_hws(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = LP_RING(dev_priv);
+
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
}
- if (dev_priv->status_gfx_addr) {
- dev_priv->status_gfx_addr = 0;
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ if (ring->status_page.gfx_addr) {
+ ring->status_page.gfx_addr = 0;
+ iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
}
/* Need to rewrite hardware status page */
@@ -162,9 +140,10 @@ static void i915_free_hws(struct drm_device *dev)
void i915_kernel_lost_context(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+ struct intel_engine_cs *ring = LP_RING(dev_priv);
+ struct intel_ringbuffer *ringbuf = ring->buffer;
/*
* We should never lose context on the ring with modesetting
@@ -173,23 +152,25 @@ void i915_kernel_lost_context(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
+ ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
+ if (ringbuf->space < 0)
+ ringbuf->space += ringbuf->size;
if (!dev->primary->master)
return;
master_priv = dev->primary->master->driver_priv;
- if (ring->head == ring->tail && master_priv->sarea_priv)
+ if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
static int i915_dma_cleanup(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
@@ -197,12 +178,10 @@ static int i915_dma_cleanup(struct drm_device * dev)
if (dev->irq_enabled)
drm_irq_uninstall(dev);
- if (dev_priv->ring.virtual_start) {
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
- dev_priv->ring.virtual_start = NULL;
- dev_priv->ring.map.handle = NULL;
- dev_priv->ring.map.size = 0;
- }
+ mutex_lock(&dev->struct_mutex);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ intel_cleanup_ring_buffer(&dev_priv->ring[i]);
+ mutex_unlock(&dev->struct_mutex);
/* Clear the HWS virtual address at teardown */
if (I915_NEED_GFX_HWS(dev))
@@ -213,8 +192,9 @@ static int i915_dma_cleanup(struct drm_device * dev)
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ int ret;
master_priv->sarea = drm_getsarea(dev);
if (master_priv->sarea) {
@@ -225,71 +205,61 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
if (init->ring_size != 0) {
- if (dev_priv->ring.ring_obj != NULL) {
+ if (LP_RING(dev_priv)->buffer->obj != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
return -EINVAL;
}
- dev_priv->ring.Size = init->ring_size;
-
- dev_priv->ring.map.offset = init->ring_start;
- dev_priv->ring.map.size = init->ring_size;
- dev_priv->ring.map.type = 0;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
-
- drm_core_ioremap_wc(&dev_priv->ring.map, dev);
-
- if (dev_priv->ring.map.handle == NULL) {
+ ret = intel_render_ring_init_dri(dev,
+ init->ring_start,
+ init->ring_size);
+ if (ret) {
i915_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
+ return ret;
}
}
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
-
- dev_priv->cpp = init->cpp;
- dev_priv->back_offset = init->back_offset;
- dev_priv->front_offset = init->front_offset;
- dev_priv->current_page = 0;
+ dev_priv->dri1.cpp = init->cpp;
+ dev_priv->dri1.back_offset = init->back_offset;
+ dev_priv->dri1.front_offset = init->front_offset;
+ dev_priv->dri1.current_page = 0;
if (master_priv->sarea_priv)
master_priv->sarea_priv->pf_current_page = 0;
/* Allow hardware batchbuffers unless told otherwise.
*/
- dev_priv->allow_batchbuffer = 1;
+ dev_priv->dri1.allow_batchbuffer = 1;
return 0;
}
static int i915_dma_resume(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = LP_RING(dev_priv);
DRM_DEBUG_DRIVER("%s\n", __func__);
- if (dev_priv->ring.map.handle == NULL) {
+ if (ring->buffer->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
/* Program Hardware Status Page */
- if (!dev_priv->hw_status_page) {
+ if (!ring->status_page.page_addr) {
DRM_ERROR("Can not find hardware status page\n");
return -EINVAL;
}
DRM_DEBUG_DRIVER("hw status page @ %p\n",
- dev_priv->hw_status_page);
-
- if (dev_priv->status_gfx_addr != 0)
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ ring->status_page.page_addr);
+ if (ring->status_page.gfx_addr != 0)
+ intel_ring_setup_status_page(ring);
else
- I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+ i915_write_hws_pga(dev);
+
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
@@ -301,6 +271,9 @@ static int i915_dma_init(struct drm_device *dev, void *data,
drm_i915_init_t *init = data;
int retcode = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
switch (init->func) {
case I915_INIT_DMA:
retcode = i915_initialize(dev, init);
@@ -328,7 +301,7 @@ static int i915_dma_init(struct drm_device *dev, void *data,
* instruction detected will be given a size of zero, which is a
* signal to abort the rest of the buffer.
*/
-static int do_validate_cmd(int cmd)
+static int validate_cmd(int cmd)
{
switch (((cmd >> 29) & 0x7)) {
case 0x0:
@@ -386,41 +359,27 @@ static int do_validate_cmd(int cmd)
return 0;
}
-static int validate_cmd(int cmd)
-{
- int ret = do_validate_cmd(cmd);
-
-/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
-
- return ret;
-}
-
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int i;
- RING_LOCALS;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i, ret;
- if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
+ if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
return -EINVAL;
- BEGIN_LP_RING((dwords+1)&~1);
-
for (i = 0; i < dwords;) {
- int cmd, sz;
-
- cmd = buffer[i];
-
- if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+ int sz = validate_cmd(buffer[i]);
+ if (sz == 0 || i + sz > dwords)
return -EINVAL;
-
- OUT_RING(cmd);
-
- while (++i, --sz) {
- OUT_RING(buffer[i]);
- }
+ i += sz;
}
+ ret = BEGIN_LP_RING((dwords+1)&~1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < dwords; i++)
+ OUT_RING(buffer[i]);
if (dwords & 1)
OUT_RING(0);
@@ -431,36 +390,41 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
int
i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect *boxes,
- int i, int DR1, int DR4)
+ struct drm_clip_rect *box,
+ int DR1, int DR4)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_clip_rect box = boxes[i];
- RING_LOCALS;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
- if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
+ if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+ box->y2 <= 0 || box->x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
- box.x1, box.y1, box.x2, box.y2);
+ box->x1, box->y1, box->x2, box->y2);
return -EINVAL;
}
- if (IS_I965G(dev)) {
- BEGIN_LP_RING(4);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ return ret;
+
OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
- OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
- OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+ OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+ OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
OUT_RING(DR4);
- ADVANCE_LP_RING();
} else {
- BEGIN_LP_RING(6);
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+ return ret;
+
OUT_RING(GFX_OP_DRAWRECT_INFO);
OUT_RING(DR1);
- OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
- OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+ OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+ OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
OUT_RING(DR4);
OUT_RING(0);
- ADVANCE_LP_RING();
}
+ ADVANCE_LP_RING();
return 0;
}
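Worked example of the coordinate packing in i915_emit_box() above
(illustrative numbers): each corner is one dword with y in the high halfword,
and the max corner is made inclusive by subtracting one.

/* Illustrative packing math for a clip rect (16,32)-(656,512), plain C. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int x1 = 16, y1 = 32, x2 = 656, y2 = 512;	/* hypothetical box */
	uint32_t min = (x1 & 0xffff) | (y1 << 16);	/* 0x00200010 */
	uint32_t max = ((x2 - 1) & 0xffff) | ((y2 - 1) << 16); /* 0x01ff028f */

	printf("DRAWRECT min 0x%08x max 0x%08x\n", min, max);
	return 0;
}
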
@@ -471,22 +435,22 @@ i915_emit_box(struct drm_device *dev,
static void i915_emit_breadcrumb(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- RING_LOCALS;
- dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->counter = 0;
+ dev_priv->dri1.counter++;
+ if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+ dev_priv->dri1.counter = 0;
if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->dri1.counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
@@ -508,7 +472,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
for (i = 0; i < count; i++) {
if (i < nbox) {
- ret = i915_emit_box(dev, cliprects, i,
+ ret = i915_emit_box(dev, &cliprects[i],
cmd->DR1, cmd->DR4);
if (ret)
return ret;
@@ -527,10 +491,9 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch,
struct drm_clip_rect *cliprects)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int nbox = batch->num_cliprects;
- int i = 0, count;
- RING_LOCALS;
+ int i, count, ret;
if ((batch->start | batch->used) & 0x7) {
DRM_ERROR("alignment");
@@ -540,99 +503,110 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
i915_kernel_lost_context(dev);
count = nbox ? nbox : 1;
-
for (i = 0; i < count; i++) {
if (i < nbox) {
- int ret = i915_emit_box(dev, cliprects, i,
- batch->DR1, batch->DR4);
+ ret = i915_emit_box(dev, &cliprects[i],
+ batch->DR1, batch->DR4);
if (ret)
return ret;
}
if (!IS_I830(dev) && !IS_845G(dev)) {
- BEGIN_LP_RING(2);
- if (IS_I965G(dev)) {
+ ret = BEGIN_LP_RING(2);
+ if (ret)
+ return ret;
+
+ if (INTEL_INFO(dev)->gen >= 4) {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
OUT_RING(batch->start);
} else {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
}
- ADVANCE_LP_RING();
} else {
- BEGIN_LP_RING(4);
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ return ret;
+
OUT_RING(MI_BATCH_BUFFER);
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
OUT_RING(batch->start + batch->used - 4);
OUT_RING(0);
+ }
+ ADVANCE_LP_RING();
+ }
+
+ if (IS_G4X(dev) || IS_GEN5(dev)) {
+ if (BEGIN_LP_RING(2) == 0) {
+ OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+ OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
}
}
i915_emit_breadcrumb(dev);
-
return 0;
}
static int i915_dispatch_flip(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
dev->primary->master->driver_priv;
- RING_LOCALS;
+ int ret;
if (!master_priv->sarea_priv)
return -EINVAL;
DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
__func__,
- dev_priv->current_page,
+ dev_priv->dri1.current_page,
master_priv->sarea_priv->pf_current_page);
i915_kernel_lost_context(dev);
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(10);
+ if (ret)
+ return ret;
+
OUT_RING(MI_FLUSH | MI_READ_FLUSH);
OUT_RING(0);
- ADVANCE_LP_RING();
- BEGIN_LP_RING(6);
OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
OUT_RING(0);
- if (dev_priv->current_page == 0) {
- OUT_RING(dev_priv->back_offset);
- dev_priv->current_page = 1;
+ if (dev_priv->dri1.current_page == 0) {
+ OUT_RING(dev_priv->dri1.back_offset);
+ dev_priv->dri1.current_page = 1;
} else {
- OUT_RING(dev_priv->front_offset);
- dev_priv->current_page = 0;
+ OUT_RING(dev_priv->dri1.front_offset);
+ dev_priv->dri1.current_page = 0;
}
OUT_RING(0);
- ADVANCE_LP_RING();
- BEGIN_LP_RING(2);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
OUT_RING(0);
+
ADVANCE_LP_RING();
- master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->dri1.counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
- master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+ master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
return 0;
}
-static int i915_quiescent(struct drm_device * dev)
+static int i915_quiescent(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
i915_kernel_lost_context(dev);
- return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
+ return intel_ring_idle(LP_RING(dev->dev_private));
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -640,6 +614,9 @@ static int i915_flush_ioctl(struct drm_device *dev, void *data,
{
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
mutex_lock(&dev->struct_mutex);
@@ -652,15 +629,20 @@ static int i915_flush_ioctl(struct drm_device *dev, void *data,
static int i915_batchbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
- master_priv->sarea_priv;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv;
+ drm_i915_sarea_t *sarea_priv;
drm_i915_batchbuffer_t *batch = data;
int ret;
struct drm_clip_rect *cliprects = NULL;
- if (!dev_priv->allow_batchbuffer) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ master_priv = dev->primary->master->driver_priv;
+ sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
+
+ if (!dev_priv->dri1.allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
return -EINVAL;
}
@@ -675,7 +657,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
if (batch->num_cliprects) {
cliprects = kcalloc(batch->num_cliprects,
- sizeof(struct drm_clip_rect),
+ sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL)
return -ENOMEM;
@@ -683,8 +665,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
ret = copy_from_user(cliprects, batch->cliprects,
batch->num_cliprects *
sizeof(struct drm_clip_rect));
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_free;
+ }
}
mutex_lock(&dev->struct_mutex);
@@ -703,10 +687,9 @@ fail_free:
static int i915_cmdbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
- master_priv->sarea_priv;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv;
+ drm_i915_sarea_t *sarea_priv;
drm_i915_cmdbuffer_t *cmdbuf = data;
struct drm_clip_rect *cliprects = NULL;
void *batch_data;
@@ -715,6 +698,12 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ master_priv = dev->primary->master->driver_priv;
+ sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
+
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (cmdbuf->num_cliprects < 0)
@@ -725,20 +714,26 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
return -ENOMEM;
ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_batch_free;
+ }
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
- sizeof(struct drm_clip_rect), GFP_KERNEL);
- if (cliprects == NULL)
+ sizeof(*cliprects), GFP_KERNEL);
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
goto fail_batch_free;
+ }
ret = copy_from_user(cliprects, cmdbuf->cliprects,
cmdbuf->num_cliprects *
sizeof(struct drm_clip_rect));
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_clip_free;
+ }
}
mutex_lock(&dev->struct_mutex);
@@ -760,11 +755,166 @@ fail_batch_free:
return ret;
}
+static int i915_emit_irq(struct drm_device * dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+
+ i915_kernel_lost_context(dev);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ dev_priv->dri1.counter++;
+ if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+ dev_priv->dri1.counter = 1;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
+
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->dri1.counter);
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
+
+ return dev_priv->dri1.counter;
+}
+
+static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ int ret = 0;
+ struct intel_engine_cs *ring = LP_RING(dev_priv);
+
+ DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
+ READ_BREADCRUMB(dev_priv));
+
+ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ return 0;
+ }
+
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
+ if (ring->irq_get(ring)) {
+ DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
+ READ_BREADCRUMB(dev_priv) >= irq_nr);
+ ring->irq_put(ring);
+ } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+ ret = -EBUSY;
+
+ if (ret == -EBUSY) {
+ DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
+ READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
+ }
+
+ return ret;
+}
+
+/* Needs the lock as it touches the ring.
+ */
+static int i915_irq_emit(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_i915_irq_emit_t *emit = data;
+ int result;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ mutex_lock(&dev->struct_mutex);
+ result = i915_emit_irq(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
+ DRM_ERROR("copy_to_user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+static int i915_irq_wait(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_i915_irq_wait_t *irqwait = data;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ return i915_wait_irq(dev, irqwait->irq_seq);
+}
+
+static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_i915_vblank_pipe_t *pipe = data;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+ return 0;
+}
+
+/**
+ * Schedule buffer swap at given vertical blank.
+ */
+static int i915_vblank_swap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ /* The delayed swap mechanism was fundamentally racy, and has been
+ * removed. The model was that the client requested a delayed flip/swap
+ * from the kernel, then waited for vblank before continuing to perform
+ * rendering. The problem was that the kernel might wake the client
+ * up before it dispatched the vblank swap (since the lock has to be
+ * held while touching the ringbuffer), in which case the client would
+ * clear and start the next frame before the swap occurred, and
+ * flicker would occur in addition to likely missing the vblank.
+ *
+ * In the absence of this ioctl, userland falls back to a correct path
+ * of waiting for a vblank, then dispatching the swap on its own.
+ * Context switching to userland and back is plenty fast enough for
+ * meeting the requirements of vblank swapping.
+ */
+ return -EINVAL;
+}
+
static int i915_flip_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
DRM_DEBUG_DRIVER("%s\n", __func__);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -779,7 +929,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data,
static int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_getparam_t *param = data;
int value;
@@ -793,16 +943,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev->pdev->irq ? 1 : 0;
break;
case I915_PARAM_ALLOW_BATCHBUFFER:
- value = dev_priv->allow_batchbuffer ? 1 : 0;
+ value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
break;
case I915_PARAM_LAST_DISPATCH:
value = READ_BREADCRUMB(dev_priv);
break;
case I915_PARAM_CHIPSET_ID:
- value = dev->pci_device;
+ value = dev->pdev->device;
break;
case I915_PARAM_HAS_GEM:
- value = dev_priv->has_gem;
+ value = 1;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
@@ -815,16 +965,72 @@ static int i915_getparam(struct drm_device *dev, void *data,
break;
case I915_PARAM_HAS_EXECBUF2:
/* depends on GEM */
- value = dev_priv->has_gem;
+ value = 1;
+ break;
+ case I915_PARAM_HAS_BSD:
+ value = intel_ring_initialized(&dev_priv->ring[VCS]);
+ break;
+ case I915_PARAM_HAS_BLT:
+ value = intel_ring_initialized(&dev_priv->ring[BCS]);
+ break;
+ case I915_PARAM_HAS_VEBOX:
+ value = intel_ring_initialized(&dev_priv->ring[VECS]);
+ break;
+ case I915_PARAM_HAS_RELAXED_FENCING:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_COHERENT_RINGS:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
+ value = INTEL_INFO(dev)->gen >= 4;
+ break;
+ case I915_PARAM_HAS_RELAXED_DELTA:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_GEN7_SOL_RESET:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_LLC:
+ value = HAS_LLC(dev);
+ break;
+ case I915_PARAM_HAS_WT:
+ value = HAS_WT(dev);
+ break;
+ case I915_PARAM_HAS_ALIASING_PPGTT:
+ value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
+ break;
+ case I915_PARAM_HAS_WAIT_TIMEOUT:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_SEMAPHORES:
+ value = i915_semaphore_is_enabled(dev);
+ break;
+ case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_SECURE_BATCHES:
+ value = capable(CAP_SYS_ADMIN);
+ break;
+ case I915_PARAM_HAS_PINNED_BATCHES:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_NO_RELOC:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+ value = 1;
+ break;
+ case I915_PARAM_CMD_PARSER_VERSION:
+ value = i915_cmd_parser_get_version();
break;
default:
- DRM_DEBUG_DRIVER("Unknown parameter %d\n",
- param->param);
+ DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
}
- if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
- DRM_ERROR("DRM_COPY_TO_USER failed\n");
+ if (copy_to_user(param->value, &value, sizeof(int))) {
+ DRM_ERROR("copy_to_user failed\n");
return -EFAULT;
}
@@ -834,7 +1040,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
static int i915_setparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_setparam_t *param = data;
if (!dev_priv) {
@@ -846,10 +1052,9 @@ static int i915_setparam(struct drm_device *dev, void *data,
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
- dev_priv->tex_lru_log_granularity = param->value;
break;
case I915_SETPARAM_ALLOW_BATCHBUFFER:
- dev_priv->allow_batchbuffer = param->value;
+ dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
break;
case I915_SETPARAM_NUM_USED_FENCES:
if (param->value > dev_priv->num_fence_regs ||
@@ -870,8 +1075,12 @@ static int i915_setparam(struct drm_device *dev, void *data,
static int i915_set_status_page(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
+ struct intel_engine_cs *ring;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
if (!I915_NEED_GFX_HWS(dev))
return -EINVAL;
@@ -888,30 +1097,26 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
- dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
-
- dev_priv->hws_map.offset = dev->agp->base + hws->addr;
- dev_priv->hws_map.size = 4*1024;
- dev_priv->hws_map.type = 0;
- dev_priv->hws_map.flags = 0;
- dev_priv->hws_map.mtrr = 0;
+ ring = LP_RING(dev_priv);
+ ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
- drm_core_ioremap_wc(&dev_priv->hws_map, dev);
- if (dev_priv->hws_map.handle == NULL) {
+ dev_priv->dri1.gfx_hws_cpu_addr =
+ ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
+ if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
i915_dma_cleanup(dev);
- dev_priv->status_gfx_addr = 0;
+ ring->status_page.gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
return -ENOMEM;
}
- dev_priv->hw_status_page = dev_priv->hws_map.handle;
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
+ I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
+
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
- dev_priv->status_gfx_addr);
+ ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws at %p\n",
- dev_priv->hw_status_page);
+ ring->status_page.page_addr);
return 0;
}
@@ -919,7 +1124,7 @@ static int i915_get_bridge_dev(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+ dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
if (!dev_priv->bridge_dev) {
DRM_ERROR("bridge device not found\n");
return -1;
@@ -927,257 +1132,121 @@ static int i915_get_bridge_dev(struct drm_device *dev)
return 0;
}
-/**
- * i915_probe_agp - get AGP bootup configuration
- * @pdev: PCI device
- * @aperture_size: returns AGP aperture configured size
- * @preallocated_size: returns size of BIOS preallocated AGP space
- *
- * Since Intel integrated graphics are UMA, the BIOS has to set aside
- * some RAM for the framebuffer at early boot. This code figures out
- * how much was set aside so we can use it for our own purposes.
- */
-static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
- uint32_t *preallocated_size,
- uint32_t *start)
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4*4096)
+
+#define DEVEN_REG 0x54
+#define DEVEN_MCHBAR_EN (1 << 28)
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u16 tmp = 0;
- unsigned long overhead;
- unsigned long stolen;
-
- /* Get the fb aperture size and "stolen" memory amount. */
- pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
-
- *aperture_size = 1024 * 1024;
- *preallocated_size = 1024 * 1024;
-
- switch (dev->pdev->device) {
- case PCI_DEVICE_ID_INTEL_82830_CGC:
- case PCI_DEVICE_ID_INTEL_82845G_IG:
- case PCI_DEVICE_ID_INTEL_82855GM_IG:
- case PCI_DEVICE_ID_INTEL_82865_IG:
- if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
- *aperture_size *= 64;
- else
- *aperture_size *= 128;
- break;
- default:
- /* 9xx supports large sizes, just look at the length */
- *aperture_size = pci_resource_len(dev->pdev, 2);
- break;
- }
+ int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp_lo, temp_hi = 0;
+ u64 mchbar_addr;
+ int ret;
- /*
- * Some of the preallocated space is taken by the GTT
- * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
- */
- if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
- overhead = 4096;
- else
- overhead = (*aperture_size / 1024) + 4096;
+ if (INTEL_INFO(dev)->gen >= 4)
+ pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+ pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+ mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
- switch (tmp & INTEL_GMCH_GMS_MASK) {
- case INTEL_855_GMCH_GMS_DISABLED:
- DRM_ERROR("video memory is disabled\n");
- return -1;
- case INTEL_855_GMCH_GMS_STOLEN_1M:
- stolen = 1 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_4M:
- stolen = 4 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_8M:
- stolen = 8 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_16M:
- stolen = 16 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_32M:
- stolen = 32 * 1024 * 1024;
- break;
- case INTEL_915G_GMCH_GMS_STOLEN_48M:
- stolen = 48 * 1024 * 1024;
- break;
- case INTEL_915G_GMCH_GMS_STOLEN_64M:
- stolen = 64 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_128M:
- stolen = 128 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_256M:
- stolen = 256 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_96M:
- stolen = 96 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_160M:
- stolen = 160 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_224M:
- stolen = 224 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_352M:
- stolen = 352 * 1024 * 1024;
- break;
- default:
- DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
- tmp & INTEL_GMCH_GMS_MASK);
- return -1;
+ /* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
+ if (mchbar_addr &&
+ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+ return 0;
+#endif
+
+ /* Get some space for it */
+ dev_priv->mch_res.name = "i915 MCHBAR";
+ dev_priv->mch_res.flags = IORESOURCE_MEM;
+ ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
+ &dev_priv->mch_res,
+ MCHBAR_SIZE, MCHBAR_SIZE,
+ PCIBIOS_MIN_MEM,
+ 0, pcibios_align_resource,
+ dev_priv->bridge_dev);
+ if (ret) {
+ DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+ dev_priv->mch_res.start = 0;
+ return ret;
}
- *preallocated_size = stolen - overhead;
- *start = overhead;
+ if (INTEL_INFO(dev)->gen >= 4)
+ pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+ upper_32_bits(dev_priv->mch_res.start));
+
+ pci_write_config_dword(dev_priv->bridge_dev, reg,
+ lower_32_bits(dev_priv->mch_res.start));
return 0;
}
-#define PTE_ADDRESS_MASK 0xfffff000
-#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
-#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
-#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
-#define PTE_MAPPING_TYPE_CACHED (3 << 1)
-#define PTE_MAPPING_TYPE_MASK (3 << 1)
-#define PTE_VALID (1 << 0)
-
-/**
- * i915_gtt_to_phys - take a GTT address and turn it into a physical one
- * @dev: drm device
- * @gtt_addr: address to translate
- *
- * Some chip functions require allocations from stolen space but need the
- * physical address of the memory in question. We use this routine
- * to get a physical address suitable for register programming from a given
- * GTT address.
- */
-static unsigned long i915_gtt_to_phys(struct drm_device *dev,
- unsigned long gtt_addr)
+/* Set up MCHBAR if possible; flag mchbar_need_disable if it must be disabled again on teardown */
+static void
+intel_setup_mchbar(struct drm_device *dev)
{
- unsigned long *gtt;
- unsigned long entry, phys;
- int gtt_bar = IS_I9XX(dev) ? 0 : 1;
- int gtt_offset, gtt_size;
-
- if (IS_I965G(dev)) {
- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
- gtt_offset = 2*1024*1024;
- gtt_size = 2*1024*1024;
- } else {
- gtt_offset = 512*1024;
- gtt_size = 512*1024;
- }
- } else {
- gtt_bar = 3;
- gtt_offset = 0;
- gtt_size = pci_resource_len(dev->pdev, gtt_bar);
- }
-
- gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
- gtt_size);
- if (!gtt) {
- DRM_ERROR("ioremap of GTT failed\n");
- return 0;
- }
-
- entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
-
- DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+ bool enabled;
- /* Mask out these reserved bits on this hardware. */
- if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
- IS_I945G(dev) || IS_I945GM(dev)) {
- entry &= ~PTE_ADDRESS_MASK_HIGH;
- }
+ if (IS_VALLEYVIEW(dev))
+ return;
- /* If it's not a mapping type we know, then bail. */
- if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
- (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
- iounmap(gtt);
- return 0;
- }
+ dev_priv->mchbar_need_disable = false;
- if (!(entry & PTE_VALID)) {
- DRM_ERROR("bad GTT entry in stolen space\n");
- iounmap(gtt);
- return 0;
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+ enabled = !!(temp & DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ enabled = temp & 1;
}
- iounmap(gtt);
-
- phys =(entry & PTE_ADDRESS_MASK) |
- ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
+ /* If it's already enabled, don't have to do anything */
+ if (enabled)
+ return;
- DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
+ if (intel_alloc_mchbar_resource(dev))
+ return;
- return phys;
-}
+ dev_priv->mchbar_need_disable = true;
-static void i915_warn_stolen(struct drm_device *dev)
-{
- DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
- DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+ /* Space is allocated or reserved, so enable it. */
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
+ temp | DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
+ }
}
-static void i915_setup_compression(struct drm_device *dev, int size)
+static void
+intel_teardown_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mm_node *compressed_fb, *compressed_llb;
- unsigned long cfb_base;
- unsigned long ll_base = 0;
-
- /* Leave 1M for line length buffer & misc. */
- compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
- if (!compressed_fb) {
- i915_warn_stolen(dev);
- return;
- }
-
- compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
- if (!compressed_fb) {
- i915_warn_stolen(dev);
- return;
- }
-
- cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
- if (!cfb_base) {
- DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
- drm_mm_put_block(compressed_fb);
- }
-
- if (!IS_GM45(dev)) {
- compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
- 4096, 0);
- if (!compressed_llb) {
- i915_warn_stolen(dev);
- return;
- }
-
- compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
- if (!compressed_llb) {
- i915_warn_stolen(dev);
- return;
- }
-
- ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
- if (!ll_base) {
- DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
- drm_mm_put_block(compressed_fb);
- drm_mm_put_block(compressed_llb);
+ int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+
+ if (dev_priv->mchbar_need_disable) {
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+ temp &= ~DEVEN_MCHBAR_EN;
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ temp &= ~1;
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
}
}
- dev_priv->cfb_size = size;
-
- if (IS_GM45(dev)) {
- g4x_disable_fbc(dev);
- I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
- } else {
- i8xx_disable_fbc(dev);
- I915_WRITE(FBC_CFB_BASE, cfb_base);
- I915_WRITE(FBC_LL_BASE, ll_base);
- }
-
- DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
- ll_base, size >> 20);
+ if (dev_priv->mch_res.start)
+ release_resource(&dev_priv->mch_res);
}
/* true = enable decode, false = disable decoder */
@@ -1193,89 +1262,140 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
-static int i915_load_modeset_init(struct drm_device *dev,
- unsigned long prealloc_start,
- unsigned long prealloc_size,
- unsigned long agp_size)
+static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int fb_bar = IS_I9XX(dev) ? 2 : 0;
- int ret = 0;
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+ if (state == VGA_SWITCHEROO_ON) {
+ pr_info("switched on\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ /* i915 resume handler doesn't set to D0 */
+ pci_set_power_state(dev->pdev, PCI_D0);
+ i915_resume(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ } else {
+ pr_err("switched off\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ i915_suspend(dev, pmm);
+ dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+ }
+}
- dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
- 0xff000000;
+static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
- /* Basic memrange allocator for stolen space (aka vram) */
- drm_mm_init(&dev_priv->vram, 0, prealloc_size);
- DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
+ /*
+ * FIXME: open_count is protected by drm_global_mutex but that would lead to
+ * locking inversion with the driver load path. And the access here is
+ * completely racy anyway. So don't bother with locking for now.
+ */
+ return dev->open_count == 0;
+}
- /* We're off and running w/KMS */
- dev_priv->mm.suspended = 0;
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+ .set_gpu_state = i915_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = i915_switcheroo_can_switch,
+};
- /* Let GEM Manage from end of prealloc space to end of aperture.
- *
- * However, leave one page at the end still bound to the scratch page.
- * There are a number of places where the hardware apparently
- * prefetches past the end of the object, and we've seen multiple
- * hangs with the GPU head pointer stuck in a batchbuffer bound
- * at the last page of the aperture. One page should be enough to
- * keep any prefetching inside of the aperture.
- */
- i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
+static int i915_load_modeset_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
- mutex_lock(&dev->struct_mutex);
- ret = i915_gem_init_ringbuffer(dev);
- mutex_unlock(&dev->struct_mutex);
+ ret = intel_parse_bios(dev);
if (ret)
+ DRM_INFO("failed to find VBIOS tables\n");
+
+ /* If we have > 1 VGA cards, then we need to arbitrate access
+ * to the common VGA resources.
+ *
+ * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+ * then we do not take part in VGA arbitration and the
+ * vga_client_register() fails with -ENODEV.
+ */
+ ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+ if (ret && ret != -ENODEV)
goto out;
- /* Try to set up FBC with a reasonable compressed buffer size */
- if (I915_HAS_FBC(dev) && i915_powersave) {
- int cfb_size;
+ intel_register_dsm_handler();
- /* Try to get an 8M buffer... */
- if (prealloc_size > (9*1024*1024))
- cfb_size = 8*1024*1024;
- else /* fall back to 7/8 of the stolen space */
- cfb_size = prealloc_size * 7 / 8;
- i915_setup_compression(dev, cfb_size);
- }
+ ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
+ if (ret)
+ goto cleanup_vga_client;
- /* Allow hardware batchbuffers unless told otherwise.
+ /* Initialise stolen first so that we may reserve preallocated
+ * objects for the BIOS to KMS transition.
*/
- dev_priv->allow_batchbuffer = 1;
-
- ret = intel_init_bios(dev);
+ ret = i915_gem_init_stolen(dev);
if (ret)
- DRM_INFO("failed to find VBIOS tables\n");
+ goto cleanup_vga_switcheroo;
- /* if we have > 1 VGA cards, then disable the radeon VGA resources */
- ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+ intel_power_domains_init_hw(dev_priv);
+
+ ret = drm_irq_install(dev, dev->pdev->irq);
if (ret)
- goto destroy_ringbuffer;
+ goto cleanup_gem_stolen;
+ /* Important: The output setup functions called by modeset_init need
+ * working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
- ret = drm_irq_install(dev);
+ ret = i915_gem_init(dev);
if (ret)
- goto destroy_ringbuffer;
+ goto cleanup_irq;
+
+ INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
+
+ intel_modeset_gem_init(dev);
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
- dev->vblank_disable_allowed = 1;
+ dev->vblank_disable_allowed = true;
+ if (INTEL_INFO(dev)->num_pipes == 0)
+ return 0;
+
+ ret = intel_fbdev_init(dev);
+ if (ret)
+ goto cleanup_gem;
+
+ /* Only enable hotplug handling once the fbdev is fully set up. */
+ intel_hpd_init(dev);
/*
- * Initialize the hardware status page IRQ location.
+ * Some ports require correctly set-up hpd registers for detection to
+ * work properly (leading to ghost connected connector status), e.g. VGA
+ * on gm45. Hence we can only set up the initial fbdev config after hpd
+ * irqs are fully enabled. Now we should scan for the initial config
+ * only once hotplug handling is enabled, but due to screwed-up locking
+	 * around kms/fbdev init we can't protect the fbdev initial config
+ * scanning against hotplug events. Hence do this first and ignore the
+	 * tiny window where we will lose hotplug notifications.
*/
+ intel_fbdev_initial_config(dev);
- I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
+ /* Only enable hotplug handling once the fbdev is fully set up. */
+ dev_priv->enable_hotplug_processing = true;
- drm_helper_initial_config(dev);
+ drm_kms_helper_poll_init(dev);
return 0;
-destroy_ringbuffer:
+cleanup_gem:
+ mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_context_fini(dev);
+ mutex_unlock(&dev->struct_mutex);
+ WARN_ON(dev_priv->mm.aliasing_ppgtt);
+cleanup_irq:
+ drm_irq_uninstall(dev);
+cleanup_gem_stolen:
+ i915_gem_cleanup_stolen(dev);
+cleanup_vga_switcheroo:
+ vga_switcheroo_unregister_client(dev->pdev);
+cleanup_vga_client:
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
return ret;
}
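i915_load_modeset_init() above is a textbook example of the kernel's goto-based unwind: each successful acquisition gets a cleanup label, and an error jumps to the label that releases everything taken so far, in reverse order. A hedged generic sketch (all helpers hypothetical):

static int setup_example(struct example *ex)
{
	int ret;

	ret = acquire_a(ex);		/* hypothetical */
	if (ret)
		goto out;

	ret = acquire_b(ex);
	if (ret)
		goto cleanup_a;

	ret = acquire_c(ex);
	if (ret)
		goto cleanup_b;

	return 0;

cleanup_b:
	release_b(ex);			/* undo in reverse order */
cleanup_a:
	release_a(ex);
out:
	return ret;
}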
@@ -1304,41 +1424,138 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
master->driver_priv = NULL;
}
-static void i915_get_mem_freq(struct drm_device *dev)
+#if IS_ENABLED(CONFIG_FB)
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 tmp;
+ struct apertures_struct *ap;
+ struct pci_dev *pdev = dev_priv->dev->pdev;
+ bool primary;
- if (!IS_PINEVIEW(dev))
+ ap = alloc_apertures(1);
+ if (!ap)
return;
- tmp = I915_READ(CLKCFG);
+ ap->ranges[0].base = dev_priv->gtt.mappable_base;
+ ap->ranges[0].size = dev_priv->gtt.mappable_end;
- switch (tmp & CLKCFG_FSB_MASK) {
- case CLKCFG_FSB_533:
- dev_priv->fsb_freq = 533; /* 133*4 */
- break;
- case CLKCFG_FSB_800:
- dev_priv->fsb_freq = 800; /* 200*4 */
- break;
- case CLKCFG_FSB_667:
- dev_priv->fsb_freq = 667; /* 167*4 */
- break;
- case CLKCFG_FSB_400:
- dev_priv->fsb_freq = 400; /* 100*4 */
- break;
+ primary =
+ pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+ remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+ kfree(ap);
+}
+#else
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+}
+#endif
+
+#if !defined(CONFIG_VGA_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+ return 0;
+}
+#elif !defined(CONFIG_DUMMY_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+ return -ENODEV;
+}
+#else
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+ int ret = 0;
+
+ DRM_INFO("Replacing VGA console driver\n");
+
+ console_lock();
+ if (con_is_bound(&vga_con))
+ ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
+ if (ret == 0) {
+ ret = do_unregister_con_driver(&vga_con);
+
+ /* Ignore "already unregistered". */
+ if (ret == -ENODEV)
+ ret = 0;
}
+ console_unlock();
- switch (tmp & CLKCFG_MEM_MASK) {
- case CLKCFG_MEM_533:
- dev_priv->mem_freq = 533;
- break;
- case CLKCFG_MEM_667:
- dev_priv->mem_freq = 667;
- break;
- case CLKCFG_MEM_800:
- dev_priv->mem_freq = 800;
- break;
+ return ret;
+}
+#endif
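Both kick-out helpers above use the Kconfig stub pattern: the real body is compiled only when the required subsystem is built in, and a trivial stub keeps callers free of #ifdefs. A hedged generic sketch (CONFIG_SOME_SUBSYS and the helpers are hypothetical):

#if IS_ENABLED(CONFIG_SOME_SUBSYS)
static int kick_out_something(struct my_priv *priv)
{
	return do_real_takeover(priv);	/* hypothetical helper */
}
#else
static int kick_out_something(struct my_priv *priv)
{
	return 0;	/* subsystem absent: nothing to displace */
}
#endif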
+
+static void i915_dump_device_info(struct drm_i915_private *dev_priv)
+{
+ const struct intel_device_info *info = &dev_priv->info;
+
+#define PRINT_S(name) "%s"
+#define SEP_EMPTY
+#define PRINT_FLAG(name) info->name ? #name "," : ""
+#define SEP_COMMA ,
+ DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+ DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
+ info->gen,
+ dev_priv->dev->pdev->device,
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
+#undef PRINT_S
+#undef SEP_EMPTY
+#undef PRINT_FLAG
+#undef SEP_COMMA
+}
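The PRINT_S/PRINT_FLAG pair above is an X-macro: DEV_INFO_FOR_EACH_FLAG is expanded twice, once to build the format string and once to build the matching argument list. A hedged standalone sketch of the same trick (MY_FLAGS and struct my_info are hypothetical):

struct my_info { int has_llc, has_fbc; };

#define MY_FLAGS(func, sep) func(has_llc) sep func(has_fbc)

static void dump_flags(const struct my_info *info)
{
#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) (info->name ? #name "," : "")
#define SEP_COMMA ,
	/* one expansion builds the format string, the other the arguments */
	printk(KERN_DEBUG "flags=" MY_FLAGS(PRINT_S, SEP_EMPTY) "\n",
	       MY_FLAGS(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}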
+
+/*
+ * Determine various intel_device_info fields at runtime.
+ *
+ * Use it when either:
+ * - it's judged too laborious to fill n static structures with the limit
+ * when a simple if statement does the job,
+ * - run-time checks (eg read fuse/strap registers) are needed.
+ *
+ * This function needs to be called:
+ * - after the MMIO has been setup as we are reading registers,
+ * - after the PCH has been detected,
+ * - before the first usage of the fields it can tweak.
+ */
+static void intel_device_info_runtime_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_device_info *info;
+ enum pipe pipe;
+
+ info = (struct intel_device_info *)&dev_priv->info;
+
+ if (IS_VALLEYVIEW(dev))
+ for_each_pipe(pipe)
+ info->num_sprites[pipe] = 2;
+ else
+ for_each_pipe(pipe)
+ info->num_sprites[pipe] = 1;
+
+ if (i915.disable_display) {
+ DRM_INFO("Display disabled (module parameter)\n");
+ info->num_pipes = 0;
+ } else if (info->num_pipes > 0 &&
+ (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
+ !IS_VALLEYVIEW(dev)) {
+ u32 fuse_strap = I915_READ(FUSE_STRAP);
+ u32 sfuse_strap = I915_READ(SFUSE_STRAP);
+
+ /*
+ * SFUSE_STRAP is supposed to have a bit signalling the display
+ * is fused off. Unfortunately it seems that, at least in
+ * certain cases, fused off display means that PCH display
+ * reads don't land anywhere. In that case, we read 0s.
+ *
+ * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
+ * should be set when taking over after the firmware.
+ */
+ if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
+ sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
+ (dev_priv->pch_type == PCH_CPT &&
+ !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
+ DRM_INFO("Display fused off, disabling\n");
+ info->num_pipes = 0;
+ }
}
}
@@ -1355,106 +1572,163 @@ static void i915_get_mem_freq(struct drm_device *dev)
*/
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- resource_size_t base, size;
- int ret = 0, mmio_bar;
- uint32_t agp_size, prealloc_size, prealloc_start;
-
- /* i915 has 4 more counters */
- dev->counters += 4;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
- dev->types[9] = _DRM_STAT_DMA;
-
- dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
+ struct drm_i915_private *dev_priv;
+ struct intel_device_info *info, *device_info;
+ int ret = 0, mmio_bar, mmio_size;
+ uint32_t aperture_size;
+
+ info = (struct intel_device_info *) flags;
+
+ /* Refuse to load on gen6+ without kms enabled. */
+ if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
+ DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
+ DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
+ return -ENODEV;
+ }
+
+ /* UMS needs agp support. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
+ return -EINVAL;
+
+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
- dev_priv->info = (struct intel_device_info *) flags;
- /* Add register map (needed for suspend/resume) */
- mmio_bar = IS_I9XX(dev) ? 0 : 1;
- base = drm_get_resource_start(dev, mmio_bar);
- size = drm_get_resource_len(dev, mmio_bar);
+ /* copy initial configuration to dev_priv->info */
+ device_info = (struct intel_device_info *)&dev_priv->info;
+ *device_info = *info;
+
+ spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
+ spin_lock_init(&dev_priv->backlight_lock);
+ spin_lock_init(&dev_priv->uncore.lock);
+ spin_lock_init(&dev_priv->mm.object_stat_lock);
+ mutex_init(&dev_priv->dpio_lock);
+ mutex_init(&dev_priv->modeset_restore_lock);
+
+ intel_pm_setup(dev);
+
+ intel_display_crc_init(dev);
+
+ i915_dump_device_info(dev_priv);
+
+ /* Not all pre-production machines fall into this category, only the
+ * very first ones. Almost everything should work, except for maybe
+ * suspend/resume. And we don't implement workarounds that affect only
+ * pre-production machines. */
+ if (IS_HSW_EARLY_SDV(dev))
+ DRM_INFO("This is an early pre-production Haswell machine. "
+ "It may not be fully functional.\n");
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
}
- dev_priv->regs = ioremap(base, size);
+ mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ /* Before gen4, the registers and the GTT are behind different BARs.
+ * However, from gen4 onwards, the registers and the GTT are shared
+ * in the same BAR, so we want to restrict this ioremap from
+	 * clobbering the GTT, which we want mapped with ioremap_wc instead. Fortunately,
+ * the register BAR remains the same size for all the earlier
+ * generations up to Ironlake.
+ */
+ if (info->gen < 5)
+ mmio_size = 512*1024;
+ else
+ mmio_size = 2*1024*1024;
+
+ dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
goto put_bridge;
}
- dev_priv->mm.gtt_mapping =
- io_mapping_create_wc(dev->agp->base,
- dev->agp->agp_info.aper_size * 1024*1024);
- if (dev_priv->mm.gtt_mapping == NULL) {
- ret = -EIO;
- goto out_rmmap;
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_detect_pch(dev);
+
+ intel_uncore_init(dev);
+
+ ret = i915_gem_gtt_init(dev);
+ if (ret)
+ goto out_regs;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = i915_kick_out_vgacon(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting VGA console\n");
+ goto out_gtt;
+ }
+
+ i915_kick_out_firmware_fb(dev_priv);
}
- /* Set up a WC MTRR for non-PAT systems. This is more common than
- * one would think, because the kernel disables PAT on first
- * generation Core chips because WC PAT gets overridden by a UC
- * MTRR if present. Even if a UC MTRR isn't present.
+ pci_set_master(dev->pdev);
+
+ /* overlay on gen2 is broken and can't address above 1G */
+ if (IS_GEN2(dev))
+ dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+
+ /* 965GM sometimes incorrectly writes to hardware status page (HWS)
+ * using 32bit addressing, overwriting memory if HWS is located
+ * above 4GB.
+ *
+ * The documentation also mentions an issue with undefined
+ * behaviour if any general state is accessed within a page above 4GB,
+ * which also needs to be handled carefully.
*/
- dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024,
- MTRR_TYPE_WRCOMB, 1);
- if (dev_priv->mm.gtt_mtrr < 0) {
- DRM_INFO("MTRR allocation failed. Graphics "
- "performance may suffer.\n");
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
+ aperture_size = dev_priv->gtt.mappable_end;
+
+ dev_priv->gtt.mappable =
+ io_mapping_create_wc(dev_priv->gtt.mappable_base,
+ aperture_size);
+ if (dev_priv->gtt.mappable == NULL) {
+ ret = -EIO;
+ goto out_gtt;
}
- ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
- if (ret)
- goto out_iomapfree;
+ dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+ aperture_size);
- dev_priv->wq = create_singlethread_workqueue("i915");
+ /* The i915 workqueue is primarily used for batched retirement of
+ * requests (and thus managing bo) once the task has been completed
+ * by the GPU. i915_gem_retire_requests() is called directly when we
+ * need high-priority retirement, such as waiting for an explicit
+ * bo.
+ *
+ * It is also used for periodic low-priority events, such as
+ * idle-timers and recording error state.
+ *
+ * All tasks on the workqueue are expected to acquire the dev mutex
+ * so there is no point in running more than one instance of the
+ * workqueue at any time. Use an ordered one.
+ */
+ dev_priv->wq = alloc_ordered_workqueue("i915", 0);
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
- goto out_iomapfree;
+ goto out_mtrrfree;
}
- /* enable GEM by default */
- dev_priv->has_gem = 1;
+ intel_irq_init(dev);
+ intel_uncore_sanitize(dev);
- if (prealloc_size > agp_size * 3 / 4) {
- DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
- "memory stolen.\n",
- prealloc_size / 1024, agp_size / 1024);
- DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
- "updating the BIOS to fix).\n");
- dev_priv->has_gem = 0;
- }
+ /* Try to make sure MCHBAR is enabled before poking at it */
+ intel_setup_mchbar(dev);
+ intel_setup_gmbus(dev);
+ intel_opregion_setup(dev);
- dev->driver->get_vblank_counter = i915_get_vblank_counter;
- dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
- dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
- dev->driver->get_vblank_counter = gm45_get_vblank_counter;
- }
+ intel_setup_bios(dev);
i915_gem_load(dev);
- /* Init HWS */
- if (!I915_NEED_GFX_HWS(dev)) {
- ret = i915_init_phys_hws(dev);
- if (ret != 0)
- goto out_workqueue_free;
- }
-
- i915_get_mem_freq(dev);
-
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
@@ -1469,46 +1743,69 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
- spin_lock_init(&dev_priv->user_irq_lock);
- spin_lock_init(&dev_priv->error_lock);
- dev_priv->user_irq_refcount = 0;
- dev_priv->trace_irq_seqno = 0;
-
- ret = drm_vblank_init(dev, I915_NUM_PIPE);
+ intel_device_info_runtime_init(dev);
- if (ret) {
- (void) i915_driver_unload(dev);
- return ret;
+ if (INTEL_INFO(dev)->num_pipes) {
+ ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
+ if (ret)
+ goto out_gem_unload;
}
- /* Start out suspended */
- dev_priv->mm.suspended = 1;
+ intel_power_domains_init(dev_priv);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_load_modeset_init(dev, prealloc_start,
- prealloc_size, agp_size);
+ ret = i915_load_modeset_init(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
- goto out_workqueue_free;
+ goto out_power_well;
}
+ } else {
+ /* Start out suspended in ums mode. */
+ dev_priv->ums.mm_suspended = 1;
}
- /* Must be done after probing outputs */
- intel_opregion_init(dev, 0);
+ i915_setup_sysfs(dev);
+
+ if (INTEL_INFO(dev)->num_pipes) {
+ /* Must be done after probing outputs */
+ intel_opregion_init(dev);
+ acpi_video_register();
+ }
+
+ if (IS_GEN5(dev))
+ intel_gpu_ips_init(dev_priv);
+
+ intel_init_runtime_pm(dev_priv);
- setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
- (unsigned long) dev);
return 0;
-out_workqueue_free:
+out_power_well:
+ intel_power_domains_remove(dev_priv);
+ drm_vblank_cleanup(dev);
+out_gem_unload:
+ WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
+ unregister_shrinker(&dev_priv->mm.shrinker);
+
+ if (dev->pdev->msi_enabled)
+ pci_disable_msi(dev->pdev);
+
+ intel_teardown_gmbus(dev);
+ intel_teardown_mchbar(dev);
+ pm_qos_remove_request(&dev_priv->pm_qos);
destroy_workqueue(dev_priv->wq);
-out_iomapfree:
- io_mapping_free(dev_priv->mm.gtt_mapping);
-out_rmmap:
- iounmap(dev_priv->regs);
+out_mtrrfree:
+ arch_phys_wc_del(dev_priv->gtt.mtrr);
+ io_mapping_free(dev_priv->gtt.mappable);
+out_gtt:
+ dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
+out_regs:
+ intel_uncore_fini(dev);
+ pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
+ if (dev_priv->slab)
+ kmem_cache_destroy(dev_priv->slab);
kfree(dev_priv);
return ret;
}
@@ -1516,73 +1813,110 @@ free_priv:
int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
- destroy_workqueue(dev_priv->wq);
- del_timer_sync(&dev_priv->hangcheck_timer);
-
- io_mapping_free(dev_priv->mm.gtt_mapping);
- if (dev_priv->mm.gtt_mtrr >= 0) {
- mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
- dev->agp->agp_info.aper_size * 1024 * 1024);
- dev_priv->mm.gtt_mtrr = -1;
+ ret = i915_gem_suspend(dev);
+ if (ret) {
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+ return ret;
}
+ intel_fini_runtime_pm(dev_priv);
+
+ intel_gpu_ips_teardown();
+
+ /* The i915.ko module is still not prepared to be loaded when
+ * the power well is not enabled, so just enable it in case
+ * we're going to unload/reload. */
+ intel_display_set_init_power(dev_priv, true);
+ intel_power_domains_remove(dev_priv);
+
+ i915_teardown_sysfs(dev);
+
+ WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
+ unregister_shrinker(&dev_priv->mm.shrinker);
+
+ io_mapping_free(dev_priv->gtt.mappable);
+ arch_phys_wc_del(dev_priv->gtt.mtrr);
+
+ acpi_video_unregister();
+
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_fbdev_fini(dev);
+ intel_modeset_cleanup(dev);
+ cancel_work_sync(&dev_priv->console_resume_work);
+
/*
* free the memory space allocated for the child device
* config parsed from VBT
*/
- if (dev_priv->child_dev && dev_priv->child_dev_num) {
- kfree(dev_priv->child_dev);
- dev_priv->child_dev = NULL;
- dev_priv->child_dev_num = 0;
+ if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
+ kfree(dev_priv->vbt.child_dev);
+ dev_priv->vbt.child_dev = NULL;
+ dev_priv->vbt.child_dev_num = 0;
}
- drm_irq_uninstall(dev);
+
+ vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
+ /* Free error state after interrupts are fully disabled. */
+ del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+ cancel_work_sync(&dev_priv->gpu_error.work);
+ i915_destroy_error_state(dev);
+
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
- if (dev_priv->regs != NULL)
- iounmap(dev_priv->regs);
-
- intel_opregion_free(dev, 0);
+ intel_opregion_fini(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_modeset_cleanup(dev);
-
- i915_gem_free_all_phys_object(dev);
+ /* Flush any outstanding unpin_work. */
+ flush_workqueue(dev_priv->wq);
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_context_fini(dev);
+ WARN_ON(dev_priv->mm.aliasing_ppgtt);
mutex_unlock(&dev->struct_mutex);
- drm_mm_takedown(&dev_priv->vram);
- i915_gem_lastclose(dev);
+ i915_gem_cleanup_stolen(dev);
- intel_cleanup_overlay(dev);
+ if (!I915_NEED_GFX_HWS(dev))
+ i915_free_hws(dev);
}
+ WARN_ON(!list_empty(&dev_priv->vm_list));
+
+ drm_vblank_cleanup(dev);
+
+ intel_teardown_gmbus(dev);
+ intel_teardown_mchbar(dev);
+
+ destroy_workqueue(dev_priv->wq);
+ pm_qos_remove_request(&dev_priv->pm_qos);
+
+ dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
+
+ intel_uncore_fini(dev);
+ if (dev_priv->regs != NULL)
+ pci_iounmap(dev->pdev, dev_priv->regs);
+
+ if (dev_priv->slab)
+ kmem_cache_destroy(dev_priv->slab);
+
pci_dev_put(dev_priv->bridge_dev);
- kfree(dev->dev_private);
+ kfree(dev_priv);
return 0;
}
-int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *i915_file_priv;
-
- DRM_DEBUG_DRIVER("\n");
- i915_file_priv = (struct drm_i915_file_private *)
- kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
-
- if (!i915_file_priv)
- return -ENOMEM;
-
- file_priv->driver_priv = i915_file_priv;
+ int ret;
- INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
+ ret = i915_gem_open(dev, file);
+ if (ret)
+ return ret;
return 0;
}
@@ -1595,97 +1929,107 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
* mode setting case, we want to restore the kernel's initial mode (just
* in case the last client left us in a bad state).
*
- * Additionally, in the non-mode setting case, we'll tear down the AGP
+ * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
* up any GEM state.
*/
void i915_driver_lastclose(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* On gen6+ we refuse to init without kms enabled, but then the drm core
+ * goes right around and calls lastclose. Check for this and don't clean
+ * up anything. */
+ if (!dev_priv)
+ return;
- if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
- drm_fb_helper_restore();
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_fbdev_restore_mode(dev);
+ vga_switcheroo_process_delayed_switch();
return;
}
i915_gem_lastclose(dev);
- if (dev_priv->agp_heap)
- i915_mem_takedown(&(dev_priv->agp_heap));
-
i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_context_close(dev, file_priv);
i915_gem_release(dev, file_priv);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_mem_release(dev, file_priv, dev_priv->agp_heap);
+ mutex_unlock(&dev->struct_mutex);
}
-void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
- kfree(i915_file_priv);
+ if (file_priv && file_priv->bsd_ring)
+ file_priv->bsd_ring = NULL;
+ kfree(file_priv);
}
-struct drm_ioctl_desc i915_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
- DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
- DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
- DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
+const struct drm_ioctl_desc i915_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
-int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
+int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
-/**
- * Determine if the device really is AGP or not.
- *
- * All Intel graphics chipsets are treated as AGP, even if they are really
- * PCI-e.
- *
- * \param dev The device to be tested.
- *
- * \returns
- * A value of 1 is always retured to indictate every i9x5 is AGP.
+/*
+ * This is really ugly: Because old userspace abused the linux agp interface to
+ * manage the gtt, we need to claim that all intel devices are agp. For
+ * otherwise the drm core refuses to initialize the agp support code.
*/
int i915_driver_device_is_agp(struct drm_device * dev)
{
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index be631cc3e4d..651e65e051c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -28,142 +28,375 @@
*/
#include <linux/device.h>
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
#include <linux/console.h>
-#include "drm_crtc_helper.h"
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <drm/drm_crtc_helper.h>
-static int i915_modeset = -1;
-module_param_named(modeset, i915_modeset, int, 0400);
+static struct drm_driver driver;
-unsigned int i915_fbpercrtc = 0;
-module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
+#define GEN_DEFAULT_PIPEOFFSETS \
+ .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+ PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
+ .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+ TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
+ .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
+ .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
+ .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+
+#define GEN_CHV_PIPEOFFSETS \
+ .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+ CHV_PIPE_C_OFFSET }, \
+ .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+ CHV_TRANSCODER_C_OFFSET, }, \
+ .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \
+ CHV_DPLL_C_OFFSET }, \
+ .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \
+ CHV_DPLL_C_MD_OFFSET }, \
+ .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
+ CHV_PALETTE_C_OFFSET }
+
+#define CURSOR_OFFSETS \
+ .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
+
+#define IVB_CURSOR_OFFSETS \
+ .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
+
+static const struct intel_device_info intel_i830_info = {
+ .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
-unsigned int i915_powersave = 1;
-module_param_named(powersave, i915_powersave, int, 0400);
+static const struct intel_device_info intel_845g_info = {
+ .gen = 2, .num_pipes = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
-static struct drm_driver driver;
+static const struct intel_device_info intel_i85x_info = {
+ .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
+ .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
-#define INTEL_VGA_DEVICE(id, info) { \
- .class = PCI_CLASS_DISPLAY_VGA << 8, \
- .class_mask = 0xffff00, \
- .vendor = 0x8086, \
- .device = id, \
- .subvendor = PCI_ANY_ID, \
- .subdevice = PCI_ANY_ID, \
- .driver_data = (unsigned long) info }
+static const struct intel_device_info intel_i865g_info = {
+ .gen = 2, .num_pipes = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
-const static struct intel_device_info intel_i830_info = {
- .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+static const struct intel_device_info intel_i915g_info = {
+ .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i915gm_info = {
+ .gen = 3, .is_mobile = 1, .num_pipes = 2,
+ .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i945g_info = {
+ .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i945gm_info = {
+ .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
+ .has_hotplug = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
-const static struct intel_device_info intel_845g_info = {
- .is_i8xx = 1,
+static const struct intel_device_info intel_i965g_info = {
+ .gen = 4, .is_broadwater = 1, .num_pipes = 2,
+ .has_hotplug = 1,
+ .has_overlay = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
-const static struct intel_device_info intel_i85x_info = {
- .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+static const struct intel_device_info intel_i965gm_info = {
+ .gen = 4, .is_crestline = 1, .num_pipes = 2,
+ .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+ .supports_tv = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
-const static struct intel_device_info intel_i865g_info = {
- .is_i8xx = 1,
+static const struct intel_device_info intel_g33_info = {
+ .gen = 3, .is_g33 = 1, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
-const static struct intel_device_info intel_i915g_info = {
- .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+static const struct intel_device_info intel_g45_info = {
+ .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
-const static struct intel_device_info intel_i915gm_info = {
- .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
- .cursor_needs_physical = 1,
+
+static const struct intel_device_info intel_gm45_info = {
+ .gen = 4, .is_g4x = 1, .num_pipes = 2,
+ .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .supports_tv = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
-const static struct intel_device_info intel_i945g_info = {
- .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+
+static const struct intel_device_info intel_pineview_info = {
+ .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
-const static struct intel_device_info intel_i945gm_info = {
- .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
- .has_hotplug = 1, .cursor_needs_physical = 1,
+
+static const struct intel_device_info intel_ironlake_d_info = {
+ .gen = 5, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
-const static struct intel_device_info intel_i965g_info = {
- .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+static const struct intel_device_info intel_ironlake_m_info = {
+ .gen = 5, .is_mobile = 1, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
-const static struct intel_device_info intel_i965gm_info = {
- .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
- .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
- .has_hotplug = 1,
+static const struct intel_device_info intel_sandybridge_d_info = {
+ .gen = 6, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+ .has_llc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
-const static struct intel_device_info intel_g33_info = {
- .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_hotplug = 1,
+static const struct intel_device_info intel_sandybridge_m_info = {
+ .gen = 6, .is_mobile = 1, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+ .has_llc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
-const static struct intel_device_info intel_g45_info = {
- .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_pipe_cxsr = 1,
- .has_hotplug = 1,
+#define GEN7_FEATURES \
+ .gen = 7, .num_pipes = 3, \
+ .need_gfx_hws = 1, .has_hotplug = 1, \
+ .has_fbc = 1, \
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .has_llc = 1
+
+static const struct intel_device_info intel_ivybridge_d_info = {
+ GEN7_FEATURES,
+ .is_ivybridge = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
};
-const static struct intel_device_info intel_gm45_info = {
- .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
- .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
- .has_pipe_cxsr = 1,
- .has_hotplug = 1,
+static const struct intel_device_info intel_ivybridge_m_info = {
+ GEN7_FEATURES,
+ .is_ivybridge = 1,
+ .is_mobile = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
};
-const static struct intel_device_info intel_pineview_info = {
- .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
- .has_pipe_cxsr = 1,
- .has_hotplug = 1,
+static const struct intel_device_info intel_ivybridge_q_info = {
+ GEN7_FEATURES,
+ .is_ivybridge = 1,
+ .num_pipes = 0, /* legal, last one wins */
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
};
-const static struct intel_device_info intel_ironlake_d_info = {
- .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_pipe_cxsr = 1,
- .has_hotplug = 1,
+static const struct intel_device_info intel_valleyview_m_info = {
+ GEN7_FEATURES,
+ .is_mobile = 1,
+ .num_pipes = 2,
+ .is_valleyview = 1,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
+ .has_fbc = 0, /* legal, last one wins */
+ .has_llc = 0, /* legal, last one wins */
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
-const static struct intel_device_info intel_ironlake_m_info = {
- .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
- .need_gfx_hws = 1, .has_rc6 = 1,
- .has_hotplug = 1,
+static const struct intel_device_info intel_valleyview_d_info = {
+ GEN7_FEATURES,
+ .num_pipes = 2,
+ .is_valleyview = 1,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
+ .has_fbc = 0, /* legal, last one wins */
+ .has_llc = 0, /* legal, last one wins */
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_haswell_d_info = {
+ GEN7_FEATURES,
+ .is_haswell = 1,
+ .has_ddi = 1,
+ .has_fpga_dbg = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_haswell_m_info = {
+ GEN7_FEATURES,
+ .is_haswell = 1,
+ .is_mobile = 1,
+ .has_ddi = 1,
+ .has_fpga_dbg = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_broadwell_d_info = {
+ .gen = 8, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .has_llc = 1,
+ .has_ddi = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_broadwell_m_info = {
+ .gen = 8, .is_mobile = 1, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .has_llc = 1,
+ .has_ddi = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
};
-const static struct pci_device_id pciidlist[] = {
- INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
- INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
- INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
- INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
- INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
- INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
- INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
- INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
- INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
- INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
- INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
- INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
- INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
- INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
- INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
- INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
- INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
- INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
- INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
- INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
- INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
- INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
- INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
- INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
- INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
- INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
- INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
- INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
- INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
- INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+static const struct intel_device_info intel_broadwell_gt3d_info = {
+ .gen = 8, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .has_llc = 1,
+ .has_ddi = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_broadwell_gt3m_info = {
+ .gen = 8, .is_mobile = 1, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .has_llc = 1,
+ .has_ddi = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_cherryview_info = {
+ .is_preliminary = 1,
+ .gen = 8, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .is_valleyview = 1,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
+ GEN_CHV_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+/*
+ * Make sure any device matches here are from most specific to most
+ * general. For example, since the Quanta match is based on the subsystem
+ * and subvendor IDs, we need it to come before the more general IVB
+ * PCI ID matches, otherwise we'll use the wrong info struct above.
+ */
+#define INTEL_PCI_IDS \
+ INTEL_I830_IDS(&intel_i830_info), \
+ INTEL_I845G_IDS(&intel_845g_info), \
+ INTEL_I85X_IDS(&intel_i85x_info), \
+ INTEL_I865G_IDS(&intel_i865g_info), \
+ INTEL_I915G_IDS(&intel_i915g_info), \
+ INTEL_I915GM_IDS(&intel_i915gm_info), \
+ INTEL_I945G_IDS(&intel_i945g_info), \
+ INTEL_I945GM_IDS(&intel_i945gm_info), \
+ INTEL_I965G_IDS(&intel_i965g_info), \
+ INTEL_G33_IDS(&intel_g33_info), \
+ INTEL_I965GM_IDS(&intel_i965gm_info), \
+ INTEL_GM45_IDS(&intel_gm45_info), \
+ INTEL_G45_IDS(&intel_g45_info), \
+ INTEL_PINEVIEW_IDS(&intel_pineview_info), \
+ INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
+ INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
+ INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
+ INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
+ INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
+ INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
+ INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
+ INTEL_HSW_D_IDS(&intel_haswell_d_info), \
+ INTEL_HSW_M_IDS(&intel_haswell_m_info), \
+ INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
+ INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
+ INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
+ INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
+ INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
+ INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
+ INTEL_CHV_IDS(&intel_cherryview_info)
+
+static const struct pci_device_id pciidlist[] = { /* aka */
+ INTEL_PCI_IDS,
{0, 0, 0}
};
@@ -171,32 +404,182 @@ const static struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
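The ordering comment above matters because PCI match tables are scanned front to back; the first matching entry wins. A hedged sketch of the same specific-before-general rule (IDs and info structs are made up):

/* The subsystem-level match must precede the plain device match,
 * or the quirk entry can never win. */
static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE_SUB(0x8086, 0x0166, 0x152d, 0x8990),	/* specific */
	  .driver_data = (kernel_ulong_t)&quirk_info },
	{ PCI_DEVICE(0x8086, 0x0166),				/* generic */
	  .driver_data = (kernel_ulong_t)&default_info },
	{ }
};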
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pch = NULL;
- if (!dev || !dev_priv) {
- DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
- DRM_ERROR("DRM not initialized, aborting suspend.\n");
- return -ENODEV;
+ /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
+ * (which really amounts to a PCH but no South Display).
+ */
+ if (INTEL_INFO(dev)->num_pipes == 0) {
+ dev_priv->pch_type = PCH_NOP;
+ return;
}
- if (state.event == PM_EVENT_PRETHAW)
- return 0;
+ /*
+ * The reason to probe ISA bridge instead of Dev31:Fun0 is to
+ * make graphics device passthrough work easy for VMM, that only
+ * need to expose ISA bridge to let driver know the real hardware
+ * underneath. This is a requirement from virtualization team.
+ *
+ * In some virtualized environments (e.g. XEN), there is irrelevant
+	 * ISA bridge in the system. To work reliably, we should scan through
+ * all the ISA bridge devices and check for the first match, instead
+ * of only checking the first one.
+ */
+ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
+ if (pch->vendor == PCI_VENDOR_ID_INTEL) {
+ unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+ dev_priv->pch_id = id;
+
+ if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_IBX;
+ DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+ WARN_ON(!IS_GEN5(dev));
+ } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_CPT;
+ DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+ } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
+ /* PantherPoint is CPT compatible */
+ dev_priv->pch_type = PCH_CPT;
+ DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+ WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+ } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_LPT;
+ DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev));
+ WARN_ON(IS_ULT(dev));
+ } else if (IS_BROADWELL(dev)) {
+ dev_priv->pch_type = PCH_LPT;
+ dev_priv->pch_id =
+ INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+ DRM_DEBUG_KMS("This is Broadwell, assuming "
+ "LynxPoint LP PCH\n");
+ } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_LPT;
+ DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev));
+ WARN_ON(!IS_ULT(dev));
+ } else
+ continue;
+
+ break;
+ }
+ }
+ if (!pch)
+ DRM_DEBUG_KMS("No PCH found.\n");
+
+ pci_dev_put(pch);
+}
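One subtlety in the scan above: pci_get_class() drops the reference on the device passed in and returns the next match with its refcount raised, so whatever the loop exits with must be released. A hedged skeleton (bridge_matches is a hypothetical predicate):

struct pci_dev *pch = NULL;

while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
	if (bridge_matches(pch))
		break;	/* pch still holds a reference here */
}
/* pch is NULL (no match) or referenced; pci_dev_put(NULL) is a no-op */
pci_dev_put(pch);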
+
+bool i915_semaphore_is_enabled(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ return false;
+
+ if (i915.semaphores >= 0)
+ return i915.semaphores;
+
+ /* Until we get further testing... */
+ if (IS_GEN8(dev))
+ return false;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Enable semaphores on SNB when IO remapping is off */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ return false;
+#endif
+
+ return true;
+}
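i915.semaphores above is read as a tri-state: a negative value means auto-detect, while 0 and 1 force the feature off or on. A hedged sketch of the idiom (my_feature and hw_supports_it are hypothetical):

static int my_feature = -1;	/* -1 = auto, 0 = off, 1 = on */
module_param(my_feature, int, 0400);

static bool my_feature_enabled(struct drm_device *dev)
{
	if (my_feature >= 0)
		return my_feature;	/* explicit user override */

	return hw_supports_it(dev);	/* hypothetical auto-detect */
}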
+
+static int i915_drm_freeze(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+
+ intel_runtime_pm_get(dev_priv);
+
+ /* ignore lid events during suspend */
+ mutex_lock(&dev_priv->modeset_restore_lock);
+ dev_priv->modeset_restore = MODESET_SUSPENDED;
+ mutex_unlock(&dev_priv->modeset_restore_lock);
+
+ /* We do a lot of poking in a lot of registers, make sure they work
+ * properly. */
+ intel_display_set_init_power(dev_priv, true);
+
+ drm_kms_helper_poll_disable(dev);
pci_save_state(dev->pdev);
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- if (i915_gem_idle(dev))
+ int error;
+
+ error = i915_gem_suspend(dev);
+ if (error) {
dev_err(&dev->pdev->dev,
- "GEM idle failed, resume may fail\n");
+ "GEM idle failed, resume might fail\n");
+ return error;
+ }
+
drm_irq_uninstall(dev);
+ dev_priv->enable_hotplug_processing = false;
+
+ intel_disable_gt_powersave(dev);
+
+ /*
+ * Disable CRTCs directly since we want to preserve sw state
+ * for _thaw.
+ */
+ drm_modeset_lock_all(dev);
+ for_each_crtc(dev, crtc) {
+ dev_priv->display.crtc_disable(crtc);
+ }
+ drm_modeset_unlock_all(dev);
+
+ intel_modeset_suspend_hw(dev);
}
+ i915_gem_suspend_gtt_mappings(dev);
+
i915_save_state(dev);
- intel_opregion_free(dev, 1);
+ intel_opregion_fini(dev);
+ intel_uncore_fini(dev);
+
+ console_lock();
+ intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
+ console_unlock();
+
+ dev_priv->suspend_count++;
+
+ return 0;
+}
+
+int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+ int error;
+
+ if (!dev || !dev->dev_private) {
+ DRM_ERROR("dev: %p\n", dev);
+ DRM_ERROR("DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ if (state.event == PM_EVENT_PRETHAW)
+ return 0;
+
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ error = i915_drm_freeze(dev);
+ if (error)
+ return error;
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
@@ -204,51 +587,160 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
pci_set_power_state(dev->pdev, PCI_D3hot);
}
- /* Modeset on resume, not lid events */
- dev_priv->modeset_on_lid = 0;
+ return 0;
+}
+
+void intel_console_resume(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ console_resume_work);
+ struct drm_device *dev = dev_priv->dev;
+
+ console_lock();
+ intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
+ console_unlock();
+}
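intel_console_resume() above recovers its drm_i915_private from the embedded work_struct via container_of(); that is the standard way to hand context to a workqueue callback. A hedged generic sketch:

struct my_priv {
	struct work_struct resume_work;
	/* ... other state ... */
};

static void my_resume_fn(struct work_struct *work)
{
	struct my_priv *priv =
		container_of(work, struct my_priv, resume_work);

	/* operate on priv */
}

/* setup: INIT_WORK(&priv->resume_work, my_resume_fn);
 * later: schedule_work(&priv->resume_work); */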
+
+static int i915_drm_thaw_early(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_uncore_early_sanitize(dev);
+ intel_uncore_sanitize(dev);
+ intel_power_domains_init_hw(dev_priv);
return 0;
}
-static int i915_resume(struct drm_device *dev)
+static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = 0;
- if (pci_enable_device(dev->pdev))
- return -1;
- pci_set_master(dev->pdev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+ restore_gtt_mappings) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
i915_restore_state(dev);
-
- intel_opregion_init(dev, 1);
+ intel_opregion_setup(dev);
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- mutex_lock(&dev->struct_mutex);
- dev_priv->mm.suspended = 0;
+ intel_init_pch_refclk(dev);
+ drm_mode_config_reset(dev);
- ret = i915_gem_init_ringbuffer(dev);
- if (ret != 0)
- ret = -1;
+ mutex_lock(&dev->struct_mutex);
+ if (i915_gem_init_hw(dev)) {
+ DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+ atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+ }
mutex_unlock(&dev->struct_mutex);
- drm_irq_install(dev);
+ /* We need working interrupts for modeset enabling ... */
+ drm_irq_install(dev, dev->pdev->irq);
+
+ intel_modeset_init_hw(dev);
+
+ drm_modeset_lock_all(dev);
+ intel_modeset_setup_hw_state(dev, true);
+ drm_modeset_unlock_all(dev);
+
+ /*
+ * ... but also need to make sure that hotplug processing
+ * doesn't cause havoc. Like in the driver load code we don't
+	 * bother with the tiny race here where we might lose hotplug
+	 * notifications.
+	 */
+ intel_hpd_init(dev);
+ dev_priv->enable_hotplug_processing = true;
+ /* Config may have changed between suspend and resume */
+ drm_helper_hpd_irq_event(dev);
}
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Resume the modeset for every activated CRTC */
- drm_helper_resume_force_mode(dev);
+
+ intel_opregion_init(dev);
+
+ /*
+	 * The console lock can be pretty contended on resume due
+ * to all the printk activity. Try to keep it out of the hot
+ * path of resume if possible.
+ */
+ if (console_trylock()) {
+ intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
+ console_unlock();
+ } else {
+ schedule_work(&dev_priv->console_resume_work);
}
- dev_priv->modeset_on_lid = 0;
+ mutex_lock(&dev_priv->modeset_restore_lock);
+ dev_priv->modeset_restore = MODESET_DONE;
+ mutex_unlock(&dev_priv->modeset_restore_lock);
- return ret;
+ intel_runtime_pm_put(dev_priv);
+ return 0;
+}
+
+static int i915_drm_thaw(struct drm_device *dev)
+{
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_check_and_clear_faults(dev);
+
+ return __i915_drm_thaw(dev, true);
+}
+
+static int i915_resume_early(struct drm_device *dev)
+{
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ /*
+ * We have a resume ordering issue with the snd-hda driver also
+ * requiring our device to be power up. Due to the lack of a
+ * parent/child relationship we currently solve this with an early
+ * resume hook.
+ *
+ * FIXME: This should be solved with a special hdmi sink device or
+ * similar so that power domains can be employed.
+ */
+ if (pci_enable_device(dev->pdev))
+ return -EIO;
+
+ pci_set_master(dev->pdev);
+
+ return i915_drm_thaw_early(dev);
+}
+
+int i915_resume(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /*
+ * Platforms with opregion should have sane BIOS, older ones (gen3 and
+ * earlier) need to restore the GTT mappings since the BIOS might clear
+ * all our scratch PTEs.
+ */
+ ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
+ if (ret)
+ return ret;
+
+ drm_kms_helper_poll_enable(dev);
+ return 0;
+}
+
+static int i915_resume_legacy(struct drm_device *dev)
+{
+ i915_resume_early(dev);
+ i915_resume(dev);
+
+ return 0;
}
/**
- * i965_reset - reset chip after a hang
+ * i915_reset - reset chip after a hang
* @dev: drm device to reset
- * @flags: reset domains
*
* Reset the chip. Useful if a hang is detected. Returns zero on successful
* reset or otherwise an error code.
@@ -261,53 +753,38 @@ static int i915_resume(struct drm_device *dev)
* - re-init interrupt state
* - re-init display
*/
-int i965_reset(struct drm_device *dev, u8 flags)
+int i915_reset(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- unsigned long timeout;
- u8 gdrst;
- /*
- * We really should only reset the display subsystem if we actually
- * need to
- */
- bool need_display = true;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool simulated;
+ int ret;
+
+ if (!i915.reset)
+ return 0;
mutex_lock(&dev->struct_mutex);
- /*
- * Clear request list
- */
- i915_gem_retire_requests(dev);
+ i915_gem_reset(dev);
- if (need_display)
- i915_save_display(dev);
+ simulated = dev_priv->gpu_error.stop_rings != 0;
- if (IS_I965G(dev) || IS_G4X(dev)) {
- /*
- * Set the domains we want to reset, then the reset bit (bit 0).
- * Clear the reset bit after a while and wait for hardware status
- * bit (bit 1) to be set
- */
- pci_read_config_byte(dev->pdev, GDRST, &gdrst);
- pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
- udelay(50);
- pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
-
- /* ...we don't want to loop forever though, 500ms should be plenty */
- timeout = jiffies + msecs_to_jiffies(500);
- do {
- udelay(100);
- pci_read_config_byte(dev->pdev, GDRST, &gdrst);
- } while ((gdrst & 0x1) && time_after(timeout, jiffies));
-
- if (gdrst & 0x1) {
- WARN(true, "i915: Failed to reset chip\n");
- mutex_unlock(&dev->struct_mutex);
- return -EIO;
+ ret = intel_gpu_reset(dev);
+
+ /* Also reset the gpu hangman. */
+ if (simulated) {
+ DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
+ dev_priv->gpu_error.stop_rings = 0;
+ if (ret == -ENODEV) {
+ DRM_INFO("Reset not implemented, but ignoring "
+ "error for simulated gpu hangs\n");
+ ret = 0;
}
- } else {
- DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
- return -ENODEV;
+ }
+
+ if (ret) {
+ DRM_ERROR("Failed to reset chip: %i\n", ret);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
/* Ok, now get things going again... */
@@ -325,54 +802,61 @@ int i965_reset(struct drm_device *dev, u8 flags)
* switched away).
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
- !dev_priv->mm.suspended) {
- drm_i915_ring_buffer_t *ring = &dev_priv->ring;
- struct drm_gem_object *obj = ring->ring_obj;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- dev_priv->mm.suspended = 0;
-
- /* Stop the ring if it's running. */
- I915_WRITE(PRB0_CTL, 0);
- I915_WRITE(PRB0_TAIL, 0);
- I915_WRITE(PRB0_HEAD, 0);
-
- /* Initialize the ring. */
- I915_WRITE(PRB0_START, obj_priv->gtt_offset);
- I915_WRITE(PRB0_CTL,
- ((obj->size - 4096) & RING_NR_PAGES) |
- RING_NO_REPORT |
- RING_VALID);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_kernel_lost_context(dev);
- else {
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
+ !dev_priv->ums.mm_suspended) {
+ dev_priv->ums.mm_suspended = 0;
+
+ ret = i915_gem_init_hw(dev);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ DRM_ERROR("Failed hw init on reset %d\n", ret);
+ return ret;
}
+ /*
+ * FIXME: This races pretty badly against concurrent holders of
+ * ring interrupts. This is possible since we've started to drop
+ * dev->struct_mutex in select places when waiting for the gpu.
+ */
+
+ /*
+ * rps/rc6 re-init is necessary to restore state lost after the
+ * reset and the re-install of gt irqs. Skip for ironlake per
+ * previous concerns that it doesn't respond well to some forms
+ * of re-init after reset.
+ */
+ if (INTEL_INFO(dev)->gen > 5)
+ intel_reset_gt_powersave(dev);
+
+ intel_hpd_init(dev);
+ } else {
mutex_unlock(&dev->struct_mutex);
- drm_irq_uninstall(dev);
- drm_irq_install(dev);
- mutex_lock(&dev->struct_mutex);
}
- /*
- * Display needs restore too...
- */
- if (need_display)
- i915_restore_display(dev);
-
- mutex_unlock(&dev->struct_mutex);
return 0;
}
-
-static int __devinit
-i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_dev(pdev, ent, &driver);
+ struct intel_device_info *intel_info =
+ (struct intel_device_info *) ent->driver_data;
+
+ if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
+ DRM_INFO("This hardware requires preliminary hardware support.\n"
+ "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+ return -ENODEV;
+ }
+
+ /* Only bind to function 0 of the device. Early generations
+ * used function 1 as a placeholder for multi-head. This causes
+ * us confusion, especially on systems where both
+ * functions have the same PCI-ID!
+ */
+ if (PCI_FUNC(pdev->devfn))
+ return -ENODEV;
+
+ driver.driver_features &= ~(DRIVER_USE_AGP);
+
+ return drm_get_pci_dev(pdev, ent, &driver);
}
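
PCI_FUNC() extracts the function number from the devfn byte, which packs the slot in bits 7:3 and the function in bits 2:0. A standalone sketch mirroring the kernel's PCI_SLOT()/PCI_FUNC() definitions:

#include <stdio.h>

#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn) ((devfn) & 0x07)

int main(void)
{
	unsigned int devfn = (2 << 3) | 1;	/* slot 2, function 1 */

	printf("slot=%u func=%u\n", PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (PCI_FUNC(devfn))
		printf("probe would return -ENODEV for this function\n");
	return 0;
}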
static void
@@ -383,131 +867,679 @@ i915_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i915_pm_suspend(struct device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ if (!drm_dev || !drm_dev->dev_private) {
+ dev_err(dev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
- return i915_suspend(dev, state);
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_freeze(drm_dev);
}
-static int
-i915_pci_resume(struct pci_dev *pdev)
+static int i915_pm_suspend_late(struct device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ /*
+ * We have a suspend ordering issue with the snd-hda driver also
+ * requiring our device to be powered up. Due to the lack of a
+ * parent/child relationship we currently solve this with a late
+ * suspend hook.
+ *
+ * FIXME: This should be solved with a special hdmi sink device or
+ * similar so that power domains can be employed.
+ */
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
- return i915_resume(dev);
+ return 0;
}
-static int
-i915_pm_suspend(struct device *dev)
+static int i915_pm_resume_early(struct device *dev)
{
- return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_resume_early(drm_dev);
}
-static int
-i915_pm_resume(struct device *dev)
+static int i915_pm_resume(struct device *dev)
{
- return i915_pci_resume(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_resume(drm_dev);
+}
+
+static int i915_pm_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ if (!drm_dev || !drm_dev->dev_private) {
+ dev_err(dev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ return i915_drm_freeze(drm_dev);
+}
+
+static int i915_pm_thaw_early(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_drm_thaw_early(drm_dev);
+}
+
+static int i915_pm_thaw(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_drm_thaw(drm_dev);
+}
+
+static int i915_pm_poweroff(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_drm_freeze(drm_dev);
+}
+
+static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
+{
+ hsw_enable_pc8(dev_priv);
+
+ return 0;
+}
+
+static int snb_runtime_resume(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ intel_init_pch_refclk(dev);
+
+ return 0;
+}
+
+static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
+{
+ hsw_disable_pc8(dev_priv);
+
+ return 0;
+}
+
+/*
+ * Save all Gunit registers that may be lost after a D3 and a subsequent
+ * S0i[R123] transition. The list of registers needing a save/restore is
+ * defined in the VLV2_S0IXRegs document. This document marks all Gunit
+ * registers in the following way:
+ * - Driver: saved/restored by the driver
+ * - Punit : saved/restored by the Punit firmware
+ * - No, w/o marking: no need to save/restore, since the register is R/O or
+ * used internally by the HW in a way that doesn't depend
+ * on keeping the content across a suspend/resume.
+ * - Debug : used for debugging
+ *
+ * We save/restore all registers marked with 'Driver', with the following
+ * exceptions:
+ * - Registers out of use, including registers marked with 'Debug'.
+ * These have no effect on the driver's operation, so we don't save/restore
+ * them to reduce the overhead.
+ * - Registers that are fully setup by an initialization function called from
+ * the resume path. For example many clock gating and RPS/RC6 registers.
+ * - Registers that provide the right functionality with their reset defaults.
+ *
+ * TODO: Except for registers that, based on the above 3 criteria, can be safely
+ * ignored, we save/restore all others, practically treating the HW context as
+ * a black-box for the driver. Further investigation is needed to reduce the
+ * saved/restored registers even further, by following the same 3 criteria.
+ */
+static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+{
+ struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+ int i;
+
+ /* GAM 0x4000-0x4770 */
+ s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
+ s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
+ s->arb_mode = I915_READ(ARB_MODE);
+ s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
+ s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
+
+ for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+ s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
+
+ s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
+ s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
+
+ s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
+ s->ecochk = I915_READ(GAM_ECOCHK);
+ s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
+ s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
+
+ s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
+
+ /* MBC 0x9024-0x91D0, 0x8500 */
+ s->g3dctl = I915_READ(VLV_G3DCTL);
+ s->gsckgctl = I915_READ(VLV_GSCKGCTL);
+ s->mbctl = I915_READ(GEN6_MBCTL);
+
+ /* GCP 0x9400-0x9424, 0x8100-0x810C */
+ s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
+ s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
+ s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
+ s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
+ s->rstctl = I915_READ(GEN6_RSTCTL);
+ s->misccpctl = I915_READ(GEN7_MISCCPCTL);
+
+ /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+ s->gfxpause = I915_READ(GEN6_GFXPAUSE);
+ s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
+ s->rpdeuc = I915_READ(GEN6_RPDEUC);
+ s->ecobus = I915_READ(ECOBUS);
+ s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
+ s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
+ s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
+ s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
+ s->rcedata = I915_READ(VLV_RCEDATA);
+ s->spare2gh = I915_READ(VLV_SPAREG2H);
+
+ /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+ s->gt_imr = I915_READ(GTIMR);
+ s->gt_ier = I915_READ(GTIER);
+ s->pm_imr = I915_READ(GEN6_PMIMR);
+ s->pm_ier = I915_READ(GEN6_PMIER);
+
+ for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+ s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);
+
+ /* GT SA CZ domain, 0x100000-0x138124 */
+ s->tilectl = I915_READ(TILECTL);
+ s->gt_fifoctl = I915_READ(GTFIFOCTL);
+ s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
+ s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+ s->pmwgicz = I915_READ(VLV_PMWGICZ);
+
+ /* Gunit-Display CZ domain, 0x182028-0x1821CF */
+ s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
+ s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
+ s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
+
+ /*
+ * Not saving any of:
+ * DFT, 0x9800-0x9EC0
+ * SARB, 0xB000-0xB1FC
+ * GAC, 0x5208-0x524C, 0x14000-0x14C000
+ * PCI CFG
+ */
+}
+
+static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+{
+ struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+ u32 val;
+ int i;
+
+ /* GAM 0x4000-0x4770 */
+ I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
+ I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
+ I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
+ I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
+ I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
+
+ for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+ I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
+
+ I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
+ I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
+
+ I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
+ I915_WRITE(GAM_ECOCHK, s->ecochk);
+ I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
+ I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
+
+ I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
+
+ /* MBC 0x9024-0x91D0, 0x8500 */
+ I915_WRITE(VLV_G3DCTL, s->g3dctl);
+ I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
+ I915_WRITE(GEN6_MBCTL, s->mbctl);
+
+ /* GCP 0x9400-0x9424, 0x8100-0x810C */
+ I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
+ I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
+ I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
+ I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
+ I915_WRITE(GEN6_RSTCTL, s->rstctl);
+ I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
+
+ /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+ I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
+ I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
+ I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
+ I915_WRITE(ECOBUS, s->ecobus);
+ I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
+ I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
+ I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
+ I915_WRITE(VLV_RCEDATA, s->rcedata);
+ I915_WRITE(VLV_SPAREG2H, s->spare2gh);
+
+ /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+ I915_WRITE(GTIMR, s->gt_imr);
+ I915_WRITE(GTIER, s->gt_ier);
+ I915_WRITE(GEN6_PMIMR, s->pm_imr);
+ I915_WRITE(GEN6_PMIER, s->pm_ier);
+
+ for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+ I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);
+
+ /* GT SA CZ domain, 0x100000-0x138124 */
+ I915_WRITE(TILECTL, s->tilectl);
+ I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
+ /*
+ * Preserve the GT allow wake and GFX force clock bits; they are not
+ * restored, as they are used by the caller to control the s0ix
+ * suspend/resume sequence.
+ */
+ val = I915_READ(VLV_GTLC_WAKE_CTRL);
+ val &= VLV_GTLC_ALLOWWAKEREQ;
+ val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
+ I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
+
+ val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+ val &= VLV_GFX_CLK_FORCE_ON_BIT;
+ val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
+ I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
+
+ I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
+
+ /* Gunit-Display CZ domain, 0x182028-0x1821CF */
+ I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
+ I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
+ I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
+}
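
The two read-modify-write blocks at the end of the restore path implement a masked restore: bits under the caller's control are kept from the live register, everything else comes from the snapshot. A standalone sketch of the pattern (the values and mask below are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Keep the bits in keep_mask from the live value, take the rest from the
 * saved snapshot. */
static uint32_t masked_restore(uint32_t live, uint32_t saved, uint32_t keep_mask)
{
	return (live & keep_mask) | (saved & ~keep_mask);
}

int main(void)
{
	uint32_t live = 0x00000004;	/* e.g. force-on bit set by the caller */
	uint32_t snap = 0xa5a50000;	/* snapshot taken at suspend time */
	uint32_t mask = 0x00000004;	/* e.g. VLV_GFX_CLK_FORCE_ON_BIT */

	printf("restored: 0x%08x\n", masked_restore(live, snap, mask));
	return 0;
}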
+
+int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
+{
+ u32 val;
+ int err;
+
+ val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+ WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
+
+#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
+ /* Wait for a previous force-off to settle */
+ if (force_on) {
+ err = wait_for(!COND, 20);
+ if (err) {
+ DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
+ I915_READ(VLV_GTLC_SURVIVABILITY_REG));
+ return err;
+ }
+ }
+
+ val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+ val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
+ if (force_on)
+ val |= VLV_GFX_CLK_FORCE_ON_BIT;
+ I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
+
+ if (!force_on)
+ return 0;
+
+ err = wait_for(COND, 20);
+ if (err)
+ DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
+ I915_READ(VLV_GTLC_SURVIVABILITY_REG));
+
+ return err;
+#undef COND
+}
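
The wait_for(COND, ms) calls above poll a condition against a deadline. A rough standalone equivalent (plain C using CLOCK_MONOTONIC; the driver's macro also sleeps between polls, which this busy-waiting sketch omits):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Returns 0 once cond() holds, -1 (standing in for -ETIMEDOUT) after
 * timeout_ms milliseconds. */
static int wait_for_cond(bool (*cond)(void), long timeout_ms)
{
	struct timespec start, now;
	long elapsed_ms;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (cond())
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
			     (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed_ms > timeout_ms)
			return -1;
	}
}

static bool ready(void) { return true; }

int main(void)
{
	printf("wait_for_cond -> %d\n", wait_for_cond(ready, 20));
	return 0;
}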
+
+static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
+{
+ u32 val;
+ int err = 0;
+
+ val = I915_READ(VLV_GTLC_WAKE_CTRL);
+ val &= ~VLV_GTLC_ALLOWWAKEREQ;
+ if (allow)
+ val |= VLV_GTLC_ALLOWWAKEREQ;
+ I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
+ POSTING_READ(VLV_GTLC_WAKE_CTRL);
+
+#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
+ allow)
+ err = wait_for(COND, 1);
+ if (err)
+ DRM_ERROR("timeout disabling GT waking\n");
+ return err;
+#undef COND
+}
+
+static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
+ bool wait_for_on)
+{
+ u32 mask;
+ u32 val;
+ int err;
+
+ mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
+ val = wait_for_on ? mask : 0;
+#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
+ if (COND)
+ return 0;
+
+ DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
+ wait_for_on ? "on" : "off",
+ I915_READ(VLV_GTLC_PW_STATUS));
+
+ /*
+ * RC6 transitioning can be delayed up to 2 msec (see
+ * valleyview_enable_rps), use 3 msec for safety.
+ */
+ err = wait_for(COND, 3);
+ if (err)
+ DRM_ERROR("timeout waiting for GT wells to go %s\n",
+ wait_for_on ? "on" : "off");
+
+ return err;
+#undef COND
}
-static int
-i915_pm_freeze(struct device *dev)
+static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
- return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
+ if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
+ return;
+
+ DRM_ERROR("GT register access while GT waking disabled\n");
+ I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
-static int
-i915_pm_thaw(struct device *dev)
+static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
{
- /* thaw during hibernate, do nothing! */
+ u32 mask;
+ int err;
+
+ /*
+ * Bspec defines the following GT power well on/off flags as debug only, so
+ * don't treat them as hard failures.
+ */
+ (void)vlv_wait_for_gt_wells(dev_priv, false);
+
+ mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
+ WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
+
+ vlv_check_no_gt_access(dev_priv);
+
+ err = vlv_force_gfx_clock(dev_priv, true);
+ if (err)
+ goto err1;
+
+ err = vlv_allow_gt_wake(dev_priv, false);
+ if (err)
+ goto err2;
+ vlv_save_gunit_s0ix_state(dev_priv);
+
+ err = vlv_force_gfx_clock(dev_priv, false);
+ if (err)
+ goto err2;
+
return 0;
+
+err2:
+ /* For safety always re-enable waking and disable gfx clock forcing */
+ vlv_allow_gt_wake(dev_priv, true);
+err1:
+ vlv_force_gfx_clock(dev_priv, false);
+
+ return err;
}
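
The err1/err2 labels above are the usual goto-unwind idiom: each failure jumps to the label that undoes the steps completed so far, in reverse order (plus the "for safety" re-enable noted in the comment). A standalone model of the control flow:

#include <stdio.h>

static int do_step(const char *name, int fail) { printf("%s\n", name); return fail; }
static void undo_step(const char *name)        { printf("undo: %s\n", name); }

static int suspend_sequence(void)
{
	int err;

	err = do_step("force gfx clock on", 0);
	if (err)
		goto err1;

	err = do_step("disallow GT wake", -1);	/* simulate a failure */
	if (err)
		goto err2;

	return 0;

err2:
	undo_step("disallow GT wake");	/* for safety, re-allow waking */
err1:
	undo_step("force gfx clock on");
	return err;
}

int main(void) { return suspend_sequence() ? 1 : 0; }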
-static int
-i915_pm_poweroff(struct device *dev)
+static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
{
- return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
+ struct drm_device *dev = dev_priv->dev;
+ int err;
+ int ret;
+
+ /*
+ * If any of the steps fail, just try to continue; that's the best we
+ * can do at this point. Return the first error code (which will also
+ * leave RPM permanently disabled).
+ */
+ ret = vlv_force_gfx_clock(dev_priv, true);
+
+ vlv_restore_gunit_s0ix_state(dev_priv);
+
+ err = vlv_allow_gt_wake(dev_priv, true);
+ if (!ret)
+ ret = err;
+
+ err = vlv_force_gfx_clock(dev_priv, false);
+ if (!ret)
+ ret = err;
+
+ vlv_check_no_gt_access(dev_priv);
+
+ intel_init_clock_gating(dev);
+ i915_gem_restore_fences(dev);
+
+ return ret;
}
-static int
-i915_pm_restore(struct device *dev)
+static int intel_runtime_suspend(struct device *device)
{
- return i915_pci_resume(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
+ return -ENODEV;
+
+ WARN_ON(!HAS_RUNTIME_PM(dev));
+ assert_force_wake_inactive(dev_priv);
+
+ DRM_DEBUG_KMS("Suspending device\n");
+
+ /*
+ * We could deadlock here in case another thread holding struct_mutex
+ * calls RPM suspend concurrently, since the RPM suspend will wait
+ * first for this RPM suspend to finish. In this case the concurrent
+ * RPM resume will be followed by its RPM suspend counterpart. Still
+ * for consistency return -EAGAIN, which will reschedule this suspend.
+ */
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
+ /*
+ * Bump the expiration timestamp, otherwise the suspend won't
+ * be rescheduled.
+ */
+ pm_runtime_mark_last_busy(device);
+
+ return -EAGAIN;
+ }
+ /*
+ * We are safe here against re-faults, since the fault handler takes
+ * an RPM reference.
+ */
+ i915_gem_release_all_mmaps(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ /*
+ * rps.work can't be rearmed here, since we get here only after making
+ * sure the GPU is idle and the RPS freq is set to the minimum. See
+ * intel_mark_idle().
+ */
+ cancel_work_sync(&dev_priv->rps.work);
+ intel_runtime_pm_disable_interrupts(dev);
+
+ if (IS_GEN6(dev)) {
+ ret = 0;
+ } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ ret = hsw_runtime_suspend(dev_priv);
+ } else if (IS_VALLEYVIEW(dev)) {
+ ret = vlv_runtime_suspend(dev_priv);
+ } else {
+ ret = -ENODEV;
+ WARN_ON(1);
+ }
+
+ if (ret) {
+ DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
+ intel_runtime_pm_restore_interrupts(dev);
+
+ return ret;
+ }
+
+ del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+ dev_priv->pm.suspended = true;
+
+ /*
+ * current versions of firmware which depend on this opregion
+ * notification have repurposed the D1 definition to mean
+ * "runtime suspended" vs. what you would normally expect (D3)
+ * to distinguish it from notifications that might be sent
+ * via the suspend path.
+ */
+ intel_opregion_notify_adapter(dev, PCI_D1);
+
+ DRM_DEBUG_KMS("Device suspended\n");
+ return 0;
+}
+
+static int intel_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ WARN_ON(!HAS_RUNTIME_PM(dev));
+
+ DRM_DEBUG_KMS("Resuming device\n");
+
+ intel_opregion_notify_adapter(dev, PCI_D0);
+ dev_priv->pm.suspended = false;
+
+ if (IS_GEN6(dev)) {
+ ret = snb_runtime_resume(dev_priv);
+ } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ ret = hsw_runtime_resume(dev_priv);
+ } else if (IS_VALLEYVIEW(dev)) {
+ ret = vlv_runtime_resume(dev_priv);
+ } else {
+ WARN_ON(1);
+ ret = -ENODEV;
+ }
+
+ /*
+ * No point in rolling back things in case of an error, as the best
+ * we can do is to hope that things will still work (and disable RPM).
+ */
+ i915_gem_init_swizzling(dev);
+ gen6_update_ring_freq(dev);
+
+ intel_runtime_pm_restore_interrupts(dev);
+ intel_reset_gt_powersave(dev);
+
+ if (ret)
+ DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
+ else
+ DRM_DEBUG_KMS("Device resumed\n");
+
+ return ret;
}
-const struct dev_pm_ops i915_pm_ops = {
- .suspend = i915_pm_suspend,
- .resume = i915_pm_resume,
- .freeze = i915_pm_freeze,
- .thaw = i915_pm_thaw,
- .poweroff = i915_pm_poweroff,
- .restore = i915_pm_restore,
+static const struct dev_pm_ops i915_pm_ops = {
+ .suspend = i915_pm_suspend,
+ .suspend_late = i915_pm_suspend_late,
+ .resume_early = i915_pm_resume_early,
+ .resume = i915_pm_resume,
+ .freeze = i915_pm_freeze,
+ .thaw_early = i915_pm_thaw_early,
+ .thaw = i915_pm_thaw,
+ .poweroff = i915_pm_poweroff,
+ .restore_early = i915_pm_resume_early,
+ .restore = i915_pm_resume,
+ .runtime_suspend = intel_runtime_suspend,
+ .runtime_resume = intel_runtime_resume,
};
-static struct vm_operations_struct i915_gem_vm_ops = {
+static const struct vm_operations_struct i915_gem_vm_ops = {
.fault = i915_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
+static const struct file_operations i915_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = i915_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
- /* don't use mtrr's here, the Xserver or user space app should
- * deal with them for intel hardware.
+ /* Don't use MTRRs here; the Xserver or userspace app should
+ * deal with them for Intel hardware.
*/
.driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
+ DRIVER_USE_AGP |
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_RENDER,
.load = i915_driver_load,
.unload = i915_driver_unload,
.open = i915_driver_open,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.postclose = i915_driver_postclose,
+
+ /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
.suspend = i915_suspend,
- .resume = i915_resume,
+ .resume = i915_resume_legacy,
+
.device_is_agp = i915_driver_device_is_agp,
- .enable_vblank = i915_enable_vblank,
- .disable_vblank = i915_disable_vblank,
- .irq_preinstall = i915_driver_irq_preinstall,
- .irq_postinstall = i915_driver_irq_postinstall,
- .irq_uninstall = i915_driver_irq_uninstall,
- .irq_handler = i915_driver_irq_handler,
- .reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
#endif
- .gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
- .ioctls = i915_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_gem_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = i915_compat_ioctl,
-#endif
- },
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = i915_pci_probe,
- .remove = i915_pci_remove,
- .driver.pm = &i915_pm_ops,
- },
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = i915_gem_prime_export,
+ .gem_prime_import = i915_gem_prime_import,
+ .dumb_create = i915_gem_dumb_create,
+ .dumb_map_offset = i915_gem_mmap_gtt,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .ioctls = i915_ioctls,
+ .fops = &i915_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -516,12 +1548,18 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver i915_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = i915_pci_probe,
+ .remove = i915_pci_remove,
+ .driver.pm = &i915_pm_ops,
+};
+
static int __init i915_init(void)
{
driver.num_ioctls = i915_max_ioctl;
- i915_gem_shrinker_init();
-
/*
* If CONFIG_DRM_I915_KMS is set, default to KMS unless
* explicitly disabled with the module parameter.
@@ -532,24 +1570,37 @@ static int __init i915_init(void)
* the default behavior.
*/
#if defined(CONFIG_DRM_I915_KMS)
- if (i915_modeset != 0)
+ if (i915.modeset != 0)
driver.driver_features |= DRIVER_MODESET;
#endif
- if (i915_modeset == 1)
+ if (i915.modeset == 1)
driver.driver_features |= DRIVER_MODESET;
#ifdef CONFIG_VGA_CONSOLE
- if (vgacon_text_force() && i915_modeset == -1)
+ if (vgacon_text_force() && i915.modeset == -1)
driver.driver_features &= ~DRIVER_MODESET;
#endif
- return drm_init(&driver);
+ if (!(driver.driver_features & DRIVER_MODESET)) {
+ driver.get_vblank_timestamp = NULL;
+#ifndef CONFIG_DRM_I915_UMS
+ /* Silently fail loading to not upset userspace. */
+ DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
+ return 0;
+#endif
+ }
+
+ return drm_pci_init(&driver, &i915_pci_driver);
}
static void __exit i915_exit(void)
{
- i915_gem_shrinker_exit();
- drm_exit(&driver);
+#ifndef CONFIG_DRM_I915_UMS
+ if (!(driver.driver_features & DRIVER_MODESET))
+ return; /* Never loaded a driver. */
+#endif
+
+ drm_pci_exit(&driver, &i915_pci_driver);
}
module_init(i915_init);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 29dd6762696..374f964323a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -30,9 +30,21 @@
#ifndef _I915_DRV_H_
#define _I915_DRV_H_
+#include <uapi/drm/i915_drm.h>
+
#include "i915_reg.h"
#include "intel_bios.h"
+#include "intel_ringbuffer.h"
+#include "i915_gem_gtt.h"
#include <linux/io-mapping.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <drm/intel-gtt.h>
+#include <linux/backlight.h>
+#include <linux/hashtable.h>
+#include <linux/intel-iommu.h>
+#include <linux/kref.h>
+#include <linux/pm_qos.h>
/* General customization:
*/
@@ -44,16 +56,183 @@
#define DRIVER_DATE "20080730"
enum pipe {
+ INVALID_PIPE = -1,
PIPE_A = 0,
PIPE_B,
+ PIPE_C,
+ _PIPE_EDP,
+ I915_MAX_PIPES = _PIPE_EDP
+};
+#define pipe_name(p) ((p) + 'A')
+
+enum transcoder {
+ TRANSCODER_A = 0,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP,
+ I915_MAX_TRANSCODERS
};
+#define transcoder_name(t) ((t) + 'A')
enum plane {
PLANE_A = 0,
PLANE_B,
+ PLANE_C,
+};
+#define plane_name(p) ((p) + 'A')
+
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
+
+enum port {
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ I915_MAX_PORTS
+};
+#define port_name(p) ((p) + 'A')
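
The *_name() macros map an enumerator directly to a letter, which works only because these enums are zero-based and contiguous. A tiny standalone demonstration:

#include <stdio.h>

enum pipe { PIPE_A = 0, PIPE_B, PIPE_C };
#define pipe_name(p) ((p) + 'A')

int main(void)
{
	printf("pipe %c\n", pipe_name(PIPE_B));	/* prints "pipe B" */
	return 0;
}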
+
+#define I915_NUM_PHYS_VLV 2
+
+enum dpio_channel {
+ DPIO_CH0,
+ DPIO_CH1
};
-#define I915_NUM_PIPE 2
+enum dpio_phy {
+ DPIO_PHY0,
+ DPIO_PHY1
+};
+
+enum intel_display_power_domain {
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_A_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_B_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_C_PANEL_FITTER,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_PORT_DDI_A_2_LANES,
+ POWER_DOMAIN_PORT_DDI_A_4_LANES,
+ POWER_DOMAIN_PORT_DDI_B_2_LANES,
+ POWER_DOMAIN_PORT_DDI_B_4_LANES,
+ POWER_DOMAIN_PORT_DDI_C_2_LANES,
+ POWER_DOMAIN_PORT_DDI_C_4_LANES,
+ POWER_DOMAIN_PORT_DDI_D_2_LANES,
+ POWER_DOMAIN_PORT_DDI_D_4_LANES,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_PORT_OTHER,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO,
+ POWER_DOMAIN_INIT,
+
+ POWER_DOMAIN_NUM,
+};
+
+#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
+ ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+ ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+ (tran) + POWER_DOMAIN_TRANSCODER_A)
+
+enum hpd_pin {
+ HPD_NONE = 0,
+ HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
+ HPD_TV = HPD_NONE, /* TV is known to be unreliable */
+ HPD_CRT,
+ HPD_SDVO_B,
+ HPD_SDVO_C,
+ HPD_PORT_B,
+ HPD_PORT_C,
+ HPD_PORT_D,
+ HPD_NUM_PINS
+};
+
+#define I915_GEM_GPU_DOMAINS \
+ (I915_GEM_DOMAIN_RENDER | \
+ I915_GEM_DOMAIN_SAMPLER | \
+ I915_GEM_DOMAIN_COMMAND | \
+ I915_GEM_DOMAIN_INSTRUCTION | \
+ I915_GEM_DOMAIN_VERTEX)
+
+#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
+#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
+
+#define for_each_crtc(dev, crtc) \
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+
+#define for_each_intel_crtc(dev, intel_crtc) \
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
+
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+ if ((intel_encoder)->base.crtc == (__crtc))
+
+#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
+ list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
+ if ((intel_connector)->base.encoder == (__encoder))
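
These iterator macros expand to a loop header plus an if-filter, so a caller can attach a single statement or a block exactly as with a plain for loop. A standalone model of the idiom, using an array instead of the kernel's linked lists (names are illustrative):

#include <stdio.h>

struct encoder { int crtc_id; const char *name; };

#define for_each_encoder_on_crtc(encs, n, e, crtc) \
	for ((e) = (encs); (e) < (encs) + (n); (e)++) \
		if ((e)->crtc_id == (crtc))

int main(void)
{
	struct encoder encs[] = { {0, "DP-1"}, {1, "HDMI-1"}, {0, "eDP-1"} };
	struct encoder *e;

	for_each_encoder_on_crtc(encs, 3, e, 0)
		printf("%s is on crtc 0\n", e->name);
	return 0;
}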
+
+struct drm_i915_private;
+struct i915_mmu_object;
+
+enum intel_dpll_id {
+ DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
+ /* real shared dpll ids must be >= 0 */
+ DPLL_ID_PCH_PLL_A,
+ DPLL_ID_PCH_PLL_B,
+};
+#define I915_NUM_PLLS 2
+
+struct intel_dpll_hw_state {
+ uint32_t dpll;
+ uint32_t dpll_md;
+ uint32_t fp0;
+ uint32_t fp1;
+};
+
+struct intel_shared_dpll {
+ int refcount; /* count of number of CRTCs sharing this PLL */
+ int active; /* count of number of active CRTCs (i.e. DPMS on) */
+ bool on; /* is the PLL actually active? Disabled during modeset */
+ const char *name;
+ /* should match the index in the dev_priv->shared_dplls array */
+ enum intel_dpll_id id;
+ struct intel_dpll_hw_state hw_state;
+ void (*mode_set)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
+ void (*enable)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
+ void (*disable)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
+ bool (*get_hw_state)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state);
+};
+
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+ uint32_t tu;
+ uint32_t gmch_m;
+ uint32_t gmch_n;
+ uint32_t link_m;
+ uint32_t link_n;
+};
+
+void intel_link_compute_m_n(int bpp, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n);
+
+struct intel_ddi_plls {
+ int spll_refcount;
+ int wrpll1_refcount;
+ int wrpll2_refcount;
+};
/* Interface history:
*
@@ -69,43 +248,8 @@ enum plane {
#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 0
-#define WATCH_COHERENCY 0
-#define WATCH_BUF 0
-#define WATCH_EXEC 0
-#define WATCH_LRU 0
-#define WATCH_RELOC 0
-#define WATCH_INACTIVE 0
-#define WATCH_PWRITE 0
-
-#define I915_GEM_PHYS_CURSOR_0 1
-#define I915_GEM_PHYS_CURSOR_1 2
-#define I915_GEM_PHYS_OVERLAY_REGS 3
-#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
-
-struct drm_i915_gem_phys_object {
- int id;
- struct page **page_list;
- drm_dma_handle_t *handle;
- struct drm_gem_object *cur_obj;
-};
-
-typedef struct _drm_i915_ring_buffer {
- unsigned long Size;
- u8 *virtual_start;
- int head;
- int tail;
- int space;
- drm_local_map_t map;
- struct drm_gem_object *ring_obj;
-} drm_i915_ring_buffer_t;
-
-struct mem_block {
- struct mem_block *next;
- struct mem_block *prev;
- int start;
- int size;
- struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
-};
+#define WATCH_LISTS 0
+#define WATCH_GTT 0
struct opregion_header;
struct opregion_acpi;
@@ -113,201 +257,424 @@ struct opregion_swsci;
struct opregion_asle;
struct intel_opregion {
- struct opregion_header *header;
- struct opregion_acpi *acpi;
- struct opregion_swsci *swsci;
- struct opregion_asle *asle;
- int enabled;
+ struct opregion_header __iomem *header;
+ struct opregion_acpi __iomem *acpi;
+ struct opregion_swsci __iomem *swsci;
+ u32 swsci_gbda_sub_functions;
+ u32 swsci_sbcb_sub_functions;
+ struct opregion_asle __iomem *asle;
+ void __iomem *vbt;
+ u32 __iomem *lid_state;
+ struct work_struct asle_work;
};
+#define OPREGION_SIZE (8*1024)
+
+struct intel_overlay;
+struct intel_overlay_error_state;
struct drm_i915_master_private {
drm_local_map_t *sarea;
struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 32
+/* 32 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 6
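
Six bits are exactly enough because a signed 6-bit field spans -32..31: fence indices 0..31 plus -1 for I915_FENCE_REG_NONE. A standalone check:

#include <stdio.h>

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
#define I915_MAX_NUM_FENCE_BITS 6

struct buf_state {
	signed int fence_reg : I915_MAX_NUM_FENCE_BITS;
};

int main(void)
{
	struct buf_state b = { .fence_reg = I915_FENCE_REG_NONE };

	printf("none=%d max=%d\n", b.fence_reg, I915_MAX_NUM_FENCES - 1);
	return 0;
}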
struct drm_i915_fence_reg {
- struct drm_gem_object *obj;
+ struct list_head lru_list;
+ struct drm_i915_gem_object *obj;
+ int pin_count;
};
struct sdvo_device_mapping {
+ u8 initialized;
u8 dvo_port;
u8 slave_addr;
u8 dvo_wiring;
- u8 initialized;
+ u8 i2c_pin;
+ u8 ddc_pin;
};
+struct intel_display_error_state;
+
struct drm_i915_error_state {
+ struct kref ref;
+ struct timeval time;
+
+ char error_msg[128];
+ u32 reset_count;
+ u32 suspend_count;
+
+ /* Generic register state */
u32 eir;
u32 pgtbl_er;
- u32 pipeastat;
- u32 pipebstat;
- u32 ipeir;
- u32 ipehr;
- u32 instdone;
- u32 acthd;
- u32 instpm;
- u32 instps;
- u32 instdone1;
- u32 seqno;
- struct timeval time;
+ u32 ier;
+ u32 ccid;
+ u32 derrmr;
+ u32 forcewake;
+ u32 error; /* gen6+ */
+ u32 err_int; /* gen7 */
+ u32 done_reg;
+ u32 gac_eco;
+ u32 gam_ecochk;
+ u32 gab_ctl;
+ u32 gfx_mode;
+ u32 extra_instdone[I915_NUM_INSTDONE_REG];
+ u64 fence[I915_MAX_NUM_FENCES];
+ struct intel_overlay_error_state *overlay;
+ struct intel_display_error_state *display;
+
+ struct drm_i915_error_ring {
+ bool valid;
+ /* Software tracked state */
+ bool waiting;
+ int hangcheck_score;
+ enum intel_ring_hangcheck_action hangcheck_action;
+ int num_requests;
+
+ /* our own tracking of ring head and tail */
+ u32 cpu_ring_head;
+ u32 cpu_ring_tail;
+
+ u32 semaphore_seqno[I915_NUM_RINGS - 1];
+
+ /* Register state */
+ u32 tail;
+ u32 head;
+ u32 ctl;
+ u32 hws;
+ u32 ipeir;
+ u32 ipehr;
+ u32 instdone;
+ u32 bbstate;
+ u32 instpm;
+ u32 instps;
+ u32 seqno;
+ u64 bbaddr;
+ u64 acthd;
+ u32 fault_reg;
+ u64 faddr;
+ u32 rc_psmi; /* sleep state */
+ u32 semaphore_mboxes[I915_NUM_RINGS - 1];
+
+ struct drm_i915_error_object {
+ int page_count;
+ u32 gtt_offset;
+ u32 *pages[0];
+ } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
+
+ struct drm_i915_error_request {
+ long jiffies;
+ u32 seqno;
+ u32 tail;
+ } *requests;
+
+ struct {
+ u32 gfx_mode;
+ union {
+ u64 pdp[4];
+ u32 pp_dir_base;
+ };
+ } vm_info;
+
+ pid_t pid;
+ char comm[TASK_COMM_LEN];
+ } ring[I915_NUM_RINGS];
+ struct drm_i915_error_buffer {
+ u32 size;
+ u32 name;
+ u32 rseqno, wseqno;
+ u32 gtt_offset;
+ u32 read_domains;
+ u32 write_domain;
+ s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
+ s32 pinned:2;
+ u32 tiling:2;
+ u32 dirty:1;
+ u32 purgeable:1;
+ u32 userptr:1;
+ s32 ring:4;
+ u32 cache_level:3;
+ } **active_bo, **pinned_bo;
+
+ u32 *active_bo_count, *pinned_bo_count;
};
+struct intel_connector;
+struct intel_crtc_config;
+struct intel_plane_config;
+struct intel_crtc;
+struct intel_limit;
+struct dpll;
+
struct drm_i915_display_funcs {
- void (*dpms)(struct drm_crtc *crtc, int mode);
- bool (*fbc_enabled)(struct drm_crtc *crtc);
- void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+ bool (*fbc_enabled)(struct drm_device *dev);
+ void (*enable_fbc)(struct drm_crtc *crtc);
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
- void (*update_wm)(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size);
+ /**
+ * find_dpll() - Find the best values for the PLL
+ * @limit: limits for the PLL
+ * @crtc: current CRTC
+ * @target: target frequency in kHz
+ * @refclk: reference clock frequency in kHz
+ * @match_clock: if provided, @best_clock P divider must
+ * match the P divider from @match_clock
+ * used for LVDS downclocking
+ * @best_clock: best PLL values found
+ *
+ * Returns true on success, false on failure.
+ */
+ bool (*find_dpll)(const struct intel_limit *limit,
+ struct drm_crtc *crtc,
+ int target, int refclk,
+ struct dpll *match_clock,
+ struct dpll *best_clock);
+ void (*update_wm)(struct drm_crtc *crtc);
+ void (*update_sprite_wm)(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ uint32_t sprite_width, int pixel_size,
+ bool enable, bool scaled);
+ void (*modeset_global_resources)(struct drm_device *dev);
+ /* Returns the active state of the crtc, and if the crtc is active,
+ * fills out the pipe-config with the hw state. */
+ bool (*get_pipe_config)(struct intel_crtc *,
+ struct intel_crtc_config *);
+ void (*get_plane_config)(struct intel_crtc *,
+ struct intel_plane_config *);
+ int (*crtc_mode_set)(struct drm_crtc *crtc,
+ int x, int y,
+ struct drm_framebuffer *old_fb);
+ void (*crtc_enable)(struct drm_crtc *crtc);
+ void (*crtc_disable)(struct drm_crtc *crtc);
+ void (*off)(struct drm_crtc *crtc);
+ void (*write_eld)(struct drm_connector *connector,
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode);
+ void (*fdi_link_train)(struct drm_crtc *crtc);
+ void (*init_clock_gating)(struct drm_device *dev);
+ int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
+ uint32_t flags);
+ void (*update_primary_plane)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y);
+ void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
/* display clock increase/decrease */
/* pll clock increase/decrease */
- /* clock gating init */
-};
-struct intel_overlay;
-
-struct intel_device_info {
- u8 is_mobile : 1;
- u8 is_i8xx : 1;
- u8 is_i915g : 1;
- u8 is_i9xx : 1;
- u8 is_i945gm : 1;
- u8 is_i965g : 1;
- u8 is_i965gm : 1;
- u8 is_g33 : 1;
- u8 need_gfx_hws : 1;
- u8 is_g4x : 1;
- u8 is_pineview : 1;
- u8 is_ironlake : 1;
- u8 has_fbc : 1;
- u8 has_rc6 : 1;
- u8 has_pipe_cxsr : 1;
- u8 has_hotplug : 1;
- u8 cursor_needs_physical : 1;
-};
-
-typedef struct drm_i915_private {
- struct drm_device *dev;
-
- const struct intel_device_info *info;
-
- int has_gem;
+ int (*setup_backlight)(struct intel_connector *connector);
+ uint32_t (*get_backlight)(struct intel_connector *connector);
+ void (*set_backlight)(struct intel_connector *connector,
+ uint32_t level);
+ void (*disable_backlight)(struct intel_connector *connector);
+ void (*enable_backlight)(struct intel_connector *connector);
+};
- void __iomem *regs;
+struct intel_uncore_funcs {
+ void (*force_wake_get)(struct drm_i915_private *dev_priv,
+ int fw_engine);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv,
+ int fw_engine);
+
+ uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+ uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+ uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+ uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+
+ void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
+ uint8_t val, bool trace);
+ void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
+ uint16_t val, bool trace);
+ void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
+ uint32_t val, bool trace);
+ void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
+ uint64_t val, bool trace);
+};
- struct pci_dev *bridge_dev;
- drm_i915_ring_buffer_t ring;
+struct intel_uncore {
+ spinlock_t lock; /* lock is also taken in irq contexts. */
- drm_dma_handle_t *status_page_dmah;
- void *hw_status_page;
- dma_addr_t dma_status_page;
- uint32_t counter;
- unsigned int status_gfx_addr;
- drm_local_map_t hws_map;
- struct drm_gem_object *hws_obj;
- struct drm_gem_object *pwrctx;
+ struct intel_uncore_funcs funcs;
- struct resource mch_res;
+ unsigned fifo_count;
+ unsigned forcewake_count;
- unsigned int cpp;
- int back_offset;
- int front_offset;
- int current_page;
- int page_flipping;
+ unsigned fw_rendercount;
+ unsigned fw_mediacount;
- wait_queue_head_t irq_queue;
- atomic_t irq_received;
- /** Protects user_irq_refcount and irq_mask_reg */
- spinlock_t user_irq_lock;
- /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
- int user_irq_refcount;
- u32 trace_irq_seqno;
- /** Cached value of IMR to avoid reads in updating the bitfield */
- u32 irq_mask_reg;
- u32 pipestat[2];
- /** splitted irq regs for graphics and display engine on Ironlake,
- irq_mask_reg is still used for display irq. */
- u32 gt_irq_mask_reg;
- u32 gt_irq_enable_reg;
- u32 de_irq_enable_reg;
- u32 pch_irq_mask_reg;
- u32 pch_irq_enable_reg;
-
- u32 hotplug_supported_mask;
- struct work_struct hotplug_work;
+ struct timer_list force_wake_timer;
+};
- int tex_lru_log_granularity;
- int allow_batchbuffer;
- struct mem_block *agp_heap;
- unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
- int vblank_pipe;
+#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
+ func(is_mobile) sep \
+ func(is_i85x) sep \
+ func(is_i915g) sep \
+ func(is_i945gm) sep \
+ func(is_g33) sep \
+ func(need_gfx_hws) sep \
+ func(is_g4x) sep \
+ func(is_pineview) sep \
+ func(is_broadwater) sep \
+ func(is_crestline) sep \
+ func(is_ivybridge) sep \
+ func(is_valleyview) sep \
+ func(is_haswell) sep \
+ func(is_preliminary) sep \
+ func(has_fbc) sep \
+ func(has_pipe_cxsr) sep \
+ func(has_hotplug) sep \
+ func(cursor_needs_physical) sep \
+ func(has_overlay) sep \
+ func(overlay_needs_physical) sep \
+ func(supports_tv) sep \
+ func(has_llc) sep \
+ func(has_ddi) sep \
+ func(has_fpga_dbg)
+
+#define DEFINE_FLAG(name) u8 name:1
+#define SEP_SEMICOLON ;
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
- struct timer_list hangcheck_timer;
- int hangcheck_count;
- uint32_t last_acthd;
+struct intel_device_info {
+ u32 display_mmio_offset;
+ u8 num_pipes:3;
+ u8 num_sprites[I915_MAX_PIPES];
+ u8 gen;
+ u8 ring_mask; /* Rings supported by the HW */
+ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+ /* Register offsets for the various display pipes and transcoders */
+ int pipe_offsets[I915_MAX_TRANSCODERS];
+ int trans_offsets[I915_MAX_TRANSCODERS];
+ int dpll_offsets[I915_MAX_PIPES];
+ int dpll_md_offsets[I915_MAX_PIPES];
+ int palette_offsets[I915_MAX_PIPES];
+ int cursor_offsets[I915_MAX_PIPES];
+};
- struct drm_mm vram;
+#undef DEFINE_FLAG
+#undef SEP_SEMICOLON
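
DEV_INFO_FOR_EACH_FLAG is an x-macro: one flag list expanded with different func/sep arguments to generate different artifacts that cannot drift apart. A standalone model expanding the same (shortened, illustrative) list into both bitfield members and a name table:

#include <stdio.h>

#define FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(has_fbc) sep \
	func(has_llc)

#define DEFINE_FLAG(name) unsigned name : 1
#define SEP_SEMICOLON ;
struct device_info {
	FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};
#undef DEFINE_FLAG
#undef SEP_SEMICOLON

#define FLAG_NAME(name) #name
#define SEP_COMMA ,
static const char * const flag_names[] = {
	FOR_EACH_FLAG(FLAG_NAME, SEP_COMMA)
};
#undef FLAG_NAME
#undef SEP_COMMA

int main(void)
{
	struct device_info info = { .is_mobile = 1, .has_llc = 1 };

	printf("%s=%u %s=%u %s=%u\n",
	       flag_names[0], info.is_mobile,
	       flag_names[1], info.has_fbc,
	       flag_names[2], info.has_llc);
	return 0;
}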
+
+enum i915_cache_level {
+ I915_CACHE_NONE = 0,
+ I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
+ I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain-specific
+ caches, e.g. sampler/render caches, and the
+ large Last-Level-Cache. LLC is coherent with
+ the CPU, but L3 is only visible to the GPU. */
+ I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
+};
- unsigned long cfb_size;
- unsigned long cfb_pitch;
- int cfb_fence;
- int cfb_plane;
+struct i915_ctx_hang_stats {
+ /* This context had batch pending when hang was declared */
+ unsigned batch_pending;
- int irq_enabled;
+ /* This context had batch active when hang was declared */
+ unsigned batch_active;
- struct intel_opregion opregion;
+ /* Time when this context was last blamed for a GPU reset */
+ unsigned long guilty_ts;
- /* overlay */
- struct intel_overlay *overlay;
+ /* This context is banned to submit more work */
+ bool banned;
+};
- /* LVDS info */
- int backlight_duty_cycle; /* restore backlight to this value */
- bool panel_wants_dither;
- struct drm_display_mode *panel_fixed_mode;
- struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
- struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+/* This must match up with the value previously used for execbuf2.rsvd1. */
+#define DEFAULT_CONTEXT_ID 0
+struct intel_context {
+ struct kref ref;
+ int id;
+ bool is_initialized;
+ uint8_t remap_slice;
+ struct drm_i915_file_private *file_priv;
+ struct intel_engine_cs *last_ring;
+ struct drm_i915_gem_object *obj;
+ struct i915_ctx_hang_stats hang_stats;
+ struct i915_address_space *vm;
+
+ struct list_head link;
+};
- /* Feature bits from the VBIOS */
- unsigned int int_tv_support:1;
- unsigned int lvds_dither:1;
- unsigned int lvds_vbt:1;
- unsigned int int_crt_support:1;
- unsigned int lvds_use_ssc:1;
- unsigned int edp_support:1;
- int lvds_ssc_freq;
+struct i915_fbc {
+ unsigned long size;
+ unsigned int fb_id;
+ enum plane plane;
+ int y;
+
+ struct drm_mm_node *compressed_fb;
+ struct drm_mm_node *compressed_llb;
+
+ struct intel_fbc_work {
+ struct delayed_work work;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ } *fbc_work;
+
+ enum no_fbc_reason {
+ FBC_OK, /* FBC is enabled */
+ FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
+ FBC_NO_OUTPUT, /* no outputs enabled to compress */
+ FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
+ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+ FBC_MODE_TOO_LARGE, /* mode too large for compression */
+ FBC_BAD_PLANE, /* fbc not supported on plane */
+ FBC_NOT_TILED, /* buffer not tiled */
+ FBC_MULTIPLE_PIPES, /* more than one pipe active */
+ FBC_MODULE_PARAM,
+ FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+ } no_fbc_reason;
+};
- struct notifier_block lid_notifier;
+struct i915_drrs {
+ struct intel_connector *connector;
+};
- int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
- struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
- int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
- int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+struct i915_psr {
+ bool sink_support;
+ bool source_ok;
+};
- unsigned int fsb_freq, mem_freq;
+enum intel_pch {
+ PCH_NONE = 0, /* No PCH present */
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint PCH */
+ PCH_LPT, /* Lynxpoint PCH */
+ PCH_NOP,
+};
- spinlock_t error_lock;
- struct drm_i915_error_state *first_error;
- struct work_struct error_work;
- struct workqueue_struct *wq;
+enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+};
- /* Display functions */
- struct drm_i915_display_funcs display;
+#define QUIRK_PIPEA_FORCE (1<<0)
+#define QUIRK_LVDS_SSC_DISABLE (1<<1)
+#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_BACKLIGHT_PRESENT (1<<3)
+
+struct intel_fbdev;
+struct intel_fbc_work;
+
+struct intel_gmbus {
+ struct i2c_adapter adapter;
+ u32 force_bit;
+ u32 reg0;
+ u32 gpio_reg;
+ struct i2c_algo_bit_data bit_algo;
+ struct drm_i915_private *dev_priv;
+};
- /* Register state */
- bool modeset_on_lid;
+struct i915_suspend_saved_registers {
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
- u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
@@ -341,6 +708,7 @@ typedef struct drm_i915_private {
u32 saveBLC_HIST_CTL;
u32 saveBLC_PWM_CTL;
u32 saveBLC_PWM_CTL2;
+ u32 saveBLC_HIST_CTL_B;
u32 saveBLC_CPU_PWM_CTL;
u32 saveBLC_CPU_PWM_CTL2;
u32 saveFPB0;
@@ -386,11 +754,7 @@ typedef struct drm_i915_private {
u32 savePFIT_CONTROL;
u32 save_palette_a[256];
u32 save_palette_b[256];
- u32 saveDPFC_CB_BASE;
- u32 saveFBC_CFB_BASE;
- u32 saveFBC_LL_BASE;
u32 saveFBC_CONTROL;
- u32 saveFBC_CONTROL2;
u32 saveIER;
u32 saveIIR;
u32 saveIMR;
@@ -412,7 +776,7 @@ typedef struct drm_i915_private {
u8 saveAR[21];
u8 saveDACMASK;
u8 saveCR[37];
- uint64_t saveFENCE[16];
+ uint64_t saveFENCE[I915_MAX_NUM_FENCES];
u32 saveCURACNTR;
u32 saveCURAPOS;
u32 saveCURABASE;
@@ -451,235 +815,909 @@ typedef struct drm_i915_private {
u32 savePIPEB_DATA_N1;
u32 savePIPEB_LINK_M1;
u32 savePIPEB_LINK_N1;
+ u32 saveMCHBAR_RENDER_STANDBY;
+ u32 savePCH_PORT_HOTPLUG;
+};
+
+struct vlv_s0ix_state {
+ /* GAM */
+ u32 wr_watermark;
+ u32 gfx_prio_ctrl;
+ u32 arb_mode;
+ u32 gfx_pend_tlb0;
+ u32 gfx_pend_tlb1;
+ u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
+ u32 media_max_req_count;
+ u32 gfx_max_req_count;
+ u32 render_hwsp;
+ u32 ecochk;
+ u32 bsd_hwsp;
+ u32 blt_hwsp;
+ u32 tlb_rd_addr;
+
+ /* MBC */
+ u32 g3dctl;
+ u32 gsckgctl;
+ u32 mbctl;
+
+ /* GCP */
+ u32 ucgctl1;
+ u32 ucgctl3;
+ u32 rcgctl1;
+ u32 rcgctl2;
+ u32 rstctl;
+ u32 misccpctl;
+
+ /* GPM */
+ u32 gfxpause;
+ u32 rpdeuhwtc;
+ u32 rpdeuc;
+ u32 ecobus;
+ u32 pwrdwnupctl;
+ u32 rp_down_timeout;
+ u32 rp_deucsw;
+ u32 rcubmabdtmr;
+ u32 rcedata;
+ u32 spare2gh;
+
+ /* Display 1 CZ domain */
+ u32 gt_imr;
+ u32 gt_ier;
+ u32 pm_imr;
+ u32 pm_ier;
+ u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
+
+ /* GT SA CZ domain */
+ u32 tilectl;
+ u32 gt_fifoctl;
+ u32 gtlc_wake_ctrl;
+ u32 gtlc_survive;
+ u32 pmwgicz;
+
+ /* Display 2 CZ domain */
+ u32 gu_ctl0;
+ u32 gu_ctl1;
+ u32 clock_gate_dis2;
+};
+
+struct intel_gen6_power_mgmt {
+ /* work and pm_iir are protected by dev_priv->irq_lock */
+ struct work_struct work;
+ u32 pm_iir;
+
+ /* Frequencies are stored in potentially platform dependent multiples.
+ * In other words, *_freq needs to be multiplied by X to be interesting.
+ * Soft limits are those which are used for the dynamic reclocking done
+ * by the driver (raise frequencies under heavy loads, and lower for
+ * lighter loads). Hard limits are those imposed by the hardware.
+ *
+ * A distinction is made for overclocking, which is never enabled by
+ * default, and is considered to be above the hard limit if it's
+ * possible at all.
+ */
+ u8 cur_freq; /* Current frequency (cached, may not == HW) */
+ u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
+ u8 max_freq_softlimit; /* Max frequency permitted by the driver */
+ u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
+ u8 min_freq; /* AKA RPn. Minimum frequency */
+ u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
+ u8 rp1_freq; /* "less than" RP0 power/frequency */
+ u8 rp0_freq; /* Non-overclocked max frequency. */
+
+ int last_adj;
+ enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
+
+ bool enabled;
+ struct delayed_work delayed_resume_work;
+
+ /*
+ * Protects RPS/RC6 register access and PCU communication.
+ * Must be taken after struct_mutex if nested.
+ */
+ struct mutex hw_lock;
+};
+
+/* defined in intel_pm.c */
+extern spinlock_t mchdev_lock;
+
+struct intel_ilk_power_mgmt {
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c_m;
+ int r_t;
+
+ struct drm_i915_gem_object *pwrctx;
+ struct drm_i915_gem_object *renderctx;
+};
+
+struct drm_i915_private;
+struct i915_power_well;
+
+struct i915_power_well_ops {
+ /*
+ * Synchronize the well's hw state to match the current sw state, for
+ * example enable/disable it based on the current refcount. Called
+ * during driver init and resume time, possibly after first calling
+ * the enable/disable handlers.
+ */
+ void (*sync_hw)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /*
+ * Enable the well and resources that depend on it (for example
+ * interrupts located on the well). Called after the 0->1 refcount
+ * transition.
+ */
+ void (*enable)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /*
+ * Disable the well and resources that depend on it. Called after
+ * the 1->0 refcount transition.
+ */
+ void (*disable)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /* Returns the hw enabled state. */
+ bool (*is_enabled)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+};
+
+/* Power well structure for haswell */
+struct i915_power_well {
+ const char *name;
+ bool always_on;
+ /* power well enable/disable usage count */
+ int count;
+ /* cached hw enabled state */
+ bool hw_enabled;
+ unsigned long domains;
+ unsigned long data;
+ const struct i915_power_well_ops *ops;
+};
+
+struct i915_power_domains {
+ /*
+ * Power wells needed for initialization at driver init and suspend
+ * time are on. They are kept on until after the first modeset.
+ */
+ bool init_power_on;
+ bool initializing;
+ int power_well_count;
+
+ struct mutex lock;
+ int domain_use_count[POWER_DOMAIN_NUM];
+ struct i915_power_well *power_wells;
+};
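
The ops comments above define a refcounted contract: ->enable() fires only on the 0->1 transition and ->disable() only on 1->0. A standalone model of that contract (function and type names are illustrative, not the driver's API):

#include <stdio.h>

struct power_well {
	const char *name;
	int count;
	void (*enable)(struct power_well *);
	void (*disable)(struct power_well *);
};

static void well_get(struct power_well *w)
{
	if (w->count++ == 0)
		w->enable(w);	/* 0->1: turn the hardware on */
}

static void well_put(struct power_well *w)
{
	if (--w->count == 0)
		w->disable(w);	/* 1->0: turn the hardware off */
}

static void on(struct power_well *w)  { printf("%s: enable\n", w->name); }
static void off(struct power_well *w) { printf("%s: disable\n", w->name); }

int main(void)
{
	struct power_well display = { "display", 0, on, off };

	well_get(&display);	/* enables */
	well_get(&display);	/* refcount only */
	well_put(&display);	/* refcount only */
	well_put(&display);	/* disables */
	return 0;
}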
+
+struct i915_dri1_state {
+ unsigned allow_batchbuffer : 1;
+ u32 __iomem *gfx_hws_cpu_addr;
+
+ unsigned int cpp;
+ int back_offset;
+ int front_offset;
+ int current_page;
+ int page_flipping;
+
+ uint32_t counter;
+};
+
+struct i915_ums_state {
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int mm_suspended;
+};
+
+#define MAX_L3_SLICES 2
+struct intel_l3_parity {
+ u32 *remap_info[MAX_L3_SLICES];
+ struct work_struct error_work;
+ int which_slice;
+};
+
+struct i915_gem_mm {
+ /** Memory allocator for GTT stolen memory */
+ struct drm_mm stolen;
+ /** List of all objects in gtt_space. Used to restore gtt
+ * mappings on resume */
+ struct list_head bound_list;
+ /**
+ * List of objects which are not bound to the GTT (thus
+ * are idle and not used by the GPU) but still have
+ * (presumably uncached) pages still attached.
+ */
+ struct list_head unbound_list;
+
+ /** Usable portion of the GTT for GEM */
+ unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+ /** PPGTT used for aliasing the PPGTT with the GTT */
+ struct i915_hw_ppgtt *aliasing_ppgtt;
+
+ struct notifier_block oom_notifier;
+ struct shrinker shrinker;
+ bool shrinker_no_lock_stealing;
+
+ /** LRU list of objects with fence regs on them. */
+ struct list_head fence_list;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct delayed_work retire_work;
+
+ /**
+ * When we detect an idle GPU, we want to turn on
+ * powersaving features. So once we see that there
+ * are no more requests outstanding and no more
+ * arrive within a small period of time, we fire
+ * off the idle_work.
+ */
+ struct delayed_work idle_work;
+
+ /**
+ * Are we in a non-interruptible section of code like
+ * modesetting?
+ */
+ bool interruptible;
+
+ /**
+ * Is the GPU currently considered idle, or busy executing userspace
+ * requests? Whilst idle, we attempt to power down the hardware and
+ * display clocks. In order to reduce the effect on performance, there
+ * is a slight delay before we do so.
+ */
+ bool busy;
+
+	/* indicator for dispatching video commands on the two BSD rings */
+ int bsd_ring_dispatch_index;
+
+ /** Bit 6 swizzling required for X tiling */
+ uint32_t bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ uint32_t bit_6_swizzle_y;
+
+ /* accounting, useful for userland debugging */
+ spinlock_t object_stat_lock;
+ size_t object_memory;
+ u32 object_count;
+};
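
The retire_work pattern the comments above describe can be sketched like this; the real handler lives in i915_gem.c, and the one-second period here is only an assumption:

static void sketch_retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), mm.retire_work.work);

	i915_gem_retire_requests(dev_priv->dev);

	/* re-arm: keep polling roughly once a second while the ring runs */
	queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
			   round_jiffies_up_relative(HZ));
}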
+
+struct drm_i915_error_state_buf {
+ unsigned bytes;
+ unsigned size;
+ int err;
+ u8 *buf;
+ loff_t start;
+ loff_t pos;
+};
+
+struct i915_error_state_file_priv {
+ struct drm_device *dev;
+ struct drm_i915_error_state *error;
+};
+
+struct i915_gpu_error {
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+ /* Hang gpu twice in this window and your context gets banned */
+#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
+
+ struct timer_list hangcheck_timer;
+
+ /* For reset and error_state handling. */
+ spinlock_t lock;
+ /* Protected by the above dev->gpu_error.lock. */
+ struct drm_i915_error_state *first_error;
+ struct work_struct work;
+
+ unsigned long missed_irq_rings;
+
+ /**
+ * State variable controlling the reset flow and count
+ *
+	 * This is a counter which gets incremented when a reset is triggered,
+	 * and again when the reset has been handled. So an odd value (lowest
+	 * bit set) means that a reset is in progress, and an even value means
+	 * that (reset_counter >> 1) resets have completed successfully.
+	 *
+	 * If a reset does not complete successfully, the I915_WEDGED bit is
+	 * set, meaning that the hardware is terminally sour and there is no
+	 * recovery. All waiters on the reset_queue will be woken when that
+	 * happens.
+	 *
+	 * This counter is used by the wait_seqno code to notice that a reset
+	 * event happened and that it needs to restart the entire ioctl (since
+	 * most likely the seqno it waited for won't ever signal anytime soon).
+ *
+ * This is important for lock-free wait paths, where no contended lock
+ * naturally enforces the correct ordering between the bail-out of the
+ * waiter and the gpu reset work code.
+ */
+ atomic_t reset_counter;
+
+#define I915_RESET_IN_PROGRESS_FLAG 1
+#define I915_WEDGED (1 << 31)
+
+ /**
+ * Waitqueue to signal when the reset has completed. Used by clients
+	 * that wait for a pending GPU reset to complete.
+ */
+ wait_queue_head_t reset_queue;
+
+ /* Userspace knobs for gpu hang simulation;
+	 * combines both a ring mask and extra flags
+ */
+ u32 stop_rings;
+#define I915_STOP_RING_ALLOW_BAN (1 << 31)
+#define I915_STOP_RING_ALLOW_WARN (1 << 30)
+
+ /* For missed irq/seqno simulation. */
+ unsigned int test_irq_rings;
+};
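
To make the reset_counter encoding concrete, a few sample values, together with a check that mirrors the i915_reset_in_progress() helper declared further down in this header (the sketch_ name is invented):

/*
 * reset_counter == 0 -> no reset so far, none pending
 * reset_counter == 1 -> first reset in progress (lowest bit set)
 * reset_counter == 2 -> first reset completed (2 >> 1 == 1 reset)
 * reset_counter == 5 -> third reset in progress
 * bit 31 (I915_WEDGED) set -> terminally wedged, no recovery
 */
static inline bool sketch_reset_pending(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) & I915_RESET_IN_PROGRESS_FLAG;
}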
+
+enum modeset_restore {
+ MODESET_ON_LID_OPEN,
+ MODESET_DONE,
+ MODESET_SUSPENDED,
+};
+
+struct ddi_vbt_port_info {
+ uint8_t hdmi_level_shift;
+
+ uint8_t supports_dvi:1;
+ uint8_t supports_hdmi:1;
+ uint8_t supports_dp:1;
+};
+
+enum drrs_support_type {
+ DRRS_NOT_SUPPORTED = 0,
+ STATIC_DRRS_SUPPORT = 1,
+ SEAMLESS_DRRS_SUPPORT = 2
+};
+
+struct intel_vbt_data {
+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+ /* Feature bits */
+ unsigned int int_tv_support:1;
+ unsigned int lvds_dither:1;
+ unsigned int lvds_vbt:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ unsigned int display_clock_mode:1;
+ unsigned int fdi_rx_polarity_inverted:1;
+ unsigned int has_mipi:1;
+ int lvds_ssc_freq;
+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+
+ enum drrs_support_type drrs_type;
+
+ /* eDP */
+ int edp_rate;
+ int edp_lanes;
+ int edp_preemphasis;
+ int edp_vswing;
+ bool edp_initialized;
+ bool edp_support;
+ int edp_bpp;
+ struct edp_power_seq edp_pps;
struct {
- struct drm_mm gtt_space;
+ u16 pwm_freq_hz;
+ bool present;
+ bool active_low_pwm;
+ } backlight;
- struct io_mapping *gtt_mapping;
- int gtt_mtrr;
+ /* MIPI DSI */
+ struct {
+ u16 port;
+ u16 panel_id;
+ struct mipi_config *config;
+ struct mipi_pps_data *pps;
+ u8 seq_version;
+ u32 size;
+ u8 *data;
+ u8 *sequence[MIPI_SEQ_MAX];
+ } dsi;
+
+ int crt_ddc_pin;
- /**
- * Membership on list of all loaded devices, used to evict
- * inactive buffers under memory pressure.
- *
- * Modifications should only be done whilst holding the
- * shrink_list_lock spinlock.
- */
- struct list_head shrink_list;
-
- /**
- * List of objects currently involved in rendering from the
- * ringbuffer.
- *
- * Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
- * represents when the rendering involved will be completed.
- *
- * A reference is held on the buffer while on this list.
- */
- spinlock_t active_list_lock;
- struct list_head active_list;
-
- /**
- * List of objects which are not in the ringbuffer but which
- * still have a write_domain which needs to be flushed before
- * unbinding.
- *
- * last_rendering_seqno is 0 while an object is in this list.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head flushing_list;
-
- /**
- * LRU list of objects which are not in the ringbuffer and
- * are ready to unbind, but are still in the GTT.
- *
- * last_rendering_seqno is 0 while an object is in this list.
- *
- * A reference is not held on the buffer while on this list,
- * as merely being GTT-bound shouldn't prevent its being
- * freed, and we'll pull it off the list in the free path.
- */
- struct list_head inactive_list;
+ int child_dev_num;
+ union child_device_config *child_dev;
- /** LRU list of objects with fence regs on them. */
- struct list_head fence_list;
+ struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
+};
- /**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- struct list_head request_list;
+enum intel_ddb_partitioning {
+ INTEL_DDB_PART_1_2,
+ INTEL_DDB_PART_5_6, /* IVB+ */
+};
- /**
- * We leave the user IRQ off as much as possible,
- * but this means that requests will finish and never
- * be retired once the system goes idle. Set a timer to
- * fire periodically while the ring is running. When it
- * fires, go retire requests.
- */
- struct delayed_work retire_work;
+struct intel_wm_level {
+ bool enable;
+ uint32_t pri_val;
+ uint32_t spr_val;
+ uint32_t cur_val;
+ uint32_t fbc_val;
+};
- uint32_t next_gem_seqno;
+struct ilk_wm_values {
+ uint32_t wm_pipe[3];
+ uint32_t wm_lp[3];
+ uint32_t wm_lp_spr[3];
+ uint32_t wm_linetime[3];
+ bool enable_fbc_wm;
+ enum intel_ddb_partitioning partitioning;
+};
- /**
- * Waiting sequence number, if any
- */
- uint32_t waiting_gem_seqno;
+/*
+ * This struct helps track the state needed for runtime PM, which puts the
+ * device in PCI D3 state. Notice that when this happens, nothing on the
+ * graphics device works, not even register access, so we don't get
+ * interrupts or anything else.
+ *
+ * Every piece of our code that needs to actually touch the hardware needs to
+ * either call intel_runtime_pm_get or call intel_display_power_get with the
+ * appropriate power domain.
+ *
+ * Our driver uses the autosuspend delay feature, which means we'll only really
+ * suspend if we stay with zero refcount for a certain amount of time. The
+ * default value is currently very conservative (see intel_init_runtime_pm), but
+ * it can be changed with the standard runtime PM files from sysfs.
+ *
+ * The irqs_disabled variable becomes true exactly after we disable the IRQs and
+ * goes back to false exactly before we reenable the IRQs. We use this variable
+ * to check if someone is trying to enable/disable IRQs while they're supposed
+ * to be disabled. This shouldn't happen, and if it does we'll print some
+ * error messages.
+ *
+ * For more, read Documentation/power/runtime_pm.txt.
+ */
+struct i915_runtime_pm {
+ bool suspended;
+ bool irqs_disabled;
+};
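
A minimal usage sketch of the discipline described above, bracketing every hardware access with a runtime PM reference; intel_runtime_pm_get()/_put() are real driver entry points, but this wrapper and its register offset are illustrative only:

static u32 sketch_read_reg_awake(struct drm_i915_private *dev_priv,
				 u32 reg_offset)
{
	u32 val;

	intel_runtime_pm_get(dev_priv);		/* wake the device if suspended */
	val = readl(dev_priv->regs + reg_offset);
	intel_runtime_pm_put(dev_priv);		/* refcount 0 arms autosuspend */

	return val;
}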
- /**
- * Last seq seen at irq time
- */
- uint32_t irq_gem_seqno;
-
- /**
- * Flag if the X Server, and thus DRM, is not currently in
- * control of the device.
- *
- * This is set between LeaveVT and EnterVT. It needs to be
- * replaced with a semaphore. It also needs to be
- * transitioned away from for kernel modesetting.
- */
- int suspended;
-
- /**
- * Flag if the hardware appears to be wedged.
- *
- * This is set when attempts to idle the device timeout.
- * It prevents command submission from occuring and makes
- * every pending request fail
- */
- atomic_t wedged;
+enum intel_pipe_crc_source {
+ INTEL_PIPE_CRC_SOURCE_NONE,
+ INTEL_PIPE_CRC_SOURCE_PLANE1,
+ INTEL_PIPE_CRC_SOURCE_PLANE2,
+ INTEL_PIPE_CRC_SOURCE_PF,
+ INTEL_PIPE_CRC_SOURCE_PIPE,
+ /* TV/DP on pre-gen5/vlv can't use the pipe source. */
+ INTEL_PIPE_CRC_SOURCE_TV,
+ INTEL_PIPE_CRC_SOURCE_DP_B,
+ INTEL_PIPE_CRC_SOURCE_DP_C,
+ INTEL_PIPE_CRC_SOURCE_DP_D,
+ INTEL_PIPE_CRC_SOURCE_AUTO,
+ INTEL_PIPE_CRC_SOURCE_MAX,
+};
- /** Bit 6 swizzling required for X tiling */
- uint32_t bit_6_swizzle_x;
- /** Bit 6 swizzling required for Y tiling */
- uint32_t bit_6_swizzle_y;
+struct intel_pipe_crc_entry {
+ uint32_t frame;
+ uint32_t crc[5];
+};
+
+#define INTEL_PIPE_CRC_ENTRIES_NR 128
+struct intel_pipe_crc {
+ spinlock_t lock;
+ bool opened; /* exclusive access to the result file */
+ struct intel_pipe_crc_entry *entries;
+ enum intel_pipe_crc_source source;
+ int head, tail;
+ wait_queue_head_t wq;
+};
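
A hedged sketch of how the producer side might append a CRC entry to this ring buffer (the real code lives in i915_irq.c; overflow handling is omitted and the helper name is invented):

static void sketch_pipe_crc_add_entry(struct intel_pipe_crc *pipe_crc,
				      uint32_t frame, const uint32_t crc[5])
{
	struct intel_pipe_crc_entry *entry;
	int i;

	spin_lock(&pipe_crc->lock);
	entry = &pipe_crc->entries[pipe_crc->head];
	entry->frame = frame;
	for (i = 0; i < 5; i++)
		entry->crc[i] = crc[i];
	/* head and tail wrap modulo the fixed entry count */
	pipe_crc->head = (pipe_crc->head + 1) % INTEL_PIPE_CRC_ENTRIES_NR;
	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);	/* unblock a waiting reader */
}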
+
+struct drm_i915_private {
+ struct drm_device *dev;
+ struct kmem_cache *slab;
+
+ const struct intel_device_info info;
+
+ int relative_constants_mode;
+
+ void __iomem *regs;
+
+ struct intel_uncore uncore;
+
+ struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
+ /** gmbus_mutex protects against concurrent usage of the single hw gmbus
+ * controller on different i2c buses. */
+ struct mutex gmbus_mutex;
+
+ /**
+ * Base address of the gmbus and gpio block.
+ */
+ uint32_t gpio_mmio_base;
+
+ /* MMIO base address for MIPI regs */
+ uint32_t mipi_mmio_base;
+
+ wait_queue_head_t gmbus_wait_queue;
+
+ struct pci_dev *bridge_dev;
+ struct intel_engine_cs ring[I915_NUM_RINGS];
+ uint32_t last_seqno, next_seqno;
+
+ drm_dma_handle_t *status_page_dmah;
+ struct resource mch_res;
+
+ /* protects the irq masks */
+ spinlock_t irq_lock;
+
+ bool display_irqs_enabled;
+
+ /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+ struct pm_qos_request pm_qos;
+
+ /* DPIO indirect register protection */
+ struct mutex dpio_lock;
+
+ /** Cached value of IMR to avoid reads in updating the bitfield */
+ union {
+ u32 irq_mask;
+ u32 de_irq_mask[I915_MAX_PIPES];
+ };
+ u32 gt_irq_mask;
+ u32 pm_irq_mask;
+ u32 pm_rps_events;
+ u32 pipestat_irq_mask[I915_MAX_PIPES];
+
+ struct work_struct hotplug_work;
+ bool enable_hotplug_processing;
+ struct {
+ unsigned long hpd_last_jiffies;
+ int hpd_cnt;
+ enum {
+ HPD_ENABLED = 0,
+ HPD_DISABLED = 1,
+ HPD_MARK_DISABLED = 2
+ } hpd_mark;
+ } hpd_stats[HPD_NUM_PINS];
+ u32 hpd_event_bits;
+ struct timer_list hotplug_reenable_timer;
+
+ struct i915_fbc fbc;
+ struct i915_drrs drrs;
+ struct intel_opregion opregion;
+ struct intel_vbt_data vbt;
+
+ /* overlay */
+ struct intel_overlay *overlay;
+
+ /* backlight registers and fields in struct intel_panel */
+ spinlock_t backlight_lock;
+
+ /* LVDS info */
+ bool no_aux_handshake;
+
+ struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
+ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+ int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
+ unsigned int fsb_freq, mem_freq, is_ddr3;
+ unsigned int vlv_cdclk_freq;
+
+ /**
+ * wq - Driver workqueue for GEM.
+ *
+ * NOTE: Work items scheduled here are not allowed to grab any modeset
+ * locks, for otherwise the flushing done in the pageflip code will
+ * result in deadlocks.
+ */
+ struct workqueue_struct *wq;
+
+ /* Display functions */
+ struct drm_i915_display_funcs display;
+
+ /* PCH chipset type */
+ enum intel_pch pch_type;
+ unsigned short pch_id;
+
+ unsigned long quirks;
+
+ enum modeset_restore modeset_restore;
+ struct mutex modeset_restore_lock;
+
+ struct list_head vm_list; /* Global list of all address spaces */
+ struct i915_gtt gtt; /* VM representing the global address space */
+
+ struct i915_gem_mm mm;
+#if defined(CONFIG_MMU_NOTIFIER)
+ DECLARE_HASHTABLE(mmu_notifiers, 7);
+#endif
+
+ /* Kernel Modesetting */
- /* storage for physical objects */
- struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
- } mm;
struct sdvo_device_mapping sdvo_mappings[2];
- /* indicate whether the LVDS_BORDER should be enabled or not */
- unsigned int lvds_border_bits;
- struct drm_crtc *plane_to_crtc_mapping[2];
- struct drm_crtc *pipe_to_crtc_mapping[2];
+ struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
+ struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
wait_queue_head_t pending_flip_queue;
+#ifdef CONFIG_DEBUG_FS
+ struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
+#endif
+
+ int num_shared_dpll;
+ struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ struct intel_ddi_plls ddi_plls;
+ int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
+
/* Reclocking support */
bool render_reclock_avail;
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
- struct work_struct idle_work;
- struct timer_list idle_timer;
- bool busy;
u16 orig_clock;
- int child_dev_num;
- struct child_device_config *child_dev;
- struct drm_connector *int_lvds_connector;
-} drm_i915_private_t;
-/** driver private structure attached to each drm_gem_object */
+ bool mchbar_need_disable;
+
+ struct intel_l3_parity l3_parity;
+
+ /* Cannot be determined by PCIID. You must always read a register. */
+ size_t ellc_size;
+
+ /* gen6+ rps state */
+ struct intel_gen6_power_mgmt rps;
+
+ /* ilk-only ips/rps state. Everything in here is protected by the global
+ * mchdev_lock in intel_pm.c */
+ struct intel_ilk_power_mgmt ips;
+
+ struct i915_power_domains power_domains;
+
+ struct i915_psr psr;
+
+ struct i915_gpu_error gpu_error;
+
+ struct drm_i915_gem_object *vlv_pctx;
+
+#ifdef CONFIG_DRM_I915_FBDEV
+ /* list of fbdev register on this device */
+ struct intel_fbdev *fbdev;
+#endif
+
+ /*
+	 * The console lock may be contended at resume, but we don't
+	 * want the resume path to block on it.
+ */
+ struct work_struct console_resume_work;
+
+ struct drm_property *broadcast_rgb_property;
+ struct drm_property *force_audio_property;
+
+ uint32_t hw_context_size;
+ struct list_head context_list;
+
+ u32 fdi_rx_config;
+
+ u32 suspend_count;
+ struct i915_suspend_saved_registers regfile;
+ struct vlv_s0ix_state vlv_s0ix_state;
+
+ struct {
+ /*
+ * Raw watermark latency values:
+ * in 0.1us units for WM0,
+ * in 0.5us units for WM1+.
+ */
+ /* primary */
+ uint16_t pri_latency[5];
+ /* sprite */
+ uint16_t spr_latency[5];
+ /* cursor */
+ uint16_t cur_latency[5];
+
+ /* current hardware state */
+ struct ilk_wm_values hw;
+ } wm;
+
+ struct i915_runtime_pm pm;
+
+ /* Old dri1 support infrastructure, beware the dragons ya fools entering
+ * here! */
+ struct i915_dri1_state dri1;
+ /* Old ums support infrastructure, same warning applies. */
+ struct i915_ums_state ums;
+
+ /*
+ * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
+ * will be rejected. Instead look for a better place.
+ */
+};
+
+static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
+{
+ return dev->dev_private;
+}
+
+/* Iterate over initialised rings */
+#define for_each_ring(ring__, dev_priv__, i__) \
+ for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+ if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
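
Typical usage of the iterator, which silently skips rings that were never initialised on this platform; i915_gem_retire_requests_ring() is declared later in this header, and the wrapper itself is only an example:

static inline void sketch_retire_all_rings(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		i915_gem_retire_requests_ring(ring);
}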
+
+enum hdmi_force_audio {
+ HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
+ HDMI_AUDIO_OFF, /* force turn off HDMI audio */
+ HDMI_AUDIO_AUTO, /* trust EDID */
+ HDMI_AUDIO_ON, /* force turn on HDMI audio */
+};
+
+#define I915_GTT_OFFSET_NONE ((u32)-1)
+
+struct drm_i915_gem_object_ops {
+ /* Interface between the GEM object and its backing storage.
+ * get_pages() is called once prior to the use of the associated set
+	 * of pages, before binding them into the GTT, and put_pages() is
+ * called after we no longer need them. As we expect there to be
+ * associated cost with migrating pages between the backing storage
+ * and making them available for the GPU (e.g. clflush), we may hold
+ * onto the pages after they are no longer referenced by the GPU
+ * in case they may be used again shortly (for example migrating the
+ * pages to a different memory domain within the GTT). put_pages()
+ * will therefore most likely be called when the object itself is
+ * being released or under memory pressure (where we attempt to
+ * reap pages for the shrinker).
+ */
+ int (*get_pages)(struct drm_i915_gem_object *);
+ void (*put_pages)(struct drm_i915_gem_object *);
+ int (*dmabuf_export)(struct drm_i915_gem_object *);
+ void (*release)(struct drm_i915_gem_object *);
+};
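
For orientation, a skeletal implementation of this vtable; the real shmem, stolen and userptr variants live in i915_gem*.c, so every body below is a placeholder:

static int sketch_backing_get_pages(struct drm_i915_gem_object *obj)
{
	/* allocate and populate obj->pages (an sg_table) here */
	return 0;
}

static void sketch_backing_put_pages(struct drm_i915_gem_object *obj)
{
	/* release whatever get_pages() acquired; obj->pages is going away */
}

static const struct drm_i915_gem_object_ops sketch_backing_ops = {
	.get_pages = sketch_backing_get_pages,
	.put_pages = sketch_backing_put_pages,
};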
+
struct drm_i915_gem_object {
- struct drm_gem_object *obj;
+ struct drm_gem_object base;
- /** Current space allocated to this object in the GTT, if any. */
- struct drm_mm_node *gtt_space;
+ const struct drm_i915_gem_object_ops *ops;
- /** This object's place on the active/flushing/inactive lists */
- struct list_head list;
+ /** List of VMAs backed by this object */
+ struct list_head vma_list;
- /** This object's place on the fenced object LRU */
- struct list_head fence_list;
+ /** Stolen memory for this object, instead of being backed by shmem. */
+ struct drm_mm_node *stolen;
+ struct list_head global_list;
+
+ struct list_head ring_list;
+ /** Used in execbuf to temporarily hold a ref */
+ struct list_head obj_exec_link;
/**
- * This is set if the object is on the active or flushing lists
- * (has pending rendering), and is not set if it's on inactive (ready
- * to be unbound).
+ * This is set if the object is on the active lists (has pending
+	 * rendering and so a non-zero seqno), and is not set if it is on
+	 * the inactive (ready to be unbound) list.
*/
- int active;
+ unsigned int active:1;
/**
* This is set if the object has been written to since last bound
* to the GTT
*/
- int dirty;
+ unsigned int dirty:1;
- /** AGP memory structure for our GTT binding. */
- DRM_AGP_MEM *agp_mem;
+ /**
+ * Fence register bits (if any) for this object. Will be set
+ * as needed when mapped into the GTT.
+ * Protected by dev->struct_mutex.
+ */
+ signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
- struct page **pages;
- int pages_refcount;
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ unsigned int madv:2;
/**
- * Current offset of the object in GTT space.
- *
- * This is the same as gtt_space->start
+ * Current tiling mode for the object.
+ */
+ unsigned int tiling_mode:2;
+ /**
+ * Whether the tiling parameters for the currently associated fence
+ * register have changed. Note that for the purposes of tracking
+ * tiling changes we also treat the unfenced register, the register
+ * slot that the object occupies whilst it executes a fenced
+ * command (such as BLT on gen2/3), as a "fence".
*/
- uint32_t gtt_offset;
+ unsigned int fence_dirty:1;
/**
- * Fake offset for use by mmap(2)
+ * Is the object at the current location in the gtt mappable and
+ * fenceable? Used to avoid costly recalculations.
*/
- uint64_t mmap_offset;
+ unsigned int map_and_fenceable:1;
/**
- * Fence register bits (if any) for this object. Will be set
- * as needed when mapped into the GTT.
- * Protected by dev->struct_mutex.
+ * Whether the current gtt mapping needs to be mappable (and isn't just
+	 * mappable by accident). Track pin and fault separately for a more
+ * accurate mappable working set.
*/
- int fence_reg;
+ unsigned int fault_mappable:1;
+ unsigned int pin_mappable:1;
+ unsigned int pin_display:1;
- /** How many users have pinned this object in GTT space */
- int pin_count;
+ /*
+	 * Is the GPU currently using a fence to access this buffer?
+ */
+ unsigned int pending_fenced_gpu_access:1;
+ unsigned int fenced_gpu_access:1;
+
+ unsigned int cache_level:3;
+
+ unsigned int has_aliasing_ppgtt_mapping:1;
+ unsigned int has_global_gtt_mapping:1;
+ unsigned int has_dma_mapping:1;
+
+ struct sg_table *pages;
+ int pages_pin_count;
+
+ /* prime dma-buf support */
+ void *dma_buf_vmapping;
+ int vmapping_count;
+
+ struct intel_engine_cs *ring;
/** Breadcrumb of last rendering to the buffer. */
- uint32_t last_rendering_seqno;
+ uint32_t last_read_seqno;
+ uint32_t last_write_seqno;
+ /** Breadcrumb of last fenced GPU access to the buffer. */
+ uint32_t last_fenced_seqno;
- /** Current tiling mode for the object. */
- uint32_t tiling_mode;
+ /** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
- /** Record of address bit 17 of each page at last unbind. */
- long *bit_17;
+ /** References from framebuffers, locks out tiling changes. */
+ unsigned long framebuffer_references;
- /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
- uint32_t agp_type;
-
- /**
- * If present, while GEM_DOMAIN_CPU is in the read domain this array
- * flags which individual pages are valid.
- */
- uint8_t *page_cpu_valid;
+ /** Record of address bit 17 of each page at last unbind. */
+ unsigned long *bit_17;
/** User space pin count and filp owning the pin */
- uint32_t user_pin_count;
+ unsigned long user_pin_count;
struct drm_file *pin_filp;
/** for phy allocated objects */
- struct drm_i915_gem_phys_object *phys_obj;
-
- /**
- * Used for checking the object doesn't appear more than once
- * in an execbuffer object list.
- */
- int in_execbuffer;
-
- /**
- * Advice: are the backing pages purgeable?
- */
- int madv;
-
- /**
- * Number of crtcs where this object is currently the fb, but
- * will be page flipped away on the next vblank. When it
- * reaches 0, dev_priv->pending_flip_queue will be woken up.
- */
- atomic_t pending_flip;
+ drm_dma_handle_t *phys_handle;
+
+ union {
+ struct i915_gem_userptr {
+ uintptr_t ptr;
+ unsigned read_only :1;
+ unsigned workers :4;
+#define I915_GEM_USERPTR_MAX_WORKERS 15
+
+ struct mm_struct *mm;
+ struct i915_mmu_object *mn;
+ struct work_struct *work;
+ } userptr;
+ };
};
+#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
/**
* Request queue structure.
@@ -692,43 +1730,321 @@ struct drm_i915_gem_object {
* an emission time with seqnos for tracking how far ahead of the GPU we are.
*/
struct drm_i915_gem_request {
+ /** On Which ring this request was generated */
+ struct intel_engine_cs *ring;
+
/** GEM sequence number associated with this request. */
uint32_t seqno;
+ /** Position in the ringbuffer of the start of the request */
+ u32 head;
+
+ /** Position in the ringbuffer of the end of the request */
+ u32 tail;
+
+ /** Context related to this request */
+ struct intel_context *ctx;
+
+ /** Batch buffer related to this request if any */
+ struct drm_i915_gem_object *batch_obj;
+
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
/** global list entry for this request */
struct list_head list;
+ struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
};
struct drm_i915_file_private {
+ struct drm_i915_private *dev_priv;
+ struct drm_file *file;
+
struct {
+ spinlock_t lock;
struct list_head request_list;
+ struct delayed_work idle_work;
} mm;
+ struct idr context_idr;
+
+ atomic_t rps_wait_boost;
+ struct intel_engine_cs *bsd_ring;
};
-enum intel_chip_family {
- CHIP_I8XX = 0x01,
- CHIP_I9XX = 0x02,
- CHIP_I915 = 0x04,
- CHIP_I965 = 0x08,
+/*
+ * A command that requires special handling by the command parser.
+ */
+struct drm_i915_cmd_descriptor {
+ /*
+ * Flags describing how the command parser processes the command.
+ *
+ * CMD_DESC_FIXED: The command has a fixed length if this is set,
+ * a length mask if not set
+ * CMD_DESC_SKIP: The command is allowed but does not follow the
+ * standard length encoding for the opcode range in
+ * which it falls
+ * CMD_DESC_REJECT: The command is never allowed
+ * CMD_DESC_REGISTER: The command should be checked against the
+	 *                    register whitelist for the appropriate ring
+	 * CMD_DESC_BITMASK: The command is only allowed if it passes the
+	 *                   masked-value checks described in bits[] below
+ * CMD_DESC_MASTER: The command is allowed if the submitting process
+ * is the DRM master
+ */
+ u32 flags;
+#define CMD_DESC_FIXED (1<<0)
+#define CMD_DESC_SKIP (1<<1)
+#define CMD_DESC_REJECT (1<<2)
+#define CMD_DESC_REGISTER (1<<3)
+#define CMD_DESC_BITMASK (1<<4)
+#define CMD_DESC_MASTER (1<<5)
+
+ /*
+ * The command's unique identification bits and the bitmask to get them.
+ * This isn't strictly the opcode field as defined in the spec and may
+ * also include type, subtype, and/or subop fields.
+ */
+ struct {
+ u32 value;
+ u32 mask;
+ } cmd;
+
+ /*
+	 * The command's length. If CMD_DESC_FIXED is set, the command has a
+	 * fixed length and does not include a length field; otherwise the mask
+	 * selects the command's length field. All command entries in a command
+	 * table must include length information.
+ */
+ union {
+ u32 fixed;
+ u32 mask;
+ } length;
+
+ /*
+ * Describes where to find a register address in the command to check
+ * against the ring's register whitelist. Only valid if flags has the
+ * CMD_DESC_REGISTER bit set.
+ */
+ struct {
+ u32 offset;
+ u32 mask;
+ } reg;
+
+#define MAX_CMD_DESC_BITMASKS 3
+ /*
+ * Describes command checks where a particular dword is masked and
+ * compared against an expected value. If the command does not match
+ * the expected value, the parser rejects it. Only valid if flags has
+ * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
+ * are valid.
+ *
+ * If the check specifies a non-zero condition_mask then the parser
+ * only performs the check when the bits specified by condition_mask
+ * are non-zero.
+ */
+ struct {
+ u32 offset;
+ u32 mask;
+ u32 expected;
+ u32 condition_offset;
+ u32 condition_mask;
+ } bits[MAX_CMD_DESC_BITMASKS];
};
-extern struct drm_ioctl_desc i915_ioctls[];
+/*
+ * A table of commands requiring special handling by the command parser.
+ *
+ * Each ring has an array of tables. Each table consists of an array of command
+ * descriptors, which must be sorted with command opcodes in ascending order.
+ */
+struct drm_i915_cmd_table {
+ const struct drm_i915_cmd_descriptor *table;
+ int count;
+};
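
An illustrative entry tying the two structures together; the opcode, masks and offsets below are invented for the example, not taken from any ring's real tables:

static const struct drm_i915_cmd_descriptor sketch_cmds[] = {
	{
		/* fixed-length command whose dword 1 holds a register offset */
		.flags	= CMD_DESC_FIXED | CMD_DESC_REGISTER,
		.cmd	= { .value = 0x11000000, .mask = 0xff000000 },
		.length	= { .fixed = 2 },
		.reg	= { .offset = 1, .mask = 0x007ffffc },
	},
};

static const struct drm_i915_cmd_table sketch_cmd_table = {
	.table = sketch_cmds,
	.count = ARRAY_SIZE(sketch_cmds),
};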
+
+#define INTEL_INFO(dev) (&to_i915(dev)->info)
+
+#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
+#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
+#define IS_I865G(dev) ((dev)->pdev->device == 0x2572)
+#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592)
+#define IS_I945G(dev) ((dev)->pdev->device == 0x2772)
+#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
+#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42)
+#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011)
+#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
+#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046)
+#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
+#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \
+ (dev)->pdev->device == 0x0152 || \
+ (dev)->pdev->device == 0x015a)
+#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \
+ (dev)->pdev->device == 0x0106 || \
+ (dev)->pdev->device == 0x010A)
+#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
+#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
+#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
+#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
+#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
+ ((dev)->pdev->device & 0xFF00) == 0x0C00)
+#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
+ (((dev)->pdev->device & 0xf) == 0x2 || \
+ ((dev)->pdev->device & 0xf) == 0x6 || \
+ ((dev)->pdev->device & 0xf) == 0xe))
+#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
+ ((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
+#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
+ ((dev)->pdev->device & 0x00F0) == 0x0020)
+/* ULX machines are also considered ULT. */
+#define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \
+ (dev)->pdev->device == 0x0A1E)
+#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
+
+/*
+ * The genX designation typically refers to the render engine, so render
+ * capability related checks should use IS_GEN, while display and other checks
+ * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
+ * chips, etc.).
+ */
+#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
+#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
+#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
+
+#define RENDER_RING (1<<RCS)
+#define BSD_RING (1<<VCS)
+#define BLT_RING (1<<BCS)
+#define VEBOX_RING (1<<VECS)
+#define BSD2_RING (1<<VCS2)
+#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
+#define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING)
+#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
+#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
+#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
+#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
+ to_i915(dev)->ellc_size)
+#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+
+#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && \
+ (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
+#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 \
+ && !IS_GEN8(dev))
+#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
+#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
+
+#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+
+/* Early gen2 have a totally busted CS tlb and require pinned batches. */
+#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+/*
+ * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
+ * even when in MSI mode. This results in spurious interrupt warnings if the
+ * legacy irq no. is shared with another device. The kernel then disables that
+ * interrupt source and so prevents the other device from working properly.
+ */
+#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
+ IS_I915GM(dev)))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
+#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
+
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+
+#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
+
+#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
+#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
+#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
+#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
+ IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
+
+#define INTEL_PCH_DEVICE_ID_MASK 0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+
+#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
+#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
+#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+
+/* DPF == dynamic parity feature */
+#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
+
+#define GT_FREQUENCY_MULTIPLIER 50
+
+#include "i915_trace.h"
+
+extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
-extern unsigned int i915_fbpercrtc;
-extern unsigned int i915_powersave;
-extern void i915_save_display(struct drm_device *dev);
-extern void i915_restore_display(struct drm_device *dev);
+extern int i915_suspend(struct drm_device *dev, pm_message_t state);
+extern int i915_resume(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
+/* i915_params.c */
+struct i915_params {
+ int modeset;
+ int panel_ignore_lid;
+ unsigned int powersave;
+ int semaphores;
+ unsigned int lvds_downclock;
+ int lvds_channel_mode;
+ int panel_use_ssc;
+ int vbt_sdvo_panel_type;
+ int enable_rc6;
+ int enable_fbc;
+ int enable_ppgtt;
+ int enable_psr;
+ unsigned int preliminary_hw_support;
+ int disable_power_well;
+ int enable_ips;
+ int invert_brightness;
+ int enable_cmd_parser;
+ /* leave bools at the end to not create holes */
+ bool enable_hangcheck;
+ bool fastboot;
+ bool prefault_disable;
+ bool reset;
+ bool disable_display;
+ bool disable_vtd_wa;
+};
+extern struct i915_params i915 __read_mostly;
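
The parameters are consumed through this single i915 instance; for example, a feature gate might read as below (HAS_FBC() is defined earlier in this header, while the helper itself is hypothetical):

static inline bool sketch_fbc_requested(struct drm_device *dev)
{
	/* negative values conventionally mean "use the per-platform default" */
	if (i915.enable_fbc >= 0)
		return i915.enable_fbc;

	return HAS_FBC(dev);
}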
+
/* i915_dma.c */
+void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
@@ -739,61 +2055,51 @@ extern void i915_driver_preclose(struct drm_device *dev,
extern void i915_driver_postclose(struct drm_device *dev,
struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
+#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+#endif
extern int i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect *boxes,
- int i, int DR1, int DR4);
-extern int i965_reset(struct drm_device *dev, u8 flags);
+ struct drm_clip_rect *box,
+ int DR1, int DR4);
+extern int intel_gpu_reset(struct drm_device *dev);
+extern int i915_reset(struct drm_device *dev);
+extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
+extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
+
+extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
-void i915_hangcheck_elapsed(unsigned long data);
-extern int i915_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-void i915_user_irq_get(struct drm_device *dev);
-void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
-void i915_user_irq_put(struct drm_device *dev);
-extern void i915_enable_interrupt (struct drm_device *dev);
-
-extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i915_driver_irq_preinstall(struct drm_device * dev);
-extern int i915_driver_irq_postinstall(struct drm_device *dev);
-extern void i915_driver_irq_uninstall(struct drm_device * dev);
-extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_enable_vblank(struct drm_device *dev, int crtc);
-extern void i915_disable_vblank(struct drm_device *dev, int crtc);
-extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
-extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
-extern int i915_vblank_swap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
+void i915_queue_hangcheck(struct drm_device *dev);
+__printf(3, 4)
+void i915_handle_error(struct drm_device *dev, bool wedged,
+ const char *fmt, ...);
+
+void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
+ int new_delay);
+extern void intel_irq_init(struct drm_device *dev);
+extern void intel_hpd_init(struct drm_device *dev);
+
+extern void intel_uncore_sanitize(struct drm_device *dev);
+extern void intel_uncore_early_sanitize(struct drm_device *dev);
+extern void intel_uncore_init(struct drm_device *dev);
+extern void intel_uncore_check_errors(struct drm_device *dev);
+extern void intel_uncore_fini(struct drm_device *dev);
void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+ u32 status_mask);
void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
-
-void intel_enable_asle (struct drm_device *dev);
+i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+ u32 status_mask);
+void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
+void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
-/* i915_mem.c */
-extern int i915_mem_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_mem_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_mem_init_heap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern void i915_mem_takedown(struct mem_block **heap);
-extern void i915_mem_release(struct drm_device * dev,
- struct drm_file *file_priv, struct mem_block *heap);
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -821,6 +2127,10 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
@@ -833,226 +2143,616 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_init_userptr(struct drm_device *dev);
+int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
-int i915_gem_init_object(struct drm_gem_object *obj);
+void *i915_gem_object_alloc(struct drm_device *dev);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ size_t size);
+void i915_init_vm(struct drm_i915_private *dev_priv,
+ struct i915_address_space *vm);
void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
-void i915_gem_object_unpin(struct drm_gem_object *obj);
-int i915_gem_object_unbind(struct drm_gem_object *obj);
-void i915_gem_release_mmap(struct drm_gem_object *obj);
+void i915_gem_vma_destroy(struct i915_vma *vma);
+
+#define PIN_MAPPABLE 0x1
+#define PIN_NONBLOCK 0x2
+#define PIN_GLOBAL 0x4
+#define PIN_OFFSET_BIAS 0x8
+#define PIN_OFFSET_MASK (~4095)
+int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags);
+int __must_check i915_vma_unbind(struct i915_vma *vma);
+int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev);
-bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_work_handler(struct work_struct *work);
-void i915_gem_clflush_object(struct drm_gem_object *obj);
-int i915_gem_object_set_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain);
-int i915_gem_init_ringbuffer(struct drm_device *dev);
+
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+ int *needs_clflush);
+
+int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+{
+ struct sg_page_iter sg_iter;
+
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
+ return sg_page_iter_page(&sg_iter);
+
+ return NULL;
+}
+static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+ BUG_ON(obj->pages == NULL);
+ obj->pages_pin_count++;
+}
+static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+ BUG_ON(obj->pages_pin_count == 0);
+ obj->pages_pin_count--;
+}
+
+int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+int i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *to);
+void i915_vma_move_to_active(struct i915_vma *vma,
+ struct intel_engine_cs *ring);
+int i915_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+ return (int32_t)(seq1 - seq2) >= 0;
+}
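
The signed cast makes the comparison robust across u32 wraparound; as a worked example (the demo function is not part of this patch):

/* 2 is "later" than 0xfffffffe: (int32_t)(2 - 0xfffffffe) == 4 >= 0,
 * even though 2 < 0xfffffffe when compared as plain u32 values. */
static inline bool sketch_seqno_wrap_demo(void)
{
	return i915_seqno_passed(2, 0xfffffffeu);	/* true */
}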
+
+int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
+bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_request *
+i915_gem_find_active_request(struct intel_engine_cs *ring);
+
+bool i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
+int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
+ bool interruptible);
+static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
+{
+ return unlikely(atomic_read(&error->reset_counter)
+ & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+}
+
+static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+{
+ return atomic_read(&error->reset_counter) & I915_WEDGED;
+}
+
+static inline u32 i915_reset_count(struct i915_gpu_error *error)
+{
+ return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
+}
+
+static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->gpu_error.stop_rings == 0 ||
+ dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
+}
+
+static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->gpu_error.stop_rings == 0 ||
+ dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
+}
+
+void i915_gem_reset(struct drm_device *dev);
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
+int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_init(struct drm_device *dev);
+int __must_check i915_gem_init_hw(struct drm_device *dev);
+int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
+void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
- unsigned long end);
-int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
- uint32_t flush_domains);
-int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
+int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_suspend(struct drm_device *dev);
+int __i915_add_request(struct intel_engine_cs *ring,
+ struct drm_file *file,
+ struct drm_i915_gem_object *batch_obj,
+ u32 *seqno);
+#define i915_add_request(ring, seqno) \
+ __i915_add_request(ring, NULL, NULL, seqno)
+int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
+ uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
- int write);
-int i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj, int id);
-void i915_gem_detach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj);
-void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj);
-void i915_gem_object_put_pages(struct drm_gem_object *obj);
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
-void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
-
-void i915_gem_shrinker_init(void);
-void i915_gem_shrinker_exit(void);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+ bool write);
+int __must_check
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
+ struct intel_engine_cs *pipelined);
+void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+ int align);
+int i915_gem_open(struct drm_device *dev, struct drm_file *file);
+void i915_gem_release(struct drm_device *dev, struct drm_file *file);
+
+uint32_t
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+ int tiling_mode, bool fenced);
+
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gem_obj, int flags);
+
+void i915_gem_restore_fences(struct drm_device *dev);
+
+unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
+
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->pin_count > 0)
+ return true;
+ return false;
+}
+
+/* Some GGTT VM helpers */
+#define obj_to_ggtt(obj) \
+ (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
+static inline bool i915_is_ggtt(struct i915_address_space *vm)
+{
+ struct i915_address_space *ggtt =
+ &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
+ return vm == ggtt;
+}
+
+static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
+}
+
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
+}
+
+static inline unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_size(obj, obj_to_ggtt(obj));
+}
+
+static inline int __must_check
+i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ unsigned flags)
+{
+ return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL);
+}
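
Example use of the PIN_* flags defined earlier with this helper; the wrapper is hypothetical:

static inline int sketch_pin_for_gtt_mmap(struct drm_i915_gem_object *obj)
{
	/* require the mappable aperture, but fail instead of evicting */
	return i915_gem_obj_ggtt_pin(obj, 4096,
				     PIN_MAPPABLE | PIN_NONBLOCK);
}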
+
+static inline int
+i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
+{
+ return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
+}
+
+void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
+
+/* i915_gem_context.c */
+#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
+int __must_check i915_gem_context_init(struct drm_device *dev);
+void i915_gem_context_fini(struct drm_device *dev);
+void i915_gem_context_reset(struct drm_device *dev);
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
+int i915_gem_context_enable(struct drm_i915_private *dev_priv);
+void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
+int i915_switch_context(struct intel_engine_cs *ring,
+ struct intel_context *to);
+struct intel_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
+void i915_gem_context_free(struct kref *ctx_ref);
+static inline void i915_gem_context_reference(struct intel_context *ctx)
+{
+ kref_get(&ctx->ref);
+}
+
+static inline void i915_gem_context_unreference(struct intel_context *ctx)
+{
+ kref_put(&ctx->ref, i915_gem_context_free);
+}
+
+static inline bool i915_gem_context_is_default(const struct intel_context *c)
+{
+ return c->id == DEFAULT_CONTEXT_ID;
+}
+
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+/* i915_gem_render_state.c */
+int i915_gem_render_state_init(struct intel_engine_cs *ring);
+/* i915_gem_evict.c */
+int __must_check i915_gem_evict_something(struct drm_device *dev,
+ struct i915_address_space *vm,
+ int min_size,
+ unsigned alignment,
+ unsigned cache_level,
+ unsigned long start,
+ unsigned long end,
+ unsigned flags);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
+int i915_gem_evict_everything(struct drm_device *dev);
+
+/* belongs in i915_gem_gtt.h */
+static inline void i915_gem_chipset_flush(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ intel_gtt_chipset_flush();
+}
+
+/* i915_gem_stolen.c */
+int i915_gem_init_stolen(struct drm_device *dev);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
+void i915_gem_cleanup_stolen(struct drm_device *dev);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+ u32 stolen_offset,
+ u32 gtt_offset,
+ u32 size);
+void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
+static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
+ return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ obj->tiling_mode != I915_TILING_NONE;
+}
+
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
-bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
- int tiling_mode);
-bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
/* i915_gem_debug.c */
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
- const char *where, uint32_t mark);
-#if WATCH_INACTIVE
-void i915_verify_inactive(struct drm_device *dev, char *file, int line);
+#if WATCH_LISTS
+int i915_verify_lists(struct drm_device *dev);
#else
-#define i915_verify_inactive(dev, file, line)
+#define i915_verify_lists(dev) 0
#endif
-void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
- const char *where, uint32_t mark);
-void i915_dump_lru(struct drm_device *dev, const char *where);
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+void intel_display_crc_init(struct drm_device *dev);
+#else
+static inline void intel_display_crc_init(struct drm_device *dev) {}
+#endif
-/* i915_suspend.c */
-extern int i915_save_state(struct drm_device *dev);
-extern int i915_restore_state(struct drm_device *dev);
+/* i915_gpu_error.c */
+__printf(2, 3)
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
+int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
+ const struct i915_error_state_file_priv *error);
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
+ size_t count, loff_t pos);
+static inline void i915_error_state_buf_release(
+ struct drm_i915_error_state_buf *eb)
+{
+ kfree(eb->buf);
+}
+void i915_capture_error_state(struct drm_device *dev, bool wedge,
+ const char *error_msg);
+void i915_error_state_get(struct drm_device *dev,
+ struct i915_error_state_file_priv *error_priv);
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
+void i915_destroy_error_state(struct drm_device *dev);
+
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+const char *i915_cache_level_str(int type);
+
+/* i915_cmd_parser.c */
+int i915_cmd_parser_get_version(void);
+int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
+bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
+int i915_parse_cmds(struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *batch_obj,
+ u32 batch_start_offset,
+ bool is_master);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
+/* i915_ums.c */
+void i915_save_display_reg(struct drm_device *dev);
+void i915_restore_display_reg(struct drm_device *dev);
+
+/* i915_sysfs.c */
+void i915_setup_sysfs(struct drm_device *dev_priv);
+void i915_teardown_sysfs(struct drm_device *dev_priv);
+
+/* intel_i2c.c */
+extern int intel_setup_gmbus(struct drm_device *dev);
+extern void intel_teardown_gmbus(struct drm_device *dev);
+static inline bool intel_gmbus_is_port_valid(unsigned port)
+{
+ return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
+}
+
+extern struct i2c_adapter *intel_gmbus_get_adapter(
+ struct drm_i915_private *dev_priv, unsigned port);
+extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+{
+ return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
+}
+extern void intel_i2c_reset(struct drm_device *dev);
+
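A hedged usage sketch for the two GMBUS helpers declared above: validate the
port before asking for its adapter. The wrapper name is invented for
illustration; only the two intel_gmbus_* calls come from this header.

static struct i2c_adapter *
get_gmbus_adapter_checked(struct drm_i915_private *dev_priv, unsigned port)
{
	/* Reject ports outside GMBUS_PORT_SSC..GMBUS_PORT_DPD. */
	if (!intel_gmbus_is_port_valid(port))
		return NULL;

	return intel_gmbus_get_adapter(dev_priv, port);
}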
+/* intel_opregion.c */
+struct intel_encoder;
#ifdef CONFIG_ACPI
-/* i915_opregion.c */
-extern int intel_opregion_init(struct drm_device *dev, int resume);
-extern void intel_opregion_free(struct drm_device *dev, int suspend);
-extern void opregion_asle_intr(struct drm_device *dev);
-extern void ironlake_opregion_gse_intr(struct drm_device *dev);
-extern void opregion_enable_asle(struct drm_device *dev);
+extern int intel_opregion_setup(struct drm_device *dev);
+extern void intel_opregion_init(struct drm_device *dev);
+extern void intel_opregion_fini(struct drm_device *dev);
+extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+ bool enable);
+extern int intel_opregion_notify_adapter(struct drm_device *dev,
+ pci_power_t state);
#else
-static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
-static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
-static inline void opregion_asle_intr(struct drm_device *dev) { return; }
-static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
-static inline void opregion_enable_asle(struct drm_device *dev) { return; }
+static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
+static inline void intel_opregion_init(struct drm_device *dev) { return; }
+static inline void intel_opregion_fini(struct drm_device *dev) { return; }
+static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+ return 0;
+}
+static inline int
+intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+ return 0;
+}
#endif
+/* intel_acpi.c */
+#ifdef CONFIG_ACPI
+extern void intel_register_dsm_handler(void);
+extern void intel_unregister_dsm_handler(void);
+#else
+static inline void intel_register_dsm_handler(void) { return; }
+static inline void intel_unregister_dsm_handler(void) { return; }
+#endif /* CONFIG_ACPI */
+
/* modesetting */
+extern void intel_modeset_init_hw(struct drm_device *dev);
+extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
+extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
+extern void intel_connector_unregister(struct intel_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void i8xx_disable_fbc(struct drm_device *dev);
-extern void g4x_disable_fbc(struct drm_device *dev);
-
-/**
- * Lock test for when it's just for synchronization of ring access.
- *
- * In that case, we don't need to do it when GEM is initialized as nobody else
- * has access to the ring.
+extern void intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore);
+extern void i915_redisable_vga(struct drm_device *dev);
+extern void i915_redisable_vga_power_on(struct drm_device *dev);
+extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
+extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void intel_init_pch_refclk(struct drm_device *dev);
+extern void gen6_set_rps(struct drm_device *dev, u8 val);
+extern void valleyview_set_rps(struct drm_device *dev, u8 val);
+extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
+extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
+extern void intel_detect_pch(struct drm_device *dev);
+extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern int intel_enable_rc6(const struct drm_device *dev);
+
+extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+int i915_reg_read_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+/* overlay */
+extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
+ struct intel_overlay_error_state *error);
+
+extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
+ struct drm_device *dev,
+ struct intel_display_error_state *error);
+
+/* On the SNB platform, the forcewake bit must be set before reading ring
+ * registers, to keep the GT core from powering down and returning stale
+ * values.
*/
-#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
- if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
- LOCK_TEST_WITH_RETURN(dev, file_priv); \
-} while (0)
-
-#define I915_READ(reg) readl(dev_priv->regs + (reg))
-#define I915_WRITE(reg, val) writel(val, dev_priv->regs + (reg))
-#define I915_READ16(reg) readw(dev_priv->regs + (reg))
-#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
-#define I915_READ8(reg) readb(dev_priv->regs + (reg))
-#define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg))
-#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
-#define I915_READ64(reg) readq(dev_priv->regs + (reg))
-#define POSTING_READ(reg) (void)I915_READ(reg)
-
-#define I915_VERBOSE 0
-
-#define RING_LOCALS volatile unsigned int *ring_virt__;
-
-#define BEGIN_LP_RING(n) do { \
- int bytes__ = 4*(n); \
- if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \
- /* a wrap must occur between instructions so pad beforehand */ \
- if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \
- i915_wrap_ring(dev); \
- if (unlikely (dev_priv->ring.space < bytes__)) \
- i915_wait_ring(dev, bytes__, __func__); \
- ring_virt__ = (unsigned int *) \
- (dev_priv->ring.virtual_start + dev_priv->ring.tail); \
- dev_priv->ring.tail += bytes__; \
- dev_priv->ring.tail &= dev_priv->ring.Size - 1; \
- dev_priv->ring.space -= bytes__; \
-} while (0)
-
-#define OUT_RING(n) do { \
- if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
- *ring_virt__++ = (n); \
-} while (0)
-
-#define ADVANCE_LP_RING() do { \
- if (I915_VERBOSE) \
- DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail); \
- I915_WRITE(PRB0_TAIL, dev_priv->ring.tail); \
-} while(0)
-
-/**
- * Reads a dword out of the status page, which is written to from the command
- * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
- * MI_STORE_DATA_IMM.
- *
- * The following dwords have a reserved meaning:
- * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
- * 0x04: ring 0 head pointer
- * 0x05: ring 1 head pointer (915-class)
- * 0x06: ring 2 head pointer (915-class)
- * 0x10-0x1b: Context status DWords (GM45)
- * 0x1f: Last written status offset. (GM45)
- *
- * The area from dword 0x20 to 0x3ff is available for driver usage.
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
+void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
+
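As the comment above notes for SNB, raw ring-register reads must be bracketed
by forcewake references so the GT core stays powered. A minimal sketch of that
bracketing; the helper name and register argument are illustrative, and
FORCEWAKE_ALL is defined a few lines further down.

static u32 snb_read_ring_reg(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
	val = I915_READ(reg);	/* GT cannot power down between get/put */
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

	return val;
}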
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+
+/* intel_sideband.c */
+u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
+void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
+u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
+u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+ enum intel_sbi_destination destination);
+void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+ enum intel_sbi_destination destination);
+u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
+
+#define FORCEWAKE_RENDER (1 << 0)
+#define FORCEWAKE_MEDIA (1 << 1)
+#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
+
+
+#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
+#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
+
+#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
+#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
+#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
+#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
+
+#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
+#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
+#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
+#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
+
+/* Be very careful with read/write 64-bit values. On 32-bit machines, they
+ * will be implemented using 2 32-bit writes in an arbitrary order with
+ * an arbitrary delay between them. This can cause the hardware to
+ * act upon the intermediate value, possibly leading to corruption and
+ * machine death. You have been warned.
*/
-#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
-#define I915_GEM_HWS_INDEX 0x20
-#define I915_BREADCRUMB_INDEX 0x21
-
-extern int i915_wrap_ring(struct drm_device * dev);
-extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-
-#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
-
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
-#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
-#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
-#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
-#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
-#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
-#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
-#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
-#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
-#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
+#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
+
+#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
+ u32 upper = I915_READ(upper_reg); \
+ u32 lower = I915_READ(lower_reg); \
+ u32 tmp = I915_READ(upper_reg); \
+ if (upper != tmp) { \
+ upper = tmp; \
+ lower = I915_READ(lower_reg); \
+ WARN_ON(I915_READ(upper_reg) != upper); \
+ } \
+ (u64)upper << 32 | lower; })
+
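A hedged example of the tearing-safe 64-bit read: sampling a free-running
counter exposed as two 32-bit halves. The register names are placeholders,
not definitions from this patch.

static u64 sample_counter64(struct drm_i915_private *dev_priv)
{
	/* I915_READ64_2x32 re-reads the upper half and redoes the lower
	 * read if a carry occurred in between, so the combined value is
	 * consistent even when the counter ticks mid-sequence. */
	return I915_READ64_2x32(COUNTER_LOW_REG, COUNTER_HIGH_REG);
}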
+#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
+#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
+
+/* "Broadcast RGB" property */
+#define INTEL_BROADCAST_RGB_AUTO 0
+#define INTEL_BROADCAST_RGB_FULL 1
+#define INTEL_BROADCAST_RGB_LIMITED 2
+
+static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
+{
+ if (HAS_PCH_SPLIT(dev))
+ return CPU_VGACNTRL;
+ else if (IS_VALLEYVIEW(dev))
+ return VLV_VGACNTRL;
+ else
+ return VGACNTRL;
+}
+
+static inline void __user *to_user_ptr(u64 address)
+{
+ return (void __user *)(uintptr_t)address;
+}
+
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+ unsigned long j = msecs_to_jiffies(m);
+
+ return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+static inline unsigned long
+timespec_to_jiffies_timeout(const struct timespec *value)
+{
+ unsigned long j = timespec_to_jiffies(value);
+
+ return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
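Both *_timeout helpers add one jiffy on top of the converted value. Assuming
the usual kernel rationale, this compensates for conversion rounding and for
the current jiffy being partially elapsed, so the timer never fires before the
requested wall-clock time has passed.

/* Sketch, assuming HZ == 100 (10 ms per jiffy):
 *   msecs_to_jiffies(5)         == 1 jiffy (rounded up)
 *   msecs_to_jiffies_timeout(5) == 2 jiffies
 * A 1-jiffy timer may fire at the very next tick, i.e. after less than
 * 10 ms; the extra jiffy guarantees at least the requested 5 ms. */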
-#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
-
-/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
- * rows, which changed the alignment requirements and fence programming.
+/*
+ * If you need to wait X milliseconds between events A and B, but event B
+ * doesn't happen exactly after event A, you record the timestamp (jiffies) of
+ * when event A happened, then just before event B you call this function and
+ * pass the timestamp as the first argument, and X as the second argument.
*/
-#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
- IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
-#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
- !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
-#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
-/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-
-#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
-#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
-
-#define PRIMARY_RINGBUFFER_SIZE (128*1024)
+static inline void
+wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+{
+ unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+
+ /*
+ * Don't re-read the value of "jiffies" every time since it may change
+ * behind our back and break the math.
+ */
+ tmp_jiffies = jiffies;
+ target_jiffies = timestamp_jiffies +
+ msecs_to_jiffies_timeout(to_wait_ms);
+
+ if (time_after(target_jiffies, tmp_jiffies)) {
+ remaining_jiffies = target_jiffies - tmp_jiffies;
+ while (remaining_jiffies)
+ remaining_jiffies =
+ schedule_timeout_uninterruptible(remaining_jiffies);
+ }
+}
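A usage sketch of the A/B pattern described above; the panel power events and
the 100 ms gap are invented for illustration.

/* Event A: remember when the panel was switched off. */
static unsigned long panel_off_jiffies;

static void example_panel_off(void)
{
	/* ... power the panel down ... */
	panel_off_jiffies = jiffies;
}

/* Event B: before powering back on, sleep out whatever remains of the
 * (hypothetical) 100 ms minimum off time. */
static void example_panel_on(void)
{
	wait_remaining_ms_from_jiffies(panel_off_jiffies, 100);
	/* ... power the panel up ... */
}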
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2748609f05b..d893e4da5dc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -25,456 +25,626 @@
*
*/
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
+#include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
+#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/oom.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
+#include <linux/dma-buf.h>
+
+static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
+ bool force);
+static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool readonly);
+static void
+i915_gem_object_retire(struct drm_i915_gem_object *obj);
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj);
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_fence_reg *fence,
+ bool enable);
+
+static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
+ struct shrink_control *sc);
+static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
+ struct shrink_control *sc);
+static int i915_gem_shrinker_oom(struct notifier_block *nb,
+ unsigned long event,
+ void *ptr);
+static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+
+static bool cpu_cache_is_coherent(struct drm_device *dev,
+ enum i915_cache_level level)
+{
+ return HAS_LLC(dev) || level != I915_CACHE_NONE;
+}
-#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
-
-static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
- int write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
- uint64_t offset,
- uint64_t size);
-static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
-static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
- unsigned alignment);
-static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_evict_something(struct drm_device *dev, int min_size);
-static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
-static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv);
-
-static LIST_HEAD(shrink_list);
-static DEFINE_SPINLOCK(shrink_list_lock);
-
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
- unsigned long end)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (start >= end ||
- (start & (PAGE_SIZE - 1)) != 0 ||
- (end & (PAGE_SIZE - 1)) != 0) {
- return -EINVAL;
+static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ return true;
+
+ return obj->pin_display;
+}
+
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ /* As we do not have an associated fence register, we will force
+ * a tiling change if we ever need to acquire one.
+ */
+ obj->fence_dirty = false;
+ obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
+/* some bookkeeping */
+static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ spin_lock(&dev_priv->mm.object_stat_lock);
+ dev_priv->mm.object_count++;
+ dev_priv->mm.object_memory += size;
+ spin_unlock(&dev_priv->mm.object_stat_lock);
+}
+
+static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ spin_lock(&dev_priv->mm.object_stat_lock);
+ dev_priv->mm.object_count--;
+ dev_priv->mm.object_memory -= size;
+ spin_unlock(&dev_priv->mm.object_stat_lock);
+}
+
+static int
+i915_gem_wait_for_error(struct i915_gpu_error *error)
+{
+ int ret;
+
+#define EXIT_COND (!i915_reset_in_progress(error) || \
+ i915_terminally_wedged(error))
+ if (EXIT_COND)
+ return 0;
+
+ /*
+ * Only wait 10 seconds for the gpu reset to complete to avoid hanging
+ * userspace. If it takes that long something really bad is going on and
+ * we should simply try to bail out and fail as gracefully as possible.
+ */
+ ret = wait_event_interruptible_timeout(error->reset_queue,
+ EXIT_COND,
+ 10*HZ);
+ if (ret == 0) {
+ DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
+ return -EIO;
+ } else if (ret < 0) {
+ return ret;
}
+#undef EXIT_COND
- drm_mm_init(&dev_priv->mm.gtt_space, start,
- end - start);
+ return 0;
+}
+
+int i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
+ if (ret)
+ return ret;
- dev->gtt_total = (uint32_t) (end - start);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ WARN_ON(i915_verify_lists(dev));
return 0;
}
+static inline bool
+i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_bound_any(obj) && !obj->active;
+}
+
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_init *args = data;
- int ret;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (args->gtt_start >= args->gtt_end ||
+ (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ /* GEM with user mode setting was never supported on ilk and later. */
+ if (INTEL_INFO(dev)->gen >= 5)
+ return -ENODEV;
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
+ i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
+ args->gtt_end);
+ dev_priv->gtt.mappable_end = args->gtt_end;
mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
+ struct drm_i915_gem_object *obj;
+ size_t pinned;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
+ pinned = 0;
+ mutex_lock(&dev->struct_mutex);
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+ if (i915_gem_obj_is_pinned(obj))
+ pinned += i915_gem_obj_ggtt_size(obj);
+ mutex_unlock(&dev->struct_mutex);
- args->aper_size = dev->gtt_total;
- args->aper_available_size = (args->aper_size -
- atomic_read(&dev->pin_memory));
+ args->aper_size = dev_priv->gtt.base.total;
+ args->aper_available_size = args->aper_size - pinned;
return 0;
}
+static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+{
+ drm_dma_handle_t *phys = obj->phys_handle;
+
+ if (!phys)
+ return;
+
+ if (obj->madv == I915_MADV_WILLNEED) {
+ struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+ char *vaddr = phys->vaddr;
+ int i;
+
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+ struct page *page = shmem_read_mapping_page(mapping, i);
+ if (!IS_ERR(page)) {
+ char *dst = kmap_atomic(page);
+ memcpy(dst, vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(dst, PAGE_SIZE);
+ kunmap_atomic(dst);
+
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ page_cache_release(page);
+ }
+ vaddr += PAGE_SIZE;
+ }
+ i915_gem_chipset_flush(obj->base.dev);
+ }
+
+#ifdef CONFIG_X86
+ set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+ drm_pci_free(obj->base.dev, phys);
+ obj->phys_handle = NULL;
+}
-/**
- * Creates a new mm object and returns a handle to it.
- */
int
-i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+ int align)
{
- struct drm_i915_gem_create *args = data;
- struct drm_gem_object *obj;
- int ret;
- u32 handle;
+ drm_dma_handle_t *phys;
+ struct address_space *mapping;
+ char *vaddr;
+ int i;
- args->size = roundup(args->size, PAGE_SIZE);
+ if (obj->phys_handle) {
+		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
+ return -EBUSY;
- /* Allocate the new object */
- obj = drm_gem_object_alloc(dev, args->size);
- if (obj == NULL)
+ return 0;
+ }
+
+ if (obj->madv != I915_MADV_WILLNEED)
+ return -EFAULT;
+
+ if (obj->base.filp == NULL)
+ return -EINVAL;
+
+ /* create a new object */
+ phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+ if (!phys)
return -ENOMEM;
- ret = drm_gem_handle_create(file_priv, obj, &handle);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ vaddr = phys->vaddr;
+#ifdef CONFIG_X86
+ set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
+#endif
+ mapping = file_inode(obj->base.filp)->i_mapping;
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+ struct page *page;
+ char *src;
- if (ret)
- return ret;
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page)) {
+#ifdef CONFIG_X86
+ set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+ drm_pci_free(obj->base.dev, phys);
+ return PTR_ERR(page);
+ }
+
+ src = kmap_atomic(page);
+ memcpy(vaddr, src, PAGE_SIZE);
+ kunmap_atomic(src);
- args->handle = handle;
+ mark_page_accessed(page);
+ page_cache_release(page);
+ vaddr += PAGE_SIZE;
+ }
+
+ obj->phys_handle = phys;
return 0;
}
-static inline int
-fast_shmem_read(struct page **pages,
- loff_t page_base, int page_offset,
- char __user *data,
- int length)
+static int
+i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
{
- char __iomem *vaddr;
- int unwritten;
+ struct drm_device *dev = obj->base.dev;
+ void *vaddr = obj->phys_handle->vaddr + args->offset;
+ char __user *user_data = to_user_ptr(args->data_ptr);
- vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
- if (vaddr == NULL)
- return -ENOMEM;
- unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
- kunmap_atomic(vaddr, KM_USER0);
+ if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+ unsigned long unwritten;
- if (unwritten)
- return -EFAULT;
+	/* The physical object, once assigned, is fixed for the lifetime
+ * of the obj, so we can safely drop the lock and continue
+ * to access vaddr.
+ */
+ mutex_unlock(&dev->struct_mutex);
+ unwritten = copy_from_user(vaddr, user_data, args->size);
+ mutex_lock(&dev->struct_mutex);
+ if (unwritten)
+ return -EFAULT;
+ }
+ i915_gem_chipset_flush(dev);
return 0;
}
-static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
+void *i915_gem_object_alloc(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = obj->dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
+}
- return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
- obj_priv->tiling_mode != I915_TILING_NONE;
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ kmem_cache_free(dev_priv->slab, obj);
}
-static inline int
-slow_shmem_copy(struct page *dst_page,
- int dst_offset,
- struct page *src_page,
- int src_offset,
- int length)
+static int
+i915_gem_create(struct drm_file *file,
+ struct drm_device *dev,
+ uint64_t size,
+ uint32_t *handle_p)
{
- char *dst_vaddr, *src_vaddr;
+ struct drm_i915_gem_object *obj;
+ int ret;
+ u32 handle;
- dst_vaddr = kmap_atomic(dst_page, KM_USER0);
- if (dst_vaddr == NULL)
- return -ENOMEM;
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
- src_vaddr = kmap_atomic(src_page, KM_USER1);
- if (src_vaddr == NULL) {
- kunmap_atomic(dst_vaddr, KM_USER0);
+ /* Allocate the new object */
+ obj = i915_gem_alloc_object(dev, size);
+ if (obj == NULL)
return -ENOMEM;
- }
- memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
-
- kunmap_atomic(src_vaddr, KM_USER1);
- kunmap_atomic(dst_vaddr, KM_USER0);
+ ret = drm_gem_handle_create(file, &obj->base, &handle);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(&obj->base);
+ if (ret)
+ return ret;
+ *handle_p = handle;
return 0;
}
-static inline int
-slow_shmem_bit17_copy(struct page *gpu_page,
- int gpu_offset,
- struct page *cpu_page,
- int cpu_offset,
- int length,
- int is_read)
-{
- char *gpu_vaddr, *cpu_vaddr;
-
- /* Use the unswizzled path if this page isn't affected. */
- if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
- if (is_read)
- return slow_shmem_copy(cpu_page, cpu_offset,
- gpu_page, gpu_offset, length);
- else
- return slow_shmem_copy(gpu_page, gpu_offset,
- cpu_page, cpu_offset, length);
- }
+int
+i915_gem_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ /* have to work out size/pitch and return them */
+ args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
+ args->size = args->pitch * args->height;
+ return i915_gem_create(file, dev,
+ args->size, &args->handle);
+}
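A worked example of the pitch/size computation above, for an illustrative
1920x1080 dumb buffer at 32 bpp:

/* pitch = ALIGN(1920 * DIV_ROUND_UP(32, 8), 64) = ALIGN(7680, 64) = 7680
 * size  = 7680 * 1080 = 8294400 bytes, exactly 2025 pages of 4 KiB, so
 * i915_gem_create()'s roundup() to PAGE_SIZE leaves it unchanged. */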
- gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
- if (gpu_vaddr == NULL)
- return -ENOMEM;
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_create *args = data;
- cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
- if (cpu_vaddr == NULL) {
- kunmap_atomic(gpu_vaddr, KM_USER0);
- return -ENOMEM;
- }
+ return i915_gem_create(file, dev,
+ args->size, &args->handle);
+}
+
+static inline int
+__copy_to_user_swizzled(char __user *cpu_vaddr,
+ const char *gpu_vaddr, int gpu_offset,
+ int length)
+{
+ int ret, cpu_offset = 0;
- /* Copy the data, XORing A6 with A17 (1). The user already knows he's
- * XORing with the other bits (A9 for Y, A9 and A10 for X)
- */
while (length > 0) {
int cacheline_end = ALIGN(gpu_offset + 1, 64);
int this_length = min(cacheline_end - gpu_offset, length);
int swizzled_gpu_offset = gpu_offset ^ 64;
- if (is_read) {
- memcpy(cpu_vaddr + cpu_offset,
- gpu_vaddr + swizzled_gpu_offset,
- this_length);
- } else {
- memcpy(gpu_vaddr + swizzled_gpu_offset,
- cpu_vaddr + cpu_offset,
- this_length);
- }
+ ret = __copy_to_user(cpu_vaddr + cpu_offset,
+ gpu_vaddr + swizzled_gpu_offset,
+ this_length);
+ if (ret)
+ return ret + length;
+
cpu_offset += this_length;
gpu_offset += this_length;
length -= this_length;
}
- kunmap_atomic(cpu_vaddr, KM_USER1);
- kunmap_atomic(gpu_vaddr, KM_USER0);
-
return 0;
}
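The gpu_offset ^ 64 above flips bit 6 of the offset, exchanging the two
64-byte halves of every 128-byte span; the per-cacheline loop keeps each chunk
inside a single 64-byte line so the XOR maps it wholly onto its sibling line.
A few sample mappings (illustrative offsets only):

/* gpu_offset   0 -> swizzled  64
 * gpu_offset  64 -> swizzled   0
 * gpu_offset 130 -> swizzled 194 */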
-/**
- * This is the fast shmem pread path, which attempts to copy_from_user directly
- * from the backing pages of the object to the user's address space. On a
- * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
- */
-static int
-i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
- struct drm_i915_gem_pread *args,
- struct drm_file *file_priv)
+static inline int
+__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
+ const char __user *cpu_vaddr,
+ int length)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- ssize_t remain;
- loff_t offset, page_base;
- char __user *user_data;
- int page_offset, page_length;
- int ret;
+ int ret, cpu_offset = 0;
- user_data = (char __user *) (uintptr_t) args->data_ptr;
- remain = args->size;
+ while (length > 0) {
+ int cacheline_end = ALIGN(gpu_offset + 1, 64);
+ int this_length = min(cacheline_end - gpu_offset, length);
+ int swizzled_gpu_offset = gpu_offset ^ 64;
- mutex_lock(&dev->struct_mutex);
+ ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
+ cpu_vaddr + cpu_offset,
+ this_length);
+ if (ret)
+ return ret + length;
- ret = i915_gem_object_get_pages(obj);
- if (ret != 0)
- goto fail_unlock;
+ cpu_offset += this_length;
+ gpu_offset += this_length;
+ length -= this_length;
+ }
- ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
- args->size);
- if (ret != 0)
- goto fail_put_pages;
+ return 0;
+}
- obj_priv = obj->driver_private;
- offset = args->offset;
+/*
+ * Pins the specified object's pages and synchronizes the object with
+ * GPU accesses. Sets needs_clflush to non-zero if the caller should
+ * flush the object from the CPU cache.
+ */
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+ int *needs_clflush)
+{
+ int ret;
- while (remain > 0) {
- /* Operation in this page
- *
- * page_base = page offset within aperture
- * page_offset = offset within page
- * page_length = bytes to copy for this page
- */
- page_base = (offset & ~(PAGE_SIZE-1));
- page_offset = offset & (PAGE_SIZE-1);
- page_length = remain;
- if ((page_offset + remain) > PAGE_SIZE)
- page_length = PAGE_SIZE - page_offset;
+ *needs_clflush = 0;
+
+ if (!obj->base.filp)
+ return -EINVAL;
- ret = fast_shmem_read(obj_priv->pages,
- page_base, page_offset,
- user_data, page_length);
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+		/* If we're not in the cpu read domain, move ourselves into the gtt
+		 * read domain and manually flush cachelines (if required). This
+		 * optimizes for the case when the gpu will dirty the data again
+		 * anyway before the next pread happens. */
+ *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
+ obj->cache_level);
+ ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
- goto fail_put_pages;
+ return ret;
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
+ i915_gem_object_retire(obj);
}
-fail_put_pages:
- i915_gem_object_put_pages(obj);
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
- return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
+ i915_gem_object_pin_pages(obj);
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
- mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
+ return ret;
}
+/* Per-page copy function for the shmem pread fastpath.
+ * Flushes invalid cachelines before reading the target if
+ * needs_clflush is set. */
static int
-i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
+shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling, bool needs_clflush)
{
+ char *vaddr;
int ret;
- ret = i915_gem_object_get_pages(obj);
+ if (unlikely(page_do_bit17_swizzling))
+ return -EINVAL;
- /* If we've insufficient memory to map in the pages, attempt
- * to make some space by throwing out some old buffers.
- */
- if (ret == -ENOMEM) {
- struct drm_device *dev = obj->dev;
- gfp_t gfp;
+ vaddr = kmap_atomic(page);
+ if (needs_clflush)
+ drm_clflush_virt_range(vaddr + shmem_page_offset,
+ page_length);
+ ret = __copy_to_user_inatomic(user_data,
+ vaddr + shmem_page_offset,
+ page_length);
+ kunmap_atomic(vaddr);
- ret = i915_gem_evict_something(dev, obj->size);
- if (ret)
- return ret;
+ return ret ? -EFAULT : 0;
+}
- gfp = i915_gem_object_get_page_gfp_mask(obj);
- i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
- ret = i915_gem_object_get_pages(obj);
- i915_gem_object_set_page_gfp_mask (obj, gfp);
+static void
+shmem_clflush_swizzled_range(char *addr, unsigned long length,
+ bool swizzled)
+{
+ if (unlikely(swizzled)) {
+ unsigned long start = (unsigned long) addr;
+ unsigned long end = (unsigned long) addr + length;
+
+ /* For swizzling simply ensure that we always flush both
+ * channels. Lame, but simple and it works. Swizzled
+ * pwrite/pread is far from a hotpath - current userspace
+ * doesn't use it at all. */
+ start = round_down(start, 128);
+ end = round_up(end, 128);
+
+ drm_clflush_virt_range((void *)start, end - start);
+ } else {
+ drm_clflush_virt_range(addr, length);
}
- return ret;
}
-/**
- * This is the fallback shmem pread path, which allocates temporary storage
- * in kernel space to copy_to_user into outside of the struct_mutex, so we
- * can copy out of the object's backing pages while holding the struct mutex
- * and not take page faults.
- */
+/* Only difference to the fast-path function is that this can handle bit17
+ * and uses non-atomic copy and kmap functions. */
static int
-i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
- struct drm_i915_gem_pread *args,
- struct drm_file *file_priv)
+shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling, bool needs_clflush)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- struct mm_struct *mm = current->mm;
- struct page **user_pages;
- ssize_t remain;
- loff_t offset, pinned_pages, i;
- loff_t first_data_page, last_data_page, num_pages;
- int shmem_page_index, shmem_page_offset;
- int data_page_index, data_page_offset;
- int page_length;
+ char *vaddr;
int ret;
- uint64_t data_ptr = args->data_ptr;
- int do_bit17_swizzling;
- remain = args->size;
+ vaddr = kmap(page);
+ if (needs_clflush)
+ shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+ page_length,
+ page_do_bit17_swizzling);
- /* Pin the user pages containing the data. We can't fault while
- * holding the struct mutex, yet we want to hold it while
- * dereferencing the user data.
- */
- first_data_page = data_ptr / PAGE_SIZE;
- last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
- num_pages = last_data_page - first_data_page + 1;
+ if (page_do_bit17_swizzling)
+ ret = __copy_to_user_swizzled(user_data,
+ vaddr, shmem_page_offset,
+ page_length);
+ else
+ ret = __copy_to_user(user_data,
+ vaddr + shmem_page_offset,
+ page_length);
+ kunmap(page);
- user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
- if (user_pages == NULL)
- return -ENOMEM;
+	return ret ? -EFAULT : 0;
+}
- down_read(&mm->mmap_sem);
- pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- num_pages, 1, 0, user_pages, NULL);
- up_read(&mm->mmap_sem);
- if (pinned_pages < num_pages) {
- ret = -EFAULT;
- goto fail_put_user_pages;
- }
+static int
+i915_gem_shmem_pread(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pread *args,
+ struct drm_file *file)
+{
+ char __user *user_data;
+ ssize_t remain;
+ loff_t offset;
+ int shmem_page_offset, page_length, ret = 0;
+ int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ int prefaulted = 0;
+ int needs_clflush = 0;
+ struct sg_page_iter sg_iter;
- do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ user_data = to_user_ptr(args->data_ptr);
+ remain = args->size;
- mutex_lock(&dev->struct_mutex);
+ obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- ret = i915_gem_object_get_pages_or_evict(obj);
+ ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
if (ret)
- goto fail_unlock;
-
- ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
- args->size);
- if (ret != 0)
- goto fail_put_pages;
+ return ret;
- obj_priv = obj->driver_private;
offset = args->offset;
- while (remain > 0) {
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+ offset >> PAGE_SHIFT) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+
+ if (remain <= 0)
+ break;
+
/* Operation in this page
*
- * shmem_page_index = page number within shmem file
* shmem_page_offset = offset within page in shmem file
- * data_page_index = page number in get_user_pages return
- * data_page_offset = offset with data_page_index page.
* page_length = bytes to copy for this page
*/
- shmem_page_index = offset / PAGE_SIZE;
- shmem_page_offset = offset & ~PAGE_MASK;
- data_page_index = data_ptr / PAGE_SIZE - first_data_page;
- data_page_offset = data_ptr & ~PAGE_MASK;
-
+ shmem_page_offset = offset_in_page(offset);
page_length = remain;
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- if ((data_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - data_page_offset;
-
- if (do_bit17_swizzling) {
- ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
- shmem_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length,
- 1);
- } else {
- ret = slow_shmem_copy(user_pages[data_page_index],
- data_page_offset,
- obj_priv->pages[shmem_page_index],
- shmem_page_offset,
- page_length);
+
+ page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+ (page_to_phys(page) & (1 << 17)) != 0;
+
+ ret = shmem_pread_fast(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ needs_clflush);
+ if (ret == 0)
+ goto next_page;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (likely(!i915.prefault_disable) && !prefaulted) {
+ ret = fault_in_multipages_writeable(user_data, remain);
+ /* Userspace is tricking us, but we've already clobbered
+ * its pages with the prefault and promised to write the
+ * data up to the first fault. Hence ignore any errors
+ * and just continue. */
+ (void)ret;
+ prefaulted = 1;
}
+
+ ret = shmem_pread_slow(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ needs_clflush);
+
+ mutex_lock(&dev->struct_mutex);
+
if (ret)
- goto fail_put_pages;
+ goto out;
+next_page:
remain -= page_length;
- data_ptr += page_length;
+ user_data += page_length;
offset += page_length;
}
-fail_put_pages:
- i915_gem_object_put_pages(obj);
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
-fail_put_user_pages:
- for (i = 0; i < pinned_pages; i++) {
- SetPageDirty(user_pages[i]);
- page_cache_release(user_pages[i]);
- }
- drm_free_large(user_pages);
+out:
+ i915_gem_object_unpin_pages(obj);
return ret;
}
@@ -486,39 +656,53 @@ fail_put_user_pages:
*/
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pread *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
+ struct drm_i915_gem_object *obj;
+ int ret = 0;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EBADF;
- obj_priv = obj->driver_private;
+ if (args->size == 0)
+ return 0;
- /* Bounds check source.
- *
- * XXX: This could use review for overflow issues...
- */
- if (args->offset > obj->size || args->size > obj->size ||
- args->offset + args->size > obj->size) {
- drm_gem_object_unreference(obj);
- return -EINVAL;
+ if (!access_ok(VERIFY_WRITE,
+ to_user_ptr(args->data_ptr),
+ args->size))
+ return -EFAULT;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
}
- if (i915_gem_object_needs_bit17_swizzle(obj)) {
- ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
- } else {
- ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
- if (ret != 0)
- ret = i915_gem_shmem_pread_slow(dev, obj, args,
- file_priv);
+ /* Bounds check source. */
+ if (args->offset > obj->base.size ||
+ args->size > obj->base.size - args->offset) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* prime objects have no backing filp to GEM pread/pwrite
+ * pages from.
+ */
+ if (!obj->base.filp) {
+ ret = -EINVAL;
+ goto out;
}
- drm_gem_object_unreference(obj);
+ trace_i915_gem_object_pread(obj, args->offset, args->size);
+ ret = i915_gem_shmem_pread(dev, obj, args, file);
+
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -532,61 +716,17 @@ fast_user_write(struct io_mapping *mapping,
char __user *user_data,
int length)
{
- char *vaddr_atomic;
+ void __iomem *vaddr_atomic;
+ void *vaddr;
unsigned long unwritten;
vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
- unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
+ /* We can use the cpu mem copy function because this is X86. */
+ vaddr = (void __force*)vaddr_atomic + page_offset;
+ unwritten = __copy_from_user_inatomic_nocache(vaddr,
user_data, length);
io_mapping_unmap_atomic(vaddr_atomic);
- if (unwritten)
- return -EFAULT;
- return 0;
-}
-
-/* Here's the write path which can sleep for
- * page faults
- */
-
-static inline int
-slow_kernel_write(struct io_mapping *mapping,
- loff_t gtt_base, int gtt_offset,
- struct page *user_page, int user_offset,
- int length)
-{
- char *src_vaddr, *dst_vaddr;
- unsigned long unwritten;
-
- dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
- src_vaddr = kmap_atomic(user_page, KM_USER1);
- unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
- src_vaddr + user_offset,
- length);
- kunmap_atomic(src_vaddr, KM_USER1);
- io_mapping_unmap_atomic(dst_vaddr);
- if (unwritten)
- return -EFAULT;
- return 0;
-}
-
-static inline int
-fast_shmem_write(struct page **pages,
- loff_t page_base, int page_offset,
- char __user *data,
- int length)
-{
- char __iomem *vaddr;
- unsigned long unwritten;
-
- vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
- if (vaddr == NULL)
- return -ENOMEM;
- unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
- kunmap_atomic(vaddr, KM_USER0);
-
- if (unwritten)
- return -EFAULT;
- return 0;
+ return unwritten;
}
/**
@@ -594,36 +734,33 @@ fast_shmem_write(struct page **pages,
* user into the GTT, uncached.
*/
static int
-i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
- int page_offset, page_length;
- int ret;
+ int page_offset, page_length, ret;
- user_data = (char __user *) (uintptr_t) args->data_ptr;
- remain = args->size;
- if (!access_ok(VERIFY_READ, user_data, remain))
- return -EFAULT;
+ ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
+ if (ret)
+ goto out;
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto out_unpin;
- mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(obj, 0);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_put_fence(obj);
if (ret)
- goto fail;
+ goto out_unpin;
- obj_priv = obj->driver_private;
- offset = obj_priv->gtt_offset + args->offset;
+ user_data = to_user_ptr(args->data_ptr);
+ remain = args->size;
+
+ offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -632,388 +769,593 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
- page_base = (offset & ~(PAGE_SIZE-1));
- page_offset = offset & (PAGE_SIZE-1);
+ page_base = offset & PAGE_MASK;
+ page_offset = offset_in_page(offset);
page_length = remain;
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
- page_offset, user_data, page_length);
-
/* If we get a fault while copying data, then (presumably) our
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
- if (ret)
- goto fail;
+ if (fast_user_write(dev_priv->gtt.mappable, page_base,
+ page_offset, user_data, page_length)) {
+ ret = -EFAULT;
+ goto out_unpin;
+ }
remain -= page_length;
user_data += page_length;
offset += page_length;
}
-fail:
- i915_gem_object_unpin(obj);
- mutex_unlock(&dev->struct_mutex);
-
+out_unpin:
+ i915_gem_object_ggtt_unpin(obj);
+out:
return ret;
}
-/**
- * This is the fallback GTT pwrite path, which uses get_user_pages to pin
- * the memory and maps it using kmap_atomic for copying.
- *
- * This code resulted in x11perf -rgb10text consuming about 10% more CPU
- * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
- */
+/* Per-page copy function for the shmem pwrite fastpath.
+ * Flushes invalid cachelines before writing to the target if
+ * needs_clflush_before is set and flushes out any written cachelines after
+ * writing if needs_clflush_after is set. */
static int
-i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling,
+ bool needs_clflush_before,
+ bool needs_clflush_after)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- drm_i915_private_t *dev_priv = dev->dev_private;
- ssize_t remain;
- loff_t gtt_page_base, offset;
- loff_t first_data_page, last_data_page, num_pages;
- loff_t pinned_pages, i;
- struct page **user_pages;
- struct mm_struct *mm = current->mm;
- int gtt_page_offset, data_page_offset, data_page_index, page_length;
+ char *vaddr;
int ret;
- uint64_t data_ptr = args->data_ptr;
+ if (unlikely(page_do_bit17_swizzling))
+ return -EINVAL;
+
+ vaddr = kmap_atomic(page);
+ if (needs_clflush_before)
+ drm_clflush_virt_range(vaddr + shmem_page_offset,
+ page_length);
+ ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
+ user_data, page_length);
+ if (needs_clflush_after)
+ drm_clflush_virt_range(vaddr + shmem_page_offset,
+ page_length);
+ kunmap_atomic(vaddr);
+
+ return ret ? -EFAULT : 0;
+}
+
+/* Only difference to the fast-path function is that this can handle bit17
+ * and uses non-atomic copy and kmap functions. */
+static int
+shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling,
+ bool needs_clflush_before,
+ bool needs_clflush_after)
+{
+ char *vaddr;
+ int ret;
+
+ vaddr = kmap(page);
+ if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
+ shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+ page_length,
+ page_do_bit17_swizzling);
+ if (page_do_bit17_swizzling)
+ ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
+ user_data,
+ page_length);
+ else
+ ret = __copy_from_user(vaddr + shmem_page_offset,
+ user_data,
+ page_length);
+ if (needs_clflush_after)
+ shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+ page_length,
+ page_do_bit17_swizzling);
+ kunmap(page);
+
+ return ret ? -EFAULT : 0;
+}
+
+static int
+i915_gem_shmem_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file)
+{
+ ssize_t remain;
+ loff_t offset;
+ char __user *user_data;
+ int shmem_page_offset, page_length, ret = 0;
+ int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ int hit_slowpath = 0;
+ int needs_clflush_after = 0;
+ int needs_clflush_before = 0;
+ struct sg_page_iter sg_iter;
+
+ user_data = to_user_ptr(args->data_ptr);
remain = args->size;
- /* Pin the user pages containing the data. We can't fault while
- * holding the struct mutex, and all of the pwrite implementations
- * want to hold it while dereferencing the user data.
- */
- first_data_page = data_ptr / PAGE_SIZE;
- last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
- num_pages = last_data_page - first_data_page + 1;
+ obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
- if (user_pages == NULL)
- return -ENOMEM;
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+		/* If we're not in the cpu write domain, move ourselves into the gtt
+ * write domain and manually flush cachelines (if required). This
+ * optimizes for the case when the gpu will use the data
+ * right away and we therefore have to clflush anyway. */
+ needs_clflush_after = cpu_write_needs_clflush(obj);
+ ret = i915_gem_object_wait_rendering(obj, false);
+ if (ret)
+ return ret;
- down_read(&mm->mmap_sem);
- pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- num_pages, 0, 0, user_pages, NULL);
- up_read(&mm->mmap_sem);
- if (pinned_pages < num_pages) {
- ret = -EFAULT;
- goto out_unpin_pages;
+ i915_gem_object_retire(obj);
}
+ /* Same trick applies to invalidate partially written cachelines read
+ * before writing. */
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+ needs_clflush_before =
+ !cpu_cache_is_coherent(dev, obj->cache_level);
- mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(obj, 0);
+ ret = i915_gem_object_get_pages(obj);
if (ret)
- goto out_unlock;
+ return ret;
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
- if (ret)
- goto out_unpin_object;
+ i915_gem_object_pin_pages(obj);
+
+ offset = args->offset;
+ obj->dirty = 1;
- obj_priv = obj->driver_private;
- offset = obj_priv->gtt_offset + args->offset;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+ offset >> PAGE_SHIFT) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+ int partial_cacheline_write;
+
+ if (remain <= 0)
+ break;
- while (remain > 0) {
/* Operation in this page
*
- * gtt_page_base = page offset within aperture
- * gtt_page_offset = offset within page in aperture
- * data_page_index = page number in get_user_pages return
- * data_page_offset = offset with data_page_index page.
+ * shmem_page_offset = offset within page in shmem file
* page_length = bytes to copy for this page
*/
- gtt_page_base = offset & PAGE_MASK;
- gtt_page_offset = offset & ~PAGE_MASK;
- data_page_index = data_ptr / PAGE_SIZE - first_data_page;
- data_page_offset = data_ptr & ~PAGE_MASK;
+ shmem_page_offset = offset_in_page(offset);
page_length = remain;
- if ((gtt_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - gtt_page_offset;
- if ((data_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - data_page_offset;
+ if ((shmem_page_offset + page_length) > PAGE_SIZE)
+ page_length = PAGE_SIZE - shmem_page_offset;
- ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
- gtt_page_base, gtt_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length);
+ /* If we don't overwrite a cacheline completely we need to be
+ * careful to have up-to-date data by first clflushing. Don't
+		 * overcomplicate things and flush the entire page. */
+ partial_cacheline_write = needs_clflush_before &&
+ ((shmem_page_offset | page_length)
+ & (boot_cpu_data.x86_clflush_size - 1));
+
+ page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+ (page_to_phys(page) & (1 << 17)) != 0;
+
+ ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ partial_cacheline_write,
+ needs_clflush_after);
+ if (ret == 0)
+ goto next_page;
+
+ hit_slowpath = 1;
+ mutex_unlock(&dev->struct_mutex);
+ ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ partial_cacheline_write,
+ needs_clflush_after);
+
+ mutex_lock(&dev->struct_mutex);
- /* If we get a fault while copying data, then (presumably) our
- * source page isn't available. Return the error and we'll
- * retry in the slow path.
- */
if (ret)
- goto out_unpin_object;
+ goto out;
+next_page:
remain -= page_length;
+ user_data += page_length;
offset += page_length;
- data_ptr += page_length;
}
-out_unpin_object:
- i915_gem_object_unpin(obj);
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
-out_unpin_pages:
- for (i = 0; i < pinned_pages; i++)
- page_cache_release(user_pages[i]);
- drm_free_large(user_pages);
+out:
+ i915_gem_object_unpin_pages(obj);
+
+ if (hit_slowpath) {
+ /*
+ * Fixup: Flush cpu caches in case we didn't flush the dirty
+ * cachelines in-line while writing and the object moved
+ * out of the cpu write domain while we've dropped the lock.
+ */
+ if (!needs_clflush_after &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ if (i915_gem_clflush_object(obj, obj->pin_display))
+ i915_gem_chipset_flush(dev);
+ }
+ }
+
+ if (needs_clflush_after)
+ i915_gem_chipset_flush(dev);
return ret;
}
/**
- * This is the fast shmem pwrite path, which attempts to directly
- * copy_from_user into the kmapped pages backing the object.
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
*/
-static int
-i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+int
+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- ssize_t remain;
- loff_t offset, page_base;
- char __user *user_data;
- int page_offset, page_length;
+ struct drm_i915_gem_pwrite *args = data;
+ struct drm_i915_gem_object *obj;
int ret;
- user_data = (char __user *) (uintptr_t) args->data_ptr;
- remain = args->size;
+ if (args->size == 0)
+ return 0;
- mutex_lock(&dev->struct_mutex);
+ if (!access_ok(VERIFY_READ,
+ to_user_ptr(args->data_ptr),
+ args->size))
+ return -EFAULT;
- ret = i915_gem_object_get_pages(obj);
- if (ret != 0)
- goto fail_unlock;
+ if (likely(!i915.prefault_disable)) {
+ ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+ args->size);
+ if (ret)
+ return -EFAULT;
+ }
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret != 0)
- goto fail_put_pages;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
- obj_priv = obj->driver_private;
- offset = args->offset;
- obj_priv->dirty = 1;
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
- while (remain > 0) {
- /* Operation in this page
- *
- * page_base = page offset within aperture
- * page_offset = offset within page
- * page_length = bytes to copy for this page
- */
- page_base = (offset & ~(PAGE_SIZE-1));
- page_offset = offset & (PAGE_SIZE-1);
- page_length = remain;
- if ((page_offset + remain) > PAGE_SIZE)
- page_length = PAGE_SIZE - page_offset;
+ /* Bounds check destination. */
+ if (args->offset > obj->base.size ||
+ args->size > obj->base.size - args->offset) {
+ ret = -EINVAL;
+ goto out;
+ }
- ret = fast_shmem_write(obj_priv->pages,
- page_base, page_offset,
- user_data, page_length);
- if (ret)
- goto fail_put_pages;
+ /* prime objects have no backing filp to GEM pread/pwrite
+ * pages from.
+ */
+ if (!obj->base.filp) {
+ ret = -EINVAL;
+ goto out;
+ }
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
+ trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+
+ ret = -EFAULT;
+ /* We can only do the GTT pwrite on untiled buffers, as otherwise
+ * it would end up going through the fenced access, and we'll get
+ * different detiling behavior between reading and writing.
+ * pread/pwrite currently are reading and writing from the CPU
+ * perspective, requiring manual detiling by the client.
+ */
+ if (obj->phys_handle) {
+ ret = i915_gem_phys_pwrite(obj, args, file);
+ goto out;
}
-fail_put_pages:
- i915_gem_object_put_pages(obj);
-fail_unlock:
+ if (obj->tiling_mode == I915_TILING_NONE &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+ cpu_write_needs_clflush(obj)) {
+ ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
+ /* Note that the gtt paths might fail with non-page-backed user
+ * pointers (e.g. gtt mappings when moving data between
+	 * textures). Fall back to the shmem path in that case. */
+ }
+
+ if (ret == -EFAULT || ret == -ENOSPC)
+ ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
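
The bounds check in this ioctl is deliberately written to dodge unsigned overflow; a self-contained sketch of the same comparison (illustrative names, not the driver's):

    #include <stdbool.h>
    #include <stdint.h>

    static bool pwrite_in_bounds(uint64_t offset, uint64_t size,
                                 uint64_t obj_size)
    {
            /* "offset + size > obj_size" could wrap for huge inputs;
             * the subtraction form, guarded by offset <= obj_size,
             * cannot. */
            return offset <= obj_size && size <= obj_size - offset;
    }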
+
+int
+i915_gem_check_wedge(struct i915_gpu_error *error,
+ bool interruptible)
+{
+ if (i915_reset_in_progress(error)) {
+ /* Non-interruptible callers can't handle -EAGAIN, hence return
+ * -EIO unconditionally for these. */
+ if (!interruptible)
+ return -EIO;
+
+ /* Recovery complete, but the reset failed ... */
+ if (i915_terminally_wedged(error))
+ return -EIO;
+
+ return -EAGAIN;
+ }
+
+ return 0;
+}
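
A sketch re-deriving i915_gem_check_wedge()'s return values with boolean stand-ins for the reset/wedged predicates (names are illustrative, assuming the rules stated in the comments above):

    #include <errno.h>
    #include <stdbool.h>

    static int check_wedge_sketch(bool reset_in_progress, bool wedged,
                                  bool interruptible)
    {
            if (!reset_in_progress)
                    return 0;
            if (!interruptible || wedged)
                    return -EIO;    /* caller cannot restart, or GPU is dead */
            return -EAGAIN;         /* retry once the reset completes */
    }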
+
+/*
+ * Compare seqno against outstanding lazy request. Emit a request if they are
+ * equal.
+ */
+static int
+i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
+{
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+ ret = 0;
+ if (seqno == ring->outstanding_lazy_seqno)
+ ret = i915_add_request(ring, NULL);
return ret;
}
+static void fake_irq(unsigned long data)
+{
+ wake_up_process((struct task_struct *)data);
+}
+
+static bool missed_irq(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *ring)
+{
+ return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+}
+
+static bool can_wait_boost(struct drm_i915_file_private *file_priv)
+{
+ if (file_priv == NULL)
+ return true;
+
+ return !atomic_xchg(&file_priv->rps_wait_boost, true);
+}
+
/**
- * This is the fallback shmem pwrite path, which uses get_user_pages to pin
- * the memory and maps it using kmap_atomic for copying.
+ * __wait_seqno - wait until execution of seqno has finished
+ * @ring: the ring expected to report seqno
+ * @seqno: the sequence number to wait for
+ * @reset_counter: reset sequence associated with the given seqno
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in: how long to wait (NULL means wait forever); out: how much time remains
+ *
+ * Note: It is of utmost importance that the passed in seqno and reset_counter
+ * values have been read by the caller in an smp safe manner. Where read-side
+ * locks are involved, it is sufficient to read the reset_counter before
+ * unlocking the lock that protects the seqno. For lockless tricks, the
+ * reset_counter _must_ be read before, and an appropriate smp_rmb must be
+ * inserted.
*
- * This avoids taking mmap_sem for faulting on the user's address while the
- * struct_mutex is held.
+ * Returns 0 if the seqno was found within the allotted time. Otherwise returns
+ * an errno, with the remaining time filled into the timeout argument.
*/
-static int
-i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+ unsigned reset_counter,
+ bool interruptible,
+ struct timespec *timeout,
+ struct drm_i915_file_private *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- struct mm_struct *mm = current->mm;
- struct page **user_pages;
- ssize_t remain;
- loff_t offset, pinned_pages, i;
- loff_t first_data_page, last_data_page, num_pages;
- int shmem_page_index, shmem_page_offset;
- int data_page_index, data_page_offset;
- int page_length;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const bool irq_test_in_progress =
+ ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
+ struct timespec before, now;
+ DEFINE_WAIT(wait);
+ unsigned long timeout_expire;
int ret;
- uint64_t data_ptr = args->data_ptr;
- int do_bit17_swizzling;
- remain = args->size;
+ WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
- /* Pin the user pages containing the data. We can't fault while
- * holding the struct mutex, and all of the pwrite implementations
- * want to hold it while dereferencing the user data.
- */
- first_data_page = data_ptr / PAGE_SIZE;
- last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
- num_pages = last_data_page - first_data_page + 1;
+ if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+ return 0;
- user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
- if (user_pages == NULL)
- return -ENOMEM;
+ timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
- down_read(&mm->mmap_sem);
- pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- num_pages, 0, 0, user_pages, NULL);
- up_read(&mm->mmap_sem);
- if (pinned_pages < num_pages) {
- ret = -EFAULT;
- goto fail_put_user_pages;
+ if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
+ gen6_rps_boost(dev_priv);
+ if (file_priv)
+ mod_delayed_work(dev_priv->wq,
+ &file_priv->mm.idle_work,
+ msecs_to_jiffies(100));
}
- do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
+ return -ENODEV;
- mutex_lock(&dev->struct_mutex);
+	/* Record current time in case we are interrupted by a signal, or wedged */
+ trace_i915_gem_request_wait_begin(ring, seqno);
+ getrawmonotonic(&before);
+ for (;;) {
+ struct timer_list timer;
+
+ prepare_to_wait(&ring->irq_queue, &wait,
+ interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+
+ /* We need to check whether any gpu reset happened in between
+ * the caller grabbing the seqno and now ... */
+ if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
+ /* ... but upgrade the -EAGAIN to an -EIO if the gpu
+			 * is truly gone. */
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+ if (ret == 0)
+ ret = -EAGAIN;
+ break;
+ }
- ret = i915_gem_object_get_pages_or_evict(obj);
- if (ret)
- goto fail_unlock;
+ if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+ ret = 0;
+ break;
+ }
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret != 0)
- goto fail_put_pages;
+ if (interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
- obj_priv = obj->driver_private;
- offset = args->offset;
- obj_priv->dirty = 1;
+ if (timeout && time_after_eq(jiffies, timeout_expire)) {
+ ret = -ETIME;
+ break;
+ }
- while (remain > 0) {
- /* Operation in this page
- *
- * shmem_page_index = page number within shmem file
- * shmem_page_offset = offset within page in shmem file
- * data_page_index = page number in get_user_pages return
- * data_page_offset = offset with data_page_index page.
- * page_length = bytes to copy for this page
- */
- shmem_page_index = offset / PAGE_SIZE;
- shmem_page_offset = offset & ~PAGE_MASK;
- data_page_index = data_ptr / PAGE_SIZE - first_data_page;
- data_page_offset = data_ptr & ~PAGE_MASK;
+ timer.function = NULL;
+ if (timeout || missed_irq(dev_priv, ring)) {
+ unsigned long expire;
- page_length = remain;
- if ((shmem_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - shmem_page_offset;
- if ((data_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - data_page_offset;
-
- if (do_bit17_swizzling) {
- ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
- shmem_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length,
- 0);
- } else {
- ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
- shmem_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length);
+ setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
+ expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
+ mod_timer(&timer, expire);
}
- if (ret)
- goto fail_put_pages;
- remain -= page_length;
- data_ptr += page_length;
- offset += page_length;
+ io_schedule();
+
+ if (timer.function) {
+ del_singleshot_timer_sync(&timer);
+ destroy_timer_on_stack(&timer);
+ }
}
+ getrawmonotonic(&now);
+ trace_i915_gem_request_wait_end(ring, seqno);
-fail_put_pages:
- i915_gem_object_put_pages(obj);
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
-fail_put_user_pages:
- for (i = 0; i < pinned_pages; i++)
- page_cache_release(user_pages[i]);
- drm_free_large(user_pages);
+ if (!irq_test_in_progress)
+ ring->irq_put(ring);
+
+ finish_wait(&ring->irq_queue, &wait);
+
+ if (timeout) {
+ struct timespec sleep_time = timespec_sub(now, before);
+ *timeout = timespec_sub(*timeout, sleep_time);
+ if (!timespec_valid(timeout)) /* i.e. negative time remains */
+ set_normalized_timespec(timeout, 0, 0);
+ }
return ret;
}
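
The timeout bookkeeping at the end of __wait_seqno() subtracts the measured sleep from the caller's budget and clamps at zero; a portable userspace sketch of the same arithmetic, assuming normalized timespecs (tv_nsec in [0, 1e9)):

    #include <time.h>

    static void consume_timeout(struct timespec *timeout,
                                const struct timespec *before,
                                const struct timespec *now)
    {
            long long remain =
                    (timeout->tv_sec * 1000000000LL + timeout->tv_nsec) -
                    ((now->tv_sec - before->tv_sec) * 1000000000LL +
                     (now->tv_nsec - before->tv_nsec));

            if (remain < 0)
                    remain = 0;     /* i.e. negative time remains */

            timeout->tv_sec = remain / 1000000000LL;
            timeout->tv_nsec = remain % 1000000000LL;
    }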
/**
- * Writes data to the object referenced by handle.
- *
- * On error, the contents of the buffer that were to be modified are undefined.
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
*/
int
-i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
{
- struct drm_i915_gem_pwrite *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret = 0;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool interruptible = dev_priv->mm.interruptible;
+ int ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EBADF;
- obj_priv = obj->driver_private;
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(seqno == 0);
+
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(ring, seqno);
+ if (ret)
+ return ret;
- /* Bounds check destination.
+ return __wait_seqno(ring, seqno,
+ atomic_read(&dev_priv->gpu_error.reset_counter),
+ interruptible, NULL, NULL);
+}
+
+static int
+i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring)
+{
+ if (!obj->active)
+ return 0;
+
+ /* Manually manage the write flush as we may have not yet
+ * retired the buffer.
*
- * XXX: This could use review for overflow issues...
+ * Note that the last_write_seqno is always the earlier of
+	 * the two (read/write) seqnos, so if we have successfully waited,
+ * we know we have passed the last write.
*/
- if (args->offset > obj->size || args->size > obj->size ||
- args->offset + args->size > obj->size) {
- drm_gem_object_unreference(obj);
- return -EINVAL;
- }
+ obj->last_write_seqno = 0;
- /* We can only do the GTT pwrite on untiled buffers, as otherwise
- * it would end up going through the fenced access, and we'll get
- * different detiling behavior between reading and writing.
- * pread/pwrite currently are reading and writing from the CPU
- * perspective, requiring manual detiling by the client.
- */
- if (obj_priv->phys_obj)
- ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
- else if (obj_priv->tiling_mode == I915_TILING_NONE &&
- dev->gtt_total != 0) {
- ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
- if (ret == -EFAULT) {
- ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
- file_priv);
- }
- } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
- ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
- } else {
- ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
- if (ret == -EFAULT) {
- ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
- file_priv);
- }
- }
+ return 0;
+}
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool readonly)
+{
+ struct intel_engine_cs *ring = obj->ring;
+ u32 seqno;
+ int ret;
-#if WATCH_PWRITE
+ seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+ if (seqno == 0)
+ return 0;
+
+ ret = i915_wait_seqno(ring, seqno);
if (ret)
- DRM_INFO("pwrite failed %d\n", ret);
-#endif
+ return ret;
- drm_gem_object_unreference(obj);
+ return i915_gem_object_wait_rendering__tail(obj, ring);
+}
- return ret;
+/* A nonblocking variant of the above wait. This is a highly dangerous routine
+ * as the object state may change during this call.
+ */
+static __must_check int
+i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+ struct drm_i915_file_private *file_priv,
+ bool readonly)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = obj->ring;
+ unsigned reset_counter;
+ u32 seqno;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(!dev_priv->mm.interruptible);
+
+ seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+ if (seqno == 0)
+ return 0;
+
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(ring, seqno);
+ if (ret)
+ return ret;
+
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ mutex_unlock(&dev->struct_mutex);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
+ mutex_lock(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ return i915_gem_object_wait_rendering__tail(obj, ring);
}
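
The nonblocking variant above snapshots the reset counter under the lock, waits with the lock dropped, and relies on the counter to detect that the world changed in between. A compact pthread sketch of that shape (illustrative stand-ins for struct_mutex and the reset counter, not the driver's locking):

    #include <errno.h>
    #include <pthread.h>
    #include <stdatomic.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_uint reset_counter;

    static int wait_outside_lock(void (*slow_wait)(void))
    {
            unsigned snapshot;

            pthread_mutex_lock(&lock);
            snapshot = atomic_load(&reset_counter); /* read before unlocking */
            pthread_mutex_unlock(&lock);

            slow_wait();    /* sleep without holding the lock */

            pthread_mutex_lock(&lock);
            if (snapshot != atomic_load(&reset_counter)) {
                    pthread_mutex_unlock(&lock);
                    return -EAGAIN; /* a reset happened while we slept */
            }
            pthread_mutex_unlock(&lock);
            return 0;
    }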
/**
@@ -1022,19 +1364,14 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
*/
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_set_domain *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
uint32_t read_domains = args->read_domains;
uint32_t write_domain = args->write_domain;
int ret;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
/* Only handle setting domains to types used by the CPU. */
if (write_domain & I915_GEM_GPU_DOMAINS)
return -EINVAL;
@@ -1048,30 +1385,29 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (write_domain != 0 && read_domains != write_domain)
return -EINVAL;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EBADF;
- obj_priv = obj->driver_private;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
- mutex_lock(&dev->struct_mutex);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
- intel_mark_busy(dev, obj);
+ /* Try to flush the object off the GPU without holding the lock.
+ * We will repeat the flush holding the lock in the normal manner
+ * to catch cases where we are gazumped.
+ */
+ ret = i915_gem_object_wait_rendering__nonblocking(obj,
+ file->driver_priv,
+ !write_domain);
+ if (ret)
+ goto unref;
-#if WATCH_BUF
- DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
- obj, obj->size, read_domains, write_domain);
-#endif
if (read_domains & I915_GEM_DOMAIN_GTT) {
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
- /* Update the LRU on the fence for the CPU access that's
- * about to occur.
- */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- list_move_tail(&obj_priv->fence_list,
- &dev_priv->mm.fence_list);
- }
-
/* Silently promote "you're not bound, there was nothing to do"
* to success, since the client was just asking us to
* make sure everything was done.
@@ -1082,7 +1418,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
- drm_gem_object_unreference(obj);
+unref:
+ drm_gem_object_unreference(&obj->base);
+unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -1092,34 +1430,28 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
*/
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_sw_finish *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret = 0;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
- mutex_lock(&dev->struct_mutex);
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL) {
- mutex_unlock(&dev->struct_mutex);
- return -EBADF;
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
}
-#if WATCH_BUF
- DRM_INFO("%s: sw_finish %d (%p %zd)\n",
- __func__, args->handle, obj, obj->size);
-#endif
- obj_priv = obj->driver_private;
-
/* Pinned buffers may be scanout, so flush the cache */
- if (obj_priv->pin_count)
- i915_gem_object_flush_cpu_write_domain(obj);
+ if (obj->pin_display)
+ i915_gem_object_flush_cpu_write_domain(obj, true);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
+unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -1133,30 +1465,28 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
*/
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_mmap *args = data;
struct drm_gem_object *obj;
- loff_t offset;
unsigned long addr;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
- return -EBADF;
+ return -ENOENT;
- offset = args->offset;
+ /* prime objects have no backing filp to GEM mmap
+ * pages from.
+ */
+ if (!obj->filp) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -EINVAL;
+ }
- down_write(&current->mm->mmap_sem);
- addr = do_mmap(obj->filp, 0, args->size,
+ addr = vm_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
- up_write(&current->mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
if (IS_ERR((void *)addr))
return addr;
@@ -1183,126 +1513,106 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
*/
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
+ struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
+ intel_runtime_pm_get(dev_priv);
+
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
PAGE_SHIFT;
- /* Now bind it into the GTT if needed */
- mutex_lock(&dev->struct_mutex);
- if (!obj_priv->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, 0);
- if (ret)
- goto unlock;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto out;
- list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ trace_i915_gem_object_fault(obj, page_offset, true, write);
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret)
- goto unlock;
- }
+ /* Try to flush the object off the GPU first without holding the lock.
+ * Upon reacquiring the lock, we will perform our sanity checks and then
+ * repeat the flush holding the lock in the normal manner to catch cases
+ * where we are gazumped.
+ */
+ ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
+ if (ret)
+ goto unlock;
- /* Need a new fence register? */
- if (obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj);
- if (ret)
- goto unlock;
+ /* Access to snoopable pages through the GTT is incoherent. */
+ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
+ ret = -EFAULT;
+ goto unlock;
}
- pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
- page_offset;
+ /* Now bind it into the GTT if needed */
+ ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
+ if (ret)
+ goto unlock;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unpin;
+
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ goto unpin;
+
+ obj->fault_mappable = true;
+
+ pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
+ pfn >>= PAGE_SHIFT;
+ pfn += page_offset;
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+unpin:
+ i915_gem_object_ggtt_unpin(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
-
+out:
switch (ret) {
+ case -EIO:
+ /* If this -EIO is due to a gpu hang, give the reset code a
+ * chance to clean up the mess. Otherwise return the proper
+ * SIGBUS. */
+ if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+ ret = VM_FAULT_SIGBUS;
+ break;
+ }
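+		/* fall through */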
+ case -EAGAIN:
+ /*
+ * EAGAIN means the gpu is hung and we'll wait for the error
+ * handler to reset everything when re-faulting in
+ * i915_mutex_lock_interruptible.
+ */
case 0:
case -ERESTARTSYS:
- return VM_FAULT_NOPAGE;
+ case -EINTR:
+ case -EBUSY:
+ /*
+ * EBUSY is ok: this just means that another thread
+ * already did the job.
+ */
+ ret = VM_FAULT_NOPAGE;
+ break;
case -ENOMEM:
- case -EAGAIN:
- return VM_FAULT_OOM;
+ ret = VM_FAULT_OOM;
+ break;
+ case -ENOSPC:
+ case -EFAULT:
+ ret = VM_FAULT_SIGBUS;
+ break;
default:
- return VM_FAULT_SIGBUS;
- }
-}
-
-/**
- * i915_gem_create_mmap_offset - create a fake mmap offset for an object
- * @obj: obj in question
- *
- * GEM memory mapping works by handing back to userspace a fake mmap offset
- * it can use in a subsequent mmap(2) call. The DRM core code then looks
- * up the object based on the offset and sets up the various memory mapping
- * structures.
- *
- * This routine allocates and attaches a fake offset for @obj.
- */
-static int
-i915_gem_create_mmap_offset(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- struct drm_gem_mm *mm = dev->mm_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- struct drm_map_list *list;
- struct drm_local_map *map;
- int ret = 0;
-
- /* Set the object up for mmap'ing */
- list = &obj->map_list;
- list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
- if (!list->map)
- return -ENOMEM;
-
- map = list->map;
- map->type = _DRM_GEM;
- map->size = obj->size;
- map->handle = obj;
-
- /* Get a DRM GEM mmap offset allocated... */
- list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- obj->size / PAGE_SIZE, 0, 0);
- if (!list->file_offset_node) {
- DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
- ret = -ENOMEM;
- goto out_free_list;
- }
-
- list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- obj->size / PAGE_SIZE, 0);
- if (!list->file_offset_node) {
- ret = -ENOMEM;
- goto out_free_list;
- }
-
- list->hash.key = list->file_offset_node->start;
- if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
- DRM_ERROR("failed to add to map hash\n");
- ret = -ENOMEM;
- goto out_free_mm;
+ WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
+ ret = VM_FAULT_SIGBUS;
+ break;
}
- /* By now we should be all set, any drm_mmap request on the offset
- * below will get to our mmap & fault handler */
- obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
-
- return 0;
-
-out_free_mm:
- drm_mm_put_block(list->file_offset_node);
-out_free_list:
- kfree(list->map);
-
+ intel_runtime_pm_put(dev_priv);
return ret;
}
@@ -1321,38 +1631,44 @@ out_free_list:
* fixup by i915_gem_fault().
*/
void
-i915_gem_release_mmap(struct drm_gem_object *obj)
+i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ if (!obj->fault_mappable)
+ return;
- if (dev->dev_mapping)
- unmap_mapping_range(dev->dev_mapping,
- obj_priv->mmap_offset, obj->size, 1);
+ drm_vma_node_unmap(&obj->base.vma_node,
+ obj->base.dev->anon_inode->i_mapping);
+ obj->fault_mappable = false;
}
-static void
-i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+void
+i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list;
+ struct drm_i915_gem_object *obj;
- list = &obj->map_list;
- drm_ht_remove_item(&mm->offset_hash, &list->hash);
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+ i915_gem_release_mmap(obj);
+}
- if (list->file_offset_node) {
- drm_mm_put_block(list->file_offset_node);
- list->file_offset_node = NULL;
- }
+uint32_t
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
+{
+ uint32_t gtt_size;
- if (list->map) {
- kfree(list->map);
- list->map = NULL;
- }
+ if (INTEL_INFO(dev)->gen >= 4 ||
+ tiling_mode == I915_TILING_NONE)
+ return size;
+
+ /* Previous chips need a power-of-two fence region when tiling */
+ if (INTEL_INFO(dev)->gen == 3)
+ gtt_size = 1024*1024;
+ else
+ gtt_size = 512*1024;
- obj_priv->mmap_offset = 0;
+ while (gtt_size < size)
+ gtt_size <<= 1;
+
+ return gtt_size;
}
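
The power-of-two rounding above is the whole trick for pre-gen4 fence regions; a usage sketch, where the base is 1 MiB on gen3 and 512 KiB on earlier chips:

    #include <stdint.h>

    static uint32_t fence_region_size(uint32_t size, uint32_t base)
    {
            uint32_t fence = base;  /* base must itself be a power of two */

            while (fence < size)
                    fence <<= 1;
            return fence;
    }

    /* fence_region_size(1300 * 1024, 1024 * 1024) == 2 MiB, for example. */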
/**
@@ -1360,42 +1676,115 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj)
* @obj: object to check
*
* Return the required GTT alignment for an object, taking into account
- * potential fence register mapping if needed.
+ * potential fence register mapping.
*/
-static uint32_t
-i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+ int tiling_mode, bool fenced)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int start, i;
-
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
- if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
+ if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
+ tiling_mode == I915_TILING_NONE)
return 4096;
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
- if (IS_I9XX(dev))
- start = 1024*1024;
- else
- start = 512*1024;
+ return i915_gem_get_gtt_size(dev, size, tiling_mode);
+}
+
+static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int ret;
+
+ if (drm_vma_node_has_offset(&obj->base.vma_node))
+ return 0;
+
+ dev_priv->mm.shrinker_no_lock_stealing = true;
+
+ ret = drm_gem_create_mmap_offset(&obj->base);
+ if (ret != -ENOSPC)
+ goto out;
+
+ /* Badly fragmented mmap space? The only way we can recover
+ * space is by destroying unwanted objects. We can't randomly release
+ * mmap_offsets as userspace expects them to be persistent for the
+	 * lifetime of the objects. The closest we can do is to release the
+	 * offsets on purgeable objects by truncating them and marking them purged,
+	 * which prevents userspace from ever using those objects again.
+ */
+ i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+ ret = drm_gem_create_mmap_offset(&obj->base);
+ if (ret != -ENOSPC)
+ goto out;
+
+ i915_gem_shrink_all(dev_priv);
+ ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+ dev_priv->mm.shrinker_no_lock_stealing = false;
+
+ return ret;
+}
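
The function above is an escalating-retry pattern: try cheaply, reclaim a little, reclaim everything, and stop as soon as the failure is anything other than "address space full". A skeleton of the same shape (callback names are illustrative):

    #include <errno.h>

    static int alloc_with_reclaim(int (*try_alloc)(void),
                                  void (*reclaim_some)(void),
                                  void (*reclaim_all)(void))
    {
            int ret = try_alloc();
            if (ret != -ENOSPC)
                    return ret;

            reclaim_some();         /* cheap: drop purgeable objects */
            ret = try_alloc();
            if (ret != -ENOSPC)
                    return ret;

            reclaim_all();          /* last resort: evict everything */
            return try_alloc();
    }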
+
+static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ drm_gem_free_mmap_offset(&obj->base);
+}
+
+int
+i915_gem_mmap_gtt(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ if (obj->base.size > dev_priv->gtt.mappable_end) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ if (obj->madv != I915_MADV_WILLNEED) {
+ DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
+ ret = -EFAULT;
+ goto out;
+ }
- for (i = start; i < obj->size; i <<= 1)
- ;
+ ret = i915_gem_object_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+
+ *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
- return i;
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
* @data: GTT mapping ioctl data
- * @file_priv: GEM object info
+ * @file: GEM object info
*
* Simply returns the fake offset to userspace so it can mmap it.
* The mmap call will end up in drm_gem_mmap(), which will set things
@@ -1408,1384 +1797,1774 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
*/
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_mmap_gtt *args = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
+ return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+}
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EBADF;
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+{
+ return obj->madv == I915_MADV_DONTNEED;
+}
- mutex_lock(&dev->struct_mutex);
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_free_mmap_offset(obj);
- obj_priv = obj->driver_private;
+ if (obj->base.filp == NULL)
+ return;
- if (obj_priv->madv != I915_MADV_WILLNEED) {
- DRM_ERROR("Attempting to mmap a purgeable buffer\n");
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ /* Our goal here is to return as much of the memory as
+	 * possible back to the system, as we are called from OOM.
+ * To do this we must instruct the shmfs to drop all of its
+ * backing pages, *now*.
+ */
+ shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
+ obj->madv = __I915_MADV_PURGED;
+}
+
+/* Try to discard unwanted pages */
+static void
+i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
+{
+ struct address_space *mapping;
+
+ switch (obj->madv) {
+ case I915_MADV_DONTNEED:
+ i915_gem_object_truncate(obj);
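+		/* truncation marks the object purged; fall through */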
+ case __I915_MADV_PURGED:
+ return;
}
+ if (obj->base.filp == NULL)
+ return;
- if (!obj_priv->mmap_offset) {
- ret = i915_gem_create_mmap_offset(obj);
- if (ret) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+	mapping = file_inode(obj->base.filp)->i_mapping;
+ invalidate_mapping_pages(mapping, 0, (loff_t)-1);
+}
+
+static void
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+{
+ struct sg_page_iter sg_iter;
+ int ret;
+
+ BUG_ON(obj->madv == __I915_MADV_PURGED);
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret) {
+ /* In the event of a disaster, abandon all caches and
+ * hope for the best.
+ */
+ WARN_ON(ret != -EIO);
+ i915_gem_clflush_object(obj, true);
+ obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
- args->offset = obj_priv->mmap_offset;
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ i915_gem_object_save_bit_17_swizzle(obj);
+
+ if (obj->madv == I915_MADV_DONTNEED)
+ obj->dirty = 0;
+
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+
+ if (obj->dirty)
+ set_page_dirty(page);
+
+ if (obj->madv == I915_MADV_WILLNEED)
+ mark_page_accessed(page);
+
+ page_cache_release(page);
+ }
+ obj->dirty = 0;
+
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+}
+
+int
+i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+ const struct drm_i915_gem_object_ops *ops = obj->ops;
+
+ if (obj->pages == NULL)
+ return 0;
+
+ if (obj->pages_pin_count)
+ return -EBUSY;
+
+ BUG_ON(i915_gem_obj_bound_any(obj));
+
+ /* ->put_pages might need to allocate memory for the bit17 swizzle
+ * array, hence protect them from being reaped by removing them from gtt
+ * lists early. */
+ list_del(&obj->global_list);
+
+ ops->put_pages(obj);
+ obj->pages = NULL;
+
+ i915_gem_object_invalidate(obj);
+
+ return 0;
+}
+
+static unsigned long
+__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
+ bool purgeable_only)
+{
+ struct list_head still_in_list;
+ struct drm_i915_gem_object *obj;
+ unsigned long count = 0;
/*
- * Pull it into the GTT so that we have a page list (makes the
- * initial fault faster and any subsequent flushing possible).
+ * As we may completely rewrite the (un)bound list whilst unbinding
+ * (due to retiring requests) we have to strictly process only
+	 * one element of the list at a time, and recheck the list
+ * on every iteration.
+ *
+ * In particular, we must hold a reference whilst removing the
+ * object as we may end up waiting for and/or retiring the objects.
+ * This might release the final reference (held by the active list)
+ * and result in the object being freed from under us. This is
+ * similar to the precautions the eviction code must take whilst
+ * removing objects.
+ *
+ * Also note that although these lists do not hold a reference to
+ * the object we can safely grab one here: The final object
+ * unreferencing and the bound_list are both protected by the
+ * dev->struct_mutex and so we won't ever be able to observe an
+ * object on the bound_list with a reference count equals 0.
*/
- if (!obj_priv->agp_mem) {
- ret = i915_gem_object_bind_to_gtt(obj, 0);
- if (ret) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&still_in_list);
+ while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
+ obj = list_first_entry(&dev_priv->mm.unbound_list,
+ typeof(*obj), global_list);
+ list_move_tail(&obj->global_list, &still_in_list);
+
+ if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+ continue;
+
+ drm_gem_object_reference(&obj->base);
+
+ if (i915_gem_object_put_pages(obj) == 0)
+ count += obj->base.size >> PAGE_SHIFT;
+
+ drm_gem_object_unreference(&obj->base);
}
+ list_splice(&still_in_list, &dev_priv->mm.unbound_list);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ INIT_LIST_HEAD(&still_in_list);
+ while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+ struct i915_vma *vma, *v;
- return 0;
+ obj = list_first_entry(&dev_priv->mm.bound_list,
+ typeof(*obj), global_list);
+ list_move_tail(&obj->global_list, &still_in_list);
+
+ if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+ continue;
+
+ drm_gem_object_reference(&obj->base);
+
+ list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+ if (i915_vma_unbind(vma))
+ break;
+
+ if (i915_gem_object_put_pages(obj) == 0)
+ count += obj->base.size >> PAGE_SHIFT;
+
+ drm_gem_object_unreference(&obj->base);
+ }
+ list_splice(&still_in_list, &dev_priv->mm.bound_list);
+
+ return count;
}
-void
-i915_gem_object_put_pages(struct drm_gem_object *obj)
+static unsigned long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int page_count = obj->size / PAGE_SIZE;
- int i;
+ return __i915_gem_shrink(dev_priv, target, true);
+}
- BUG_ON(obj_priv->pages_refcount == 0);
- BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
+static unsigned long
+i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+{
+ i915_gem_evict_everything(dev_priv->dev);
+ return __i915_gem_shrink(dev_priv, LONG_MAX, false);
+}
- if (--obj_priv->pages_refcount != 0)
- return;
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int page_count, i;
+ struct address_space *mapping;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ struct sg_page_iter sg_iter;
+ struct page *page;
+ unsigned long last_pfn = 0; /* suppress gcc warning */
+ gfp_t gfp;
- if (obj_priv->tiling_mode != I915_TILING_NONE)
- i915_gem_object_save_bit_17_swizzle(obj);
+ /* Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+ * a GPU cache
+ */
+ BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
- if (obj_priv->madv == I915_MADV_DONTNEED)
- obj_priv->dirty = 0;
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return -ENOMEM;
+ page_count = obj->base.size / PAGE_SIZE;
+ if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ /* Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ *
+ * Fail silently without starting the shrinker
+ */
+ mapping = file_inode(obj->base.filp)->i_mapping;
+ gfp = mapping_gfp_mask(mapping);
+ gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+ gfp &= ~(__GFP_IO | __GFP_WAIT);
+ sg = st->sgl;
+ st->nents = 0;
for (i = 0; i < page_count; i++) {
- if (obj_priv->pages[i] == NULL)
- break;
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ if (IS_ERR(page)) {
+ i915_gem_purge(dev_priv, page_count);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ }
+ if (IS_ERR(page)) {
+ /* We've tried hard to allocate the memory by reaping
+ * our own buffer, now let the real VM do its job and
+ * go down in flames if truly OOM.
+ */
+ gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
+ gfp |= __GFP_IO | __GFP_WAIT;
- if (obj_priv->dirty)
- set_page_dirty(obj_priv->pages[i]);
+ i915_gem_shrink_all(dev_priv);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ if (IS_ERR(page))
+ goto err_pages;
- if (obj_priv->madv == I915_MADV_WILLNEED)
- mark_page_accessed(obj_priv->pages[i]);
+ gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+ gfp &= ~(__GFP_IO | __GFP_WAIT);
+ }
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ st->nents++;
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ sg = sg_next(sg);
+ continue;
+ }
+#endif
+ if (!i || page_to_pfn(page) != last_pfn + 1) {
+ if (i)
+ sg = sg_next(sg);
+ st->nents++;
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ } else {
+ sg->length += PAGE_SIZE;
+ }
+ last_pfn = page_to_pfn(page);
- page_cache_release(obj_priv->pages[i]);
+ /* Check that the i965g/gm workaround works. */
+ WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
}
- obj_priv->dirty = 0;
+#ifdef CONFIG_SWIOTLB
+ if (!swiotlb_nr_tbl())
+#endif
+ sg_mark_end(sg);
+ obj->pages = st;
- drm_free_large(obj_priv->pages);
- obj_priv->pages = NULL;
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ i915_gem_object_do_bit_17_swizzle(obj);
+
+ return 0;
+
+err_pages:
+ sg_mark_end(sg);
+ for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+ page_cache_release(sg_page_iter_page(&sg_iter));
+ sg_free_table(st);
+ kfree(st);
+
+ /* shmemfs first checks if there is enough memory to allocate the page
+	 * and reports ENOSPC if there is not enough, along with the usual
+ * ENOMEM for a genuine allocation failure.
+ *
+ * We use ENOSPC in our driver to mean that we have run out of aperture
+ * space and so want to translate the error from shmemfs back to our
+ * usual understanding of ENOMEM.
+ */
+ if (PTR_ERR(page) == -ENOSPC)
+ return -ENOMEM;
+ else
+ return PTR_ERR(page);
+}
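
The loop above coalesces physically adjacent pages into single scatterlist segments by comparing page frame numbers; a table-free sketch of the same merging with plain arrays (illustrative types, not the sg API):

    #include <stddef.h>

    struct seg { unsigned long first_pfn; size_t npages; };

    static size_t coalesce_pfns(const unsigned long *pfn, size_t n,
                                struct seg *out)
    {
            size_t nsegs = 0;

            for (size_t i = 0; i < n; i++) {
                    if (i == 0 || pfn[i] != pfn[i - 1] + 1) {
                            out[nsegs].first_pfn = pfn[i];  /* new segment */
                            out[nsegs++].npages = 1;
                    } else {
                            out[nsegs - 1].npages++;        /* extend last */
                    }
            }
            return nsegs;
    }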
+
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_get_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_put_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int
+i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ const struct drm_i915_gem_object_ops *ops = obj->ops;
+ int ret;
+
+ if (obj->pages)
+ return 0;
+
+ if (obj->madv != I915_MADV_WILLNEED) {
+ DRM_DEBUG("Attempting to obtain a purgeable object\n");
+ return -EFAULT;
+ }
+
+ BUG_ON(obj->pages_pin_count);
+
+ ret = ops->get_pages(obj);
+ if (ret)
+ return ret;
+
+ list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+ return 0;
}
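
The get/put pairing described above follows a populate-once, pin-counted contract: pages are gathered a single time, pins track users, and release is refused while any pin is held. A toy model of that contract (illustrative state, not the driver's structs):

    #include <errno.h>

    struct pages_state { int populated; int pin_count; };

    static int get_pages_model(struct pages_state *s)
    {
            s->populated = 1;       /* gather from backing storage once */
            return 0;
    }

    static int put_pages_model(struct pages_state *s)
    {
            if (!s->populated)
                    return 0;
            if (s->pin_count)
                    return -EBUSY;  /* a user still holds the pages pinned */
            s->populated = 0;
            return 0;
    }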
static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
+i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring)
{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 seqno = intel_ring_get_seqno(ring);
+
+ BUG_ON(ring == NULL);
+ if (obj->ring != ring && obj->last_write_seqno) {
+ /* Keep the seqno relative to the current ring */
+ obj->last_write_seqno = seqno;
+ }
+ obj->ring = ring;
/* Add a reference if we're newly entering the active list. */
- if (!obj_priv->active) {
- drm_gem_object_reference(obj);
- obj_priv->active = 1;
+ if (!obj->active) {
+ drm_gem_object_reference(&obj->base);
+ obj->active = 1;
}
- /* Move from whatever list we were on to the tail of execution. */
- spin_lock(&dev_priv->mm.active_list_lock);
- list_move_tail(&obj_priv->list,
- &dev_priv->mm.active_list);
- spin_unlock(&dev_priv->mm.active_list_lock);
- obj_priv->last_rendering_seqno = seqno;
+
+ list_move_tail(&obj->ring_list, &ring->active_list);
+
+ obj->last_read_seqno = seqno;
+
+ if (obj->fenced_gpu_access) {
+ obj->last_fenced_seqno = seqno;
+
+ /* Bump MRU to take account of the delayed flush */
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_fence_reg *reg;
+
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ list_move_tail(&reg->lru_list,
+ &dev_priv->mm.fence_list);
+ }
+ }
+}
+
+void i915_vma_move_to_active(struct i915_vma *vma,
+ struct intel_engine_cs *ring)
+{
+ list_move_tail(&vma->mm_list, &vma->vm->active_list);
+ return i915_gem_object_move_to_active(vma->obj, ring);
}
static void
-i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct i915_address_space *vm;
+ struct i915_vma *vma;
+
+ BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
+ BUG_ON(!obj->active);
+
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (vma && !list_empty(&vma->mm_list))
+ list_move_tail(&vma->mm_list, &vm->inactive_list);
+ }
+
+ list_del_init(&obj->ring_list);
+ obj->ring = NULL;
- BUG_ON(!obj_priv->active);
- list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
- obj_priv->last_rendering_seqno = 0;
+ obj->last_read_seqno = 0;
+ obj->last_write_seqno = 0;
+ obj->base.write_domain = 0;
+
+ obj->last_fenced_seqno = 0;
+ obj->fenced_gpu_access = false;
+
+ obj->active = 0;
+ drm_gem_object_unreference(&obj->base);
+
+ WARN_ON(i915_verify_lists(dev));
}
-/* Immediately discard the backing storage */
static void
-i915_gem_object_truncate(struct drm_gem_object *obj)
+i915_gem_object_retire(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- struct inode *inode;
+ struct intel_engine_cs *ring = obj->ring;
- inode = obj->filp->f_path.dentry->d_inode;
- if (inode->i_op->truncate)
- inode->i_op->truncate (inode);
+ if (ring == NULL)
+ return;
- obj_priv->madv = __I915_MADV_PURGED;
+ if (i915_seqno_passed(ring->get_seqno(ring, true),
+ obj->last_read_seqno))
+ i915_gem_object_move_to_inactive(obj);
}
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+static int
+i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int ret, i, j;
+
+ /* Carefully retire all requests without writing to the rings */
+ for_each_ring(ring, dev_priv, i) {
+ ret = intel_ring_idle(ring);
+ if (ret)
+ return ret;
+ }
+ i915_gem_retire_requests(dev);
+
+ /* Finally reset hw state */
+ for_each_ring(ring, dev_priv, i) {
+ intel_ring_init_seqno(ring, seqno);
+
+ for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
+ ring->semaphore.sync_seqno[j] = 0;
+ }
+
+ return 0;
+}
+
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
- return obj_priv->madv == I915_MADV_DONTNEED;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (seqno == 0)
+ return -EINVAL;
+
+	/* The HWS page seqno needs to be set to less than what we
+	 * will inject into the ring
+ */
+ ret = i915_gem_init_seqno(dev, seqno - 1);
+ if (ret)
+ return ret;
+
+ /* Carefully set the last_seqno value so that wrap
+ * detection still works
+ */
+ dev_priv->next_seqno = seqno;
+ dev_priv->last_seqno = seqno - 1;
+ if (dev_priv->last_seqno == 0)
+ dev_priv->last_seqno--;
+
+ return 0;
}
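
A standalone sketch of the seqno priming above: the status page is seeded with seqno - 1 so the first injected request reads as not yet complete, and 0 stays reserved as the no-seqno value (illustrative struct, not the driver's):

    #include <stdint.h>

    struct seqno_state { uint32_t next, last; };

    static void set_seqno_model(struct seqno_state *s, uint32_t seqno)
    {
            s->next = seqno;        /* the next request gets exactly this */
            s->last = seqno - 1;    /* pretend the previous one completed */
            if (s->last == 0)
                    s->last--;      /* keep 0 reserved for "no seqno" */
    }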
-static void
-i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
+int
+i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- i915_verify_inactive(dev, __FILE__, __LINE__);
- if (obj_priv->pin_count != 0)
- list_del_init(&obj_priv->list);
- else
- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ /* reserve 0 for non-seqno */
+ if (dev_priv->next_seqno == 0) {
+ int ret = i915_gem_init_seqno(dev, 0);
+ if (ret)
+ return ret;
- obj_priv->last_rendering_seqno = 0;
- if (obj_priv->active) {
- obj_priv->active = 0;
- drm_gem_object_unreference(obj);
+ dev_priv->next_seqno = 1;
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
+ return 0;
}
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
-uint32_t
-i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
- uint32_t flush_domains)
+int __i915_add_request(struct intel_engine_cs *ring,
+ struct drm_file *file,
+ struct drm_i915_gem_object *obj,
+ u32 *out_seqno)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_file_private *i915_file_priv = NULL;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
- uint32_t seqno;
- int was_empty;
- RING_LOCALS;
+ u32 request_ring_position, request_start;
+ int ret;
- if (file_priv != NULL)
- i915_file_priv = file_priv->driver_priv;
+ request_start = intel_ring_get_tail(ring);
+ /*
+ * Emit any outstanding flushes - execbuf can fail to emit the flush
+ * after having emitted the batchbuffer command. Hence we need to fix
+ * things up similar to emitting the lazy request. The difference here
+ * is that the flush _must_ happen before the next request, no matter
+ * what.
+ */
+ ret = intel_ring_flush_all_caches(ring);
+ if (ret)
+ return ret;
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return 0;
+ request = ring->preallocated_lazy_request;
+ if (WARN_ON(request == NULL))
+ return -ENOMEM;
- /* Grab the seqno we're going to make this request be, and bump the
- * next (skipping 0 so it can be the reserved no-seqno value).
+ /* Record the position of the start of the request so that
+ * should we detect the updated seqno part-way through the
+ * GPU processing the request, we never over-estimate the
+ * position of the head.
*/
- seqno = dev_priv->mm.next_gem_seqno;
- dev_priv->mm.next_gem_seqno++;
- if (dev_priv->mm.next_gem_seqno == 0)
- dev_priv->mm.next_gem_seqno++;
+ request_ring_position = intel_ring_get_tail(ring);
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(seqno);
+ ret = ring->add_request(ring);
+ if (ret)
+ return ret;
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
+ request->seqno = intel_ring_get_seqno(ring);
+ request->ring = ring;
+ request->head = request_start;
+ request->tail = request_ring_position;
- DRM_DEBUG_DRIVER("%d\n", seqno);
+ /* Whilst this request exists, batch_obj will be on the
+ * active_list, and so will hold the active reference. Only when this
+	 * request is retired will the batch_obj be moved onto the
+ * inactive_list and lose its active reference. Hence we do not need
+ * to explicitly hold another reference here.
+ */
+ request->batch_obj = obj;
+
+ /* Hold a reference to the current context so that we can inspect
+ * it later in case a hangcheck error event fires.
+ */
+ request->ctx = ring->last_context;
+ if (request->ctx)
+ i915_gem_context_reference(request->ctx);
- request->seqno = seqno;
request->emitted_jiffies = jiffies;
- was_empty = list_empty(&dev_priv->mm.request_list);
- list_add_tail(&request->list, &dev_priv->mm.request_list);
- if (i915_file_priv) {
+ list_add_tail(&request->list, &ring->request_list);
+ request->file_priv = NULL;
+
+ if (file) {
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ spin_lock(&file_priv->mm.lock);
+ request->file_priv = file_priv;
list_add_tail(&request->client_list,
- &i915_file_priv->mm.request_list);
- } else {
- INIT_LIST_HEAD(&request->client_list);
+ &file_priv->mm.request_list);
+ spin_unlock(&file_priv->mm.lock);
}
- /* Associate any objects on the flushing list matching the write
- * domain we're flushing with our flush.
- */
- if (flush_domains != 0) {
- struct drm_i915_gem_object *obj_priv, *next;
+ trace_i915_gem_request_add(ring, request->seqno);
+ ring->outstanding_lazy_seqno = 0;
+ ring->preallocated_lazy_request = NULL;
- list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.flushing_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ if (!dev_priv->ums.mm_suspended) {
+ i915_queue_hangcheck(ring->dev);
- if ((obj->write_domain & flush_domains) ==
- obj->write_domain) {
- uint32_t old_write_domain = obj->write_domain;
+ cancel_delayed_work_sync(&dev_priv->mm.idle_work);
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
+ intel_mark_busy(dev_priv->dev);
+ }
- obj->write_domain = 0;
- i915_gem_object_move_to_active(obj, seqno);
+ if (out_seqno)
+ *out_seqno = request->seqno;
+ return 0;
+}
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+{
+ struct drm_i915_file_private *file_priv = request->file_priv;
+
+ if (!file_priv)
+ return;
+
+ spin_lock(&file_priv->mm.lock);
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ spin_unlock(&file_priv->mm.lock);
+}
+
+static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
+ const struct intel_context *ctx)
+{
+ unsigned long elapsed;
+
+ elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
+
+ if (ctx->hang_stats.banned)
+ return true;
+
+ if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
+ if (!i915_gem_context_is_default(ctx)) {
+ DRM_DEBUG("context hanging too fast, banning!\n");
+ return true;
+ } else if (i915_stop_ring_allow_ban(dev_priv)) {
+ if (i915_stop_ring_allow_warn(dev_priv))
+ DRM_ERROR("gpu hanging too fast, banning!\n");
+ return true;
}
+ }
+ return false;
+}
+
+static void i915_set_reset_status(struct drm_i915_private *dev_priv,
+ struct intel_context *ctx,
+ const bool guilty)
+{
+ struct i915_ctx_hang_stats *hs;
+
+ if (WARN_ON(!ctx))
+ return;
+
+ hs = &ctx->hang_stats;
+
+ if (guilty) {
+ hs->banned = i915_context_is_banned(dev_priv, ctx);
+ hs->batch_active++;
+ hs->guilty_ts = get_seconds();
+ } else {
+ hs->batch_pending++;
}
+}
+
+static void i915_gem_free_request(struct drm_i915_gem_request *request)
+{
+ list_del(&request->list);
+ i915_gem_request_remove_from_client(request);
+
+ if (request->ctx)
+ i915_gem_context_unreference(request->ctx);
+
+ kfree(request);
+}
+
+struct drm_i915_gem_request *
+i915_gem_find_active_request(struct intel_engine_cs *ring)
+{
+ struct drm_i915_gem_request *request;
+ u32 completed_seqno;
+
+ completed_seqno = ring->get_seqno(ring, false);
+
+ list_for_each_entry(request, &ring->request_list, list) {
+ if (i915_seqno_passed(completed_seqno, request->seqno))
+ continue;
- if (!dev_priv->mm.suspended) {
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
- if (was_empty)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ return request;
}
- return seqno;
+
+ return NULL;
}
-/**
- * Command execution barrier
- *
- * Ensures that all commands in the ring are finished
- * before signalling the CPU
- */
-static uint32_t
-i915_retire_commands(struct drm_device *dev)
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- uint32_t flush_domains = 0;
- RING_LOCALS;
+ struct drm_i915_gem_request *request;
+ bool ring_hung;
+
+ request = i915_gem_find_active_request(ring);
+
+ if (request == NULL)
+ return;
+
+ ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+
+ i915_set_reset_status(dev_priv, request->ctx, ring_hung);
- /* The sampler always gets flushed on i965 (sigh) */
- if (IS_I965G(dev))
- flush_domains |= I915_GEM_DOMAIN_SAMPLER;
- BEGIN_LP_RING(2);
- OUT_RING(cmd);
- OUT_RING(0); /* noop */
- ADVANCE_LP_RING();
- return flush_domains;
+ list_for_each_entry_continue(request, &ring->request_list, list)
+ i915_set_reset_status(dev_priv, request->ctx, false);
}
-/**
- * Moves buffers associated only with the given active seqno from the active
- * to inactive list, potentially freeing them.
- */
-static void
-i915_gem_retire_request(struct drm_device *dev,
- struct drm_i915_gem_request *request)
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ while (!list_empty(&ring->active_list)) {
+ struct drm_i915_gem_object *obj;
- trace_i915_gem_request_retire(dev, request->seqno);
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
- /* Move any buffers on the active list that are no longer referenced
- * by the ringbuffer to the flushing/inactive lists as appropriate.
+ i915_gem_object_move_to_inactive(obj);
+ }
+
+ /*
+	 * We must free the requests only after all the corresponding objects
+	 * have been moved off the active lists, which matches the order the
+	 * normal retire_requests path uses. This matters because objects may
+	 * hold implicit references on things such as ppgtt address spaces
+	 * through the request.
+ * the request.
*/
- spin_lock(&dev_priv->mm.active_list_lock);
- while (!list_empty(&dev_priv->mm.active_list)) {
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = list_first_entry(&dev_priv->mm.active_list,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
-
- /* If the seqno being retired doesn't match the oldest in the
- * list, then the oldest in the list must still be newer than
- * this seqno.
- */
- if (obj_priv->last_rendering_seqno != request->seqno)
- goto out;
+ while (!list_empty(&ring->request_list)) {
+ struct drm_i915_gem_request *request;
-#if WATCH_LRU
- DRM_INFO("%s: retire %d moves to inactive list %p\n",
- __func__, request->seqno, obj);
-#endif
+ request = list_first_entry(&ring->request_list,
+ struct drm_i915_gem_request,
+ list);
- if (obj->write_domain != 0)
- i915_gem_object_move_to_flushing(obj);
- else {
- /* Take a reference on the object so it won't be
- * freed while the spinlock is held. The list
- * protection for this spinlock is safe when breaking
- * the lock like this since the next thing we do
- * is just get the head of the list again.
- */
- drm_gem_object_reference(obj);
- i915_gem_object_move_to_inactive(obj);
- spin_unlock(&dev_priv->mm.active_list_lock);
- drm_gem_object_unreference(obj);
- spin_lock(&dev_priv->mm.active_list_lock);
- }
+ i915_gem_free_request(request);
}
-out:
- spin_unlock(&dev_priv->mm.active_list_lock);
+
+	/* These may not have been flushed before the reset, so do it now */
+ kfree(ring->preallocated_lazy_request);
+ ring->preallocated_lazy_request = NULL;
+ ring->outstanding_lazy_seqno = 0;
}
-/**
- * Returns true if seq1 is later than seq2.
- */
-bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+void i915_gem_restore_fences(struct drm_device *dev)
{
- return (int32_t)(seq1 - seq2) >= 0;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+ /*
+ * Commit delayed tiling changes if we have an object still
+ * attached to the fence, otherwise just clear the fence.
+ */
+ if (reg->obj) {
+ i915_gem_object_update_fence(reg->obj, reg,
+ reg->obj->tiling_mode);
+ } else {
+ i915_gem_write_fence(dev, i, NULL);
+ }
+ }
}
-uint32_t
-i915_get_gem_seqno(struct drm_device *dev)
+void i915_gem_reset(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int i;
- return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+ /*
+ * Before we free the objects from the requests, we need to inspect
+	 * them to find the guilty party. As the requests only borrow
+ * their reference to the objects, the inspection must be done first.
+ */
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_reset_ring_status(dev_priv, ring);
+
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_reset_ring_cleanup(dev_priv, ring);
+
+ i915_gem_context_reset(dev);
+
+ i915_gem_restore_fences(dev);
}
/**
* This function clears the request list as sequence numbers are passed.
*/
void
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
- if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
+ if (list_empty(&ring->request_list))
return;
- seqno = i915_get_gem_seqno(dev);
+ WARN_ON(i915_verify_lists(ring->dev));
- while (!list_empty(&dev_priv->mm.request_list)) {
- struct drm_i915_gem_request *request;
- uint32_t retiring_seqno;
+ seqno = ring->get_seqno(ring, true);
- request = list_first_entry(&dev_priv->mm.request_list,
- struct drm_i915_gem_request,
- list);
- retiring_seqno = request->seqno;
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate,
+ * before we free the context associated with the requests.
+ */
+ while (!list_empty(&ring->active_list)) {
+ struct drm_i915_gem_object *obj;
- if (i915_seqno_passed(seqno, retiring_seqno) ||
- atomic_read(&dev_priv->mm.wedged)) {
- i915_gem_retire_request(dev, request);
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
- list_del(&request->list);
- list_del(&request->client_list);
- kfree(request);
- } else
+ if (!i915_seqno_passed(seqno, obj->last_read_seqno))
break;
- }
- if (unlikely (dev_priv->trace_irq_seqno &&
- i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
- i915_user_irq_put(dev);
- dev_priv->trace_irq_seqno = 0;
+ i915_gem_object_move_to_inactive(obj);
}
-}
-
-void
-i915_gem_retire_work_handler(struct work_struct *work)
-{
- drm_i915_private_t *dev_priv;
- struct drm_device *dev;
- dev_priv = container_of(work, drm_i915_private_t,
- mm.retire_work.work);
- dev = dev_priv->dev;
- mutex_lock(&dev->struct_mutex);
- i915_gem_retire_requests(dev);
- if (!dev_priv->mm.suspended &&
- !list_empty(&dev_priv->mm.request_list))
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
- mutex_unlock(&dev->struct_mutex);
-}
+ while (!list_empty(&ring->request_list)) {
+ struct drm_i915_gem_request *request;
-int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 ier;
- int ret = 0;
+ request = list_first_entry(&ring->request_list,
+ struct drm_i915_gem_request,
+ list);
- BUG_ON(seqno == 0);
+ if (!i915_seqno_passed(seqno, request->seqno))
+ break;
- if (atomic_read(&dev_priv->mm.wedged))
- return -EIO;
+ trace_i915_gem_request_retire(ring, request->seqno);
+		/* We know the GPU must have read the request to have
+		 * sent us the seqno + interrupt, so use the position
+		 * of the tail of the request to update the last known
+		 * position of the GPU head.
+		 */
+ ring->buffer->last_retired_head = request->tail;
- if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
- if (IS_IRONLAKE(dev))
- ier = I915_READ(DEIER) | I915_READ(GTIER);
- else
- ier = I915_READ(IER);
- if (!ier) {
- DRM_ERROR("something (likely vbetool) disabled "
- "interrupts, re-enabling\n");
- i915_driver_irq_preinstall(dev);
- i915_driver_irq_postinstall(dev);
- }
+ i915_gem_free_request(request);
+ }
- trace_i915_gem_request_wait_begin(dev, seqno);
+ if (unlikely(ring->trace_irq_seqno &&
+ i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
+ ring->irq_put(ring);
+ ring->trace_irq_seqno = 0;
+ }
- dev_priv->mm.waiting_gem_seqno = seqno;
- i915_user_irq_get(dev);
- if (interruptible)
- ret = wait_event_interruptible(dev_priv->irq_queue,
- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
- atomic_read(&dev_priv->mm.wedged));
- else
- wait_event(dev_priv->irq_queue,
- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
- atomic_read(&dev_priv->mm.wedged));
+ WARN_ON(i915_verify_lists(ring->dev));
+}
- i915_user_irq_put(dev);
- dev_priv->mm.waiting_gem_seqno = 0;
+bool
+i915_gem_retire_requests(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ bool idle = true;
+ int i;
- trace_i915_gem_request_wait_end(dev, seqno);
+ for_each_ring(ring, dev_priv, i) {
+ i915_gem_retire_requests_ring(ring);
+ idle &= list_empty(&ring->request_list);
}
- if (atomic_read(&dev_priv->mm.wedged))
- ret = -EIO;
- if (ret && ret != -ERESTARTSYS)
- DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
- __func__, ret, seqno, i915_get_gem_seqno(dev));
-
- /* Directly dispatch request retiring. While we have the work queue
- * to handle this, the waiter on a request often wants an associated
- * buffer to have made it to the inactive list, and we would need
- * a separate wait queue to handle that.
- */
- if (ret == 0)
- i915_gem_retire_requests(dev);
+ if (idle)
+ mod_delayed_work(dev_priv->wq,
+ &dev_priv->mm.idle_work,
+ msecs_to_jiffies(100));
- return ret;
+ return idle;
}
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+static void
+i915_gem_retire_work_handler(struct work_struct *work)
{
- return i915_do_wait_request(dev, seqno, 1);
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), mm.retire_work.work);
+ struct drm_device *dev = dev_priv->dev;
+ bool idle;
+
+ /* Come back later if the device is busy... */
+ idle = false;
+ if (mutex_trylock(&dev->struct_mutex)) {
+ idle = i915_gem_retire_requests(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+ if (!idle)
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
}
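
The handler above uses a trylock-and-requeue idiom: a work item that must never sleep on a contended struct_mutex simply tries the lock and re-arms itself if the device was still busy. A sketch of the same idiom, with hypothetical names (my_dev, my_retire_requests) standing in for the driver's own:

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_dev {				/* hypothetical device, for illustration */
	struct mutex lock;
	struct workqueue_struct *wq;
	struct delayed_work retire;
};

bool my_retire_requests(struct my_dev *dev);	/* hypothetical helper */

static void my_retire_handler(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, retire.work);
	bool idle = false;

	if (mutex_trylock(&dev->lock)) {	/* never block in this path */
		idle = my_retire_requests(dev);
		mutex_unlock(&dev->lock);
	}
	if (!idle)				/* busy or contended: retry later */
		queue_delayed_work(dev->wq, &dev->retire,
				   round_jiffies_up_relative(HZ));
}
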
static void
-i915_gem_flush(struct drm_device *dev,
- uint32_t invalidate_domains,
- uint32_t flush_domains)
+i915_gem_idle_work_handler(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t cmd;
- RING_LOCALS;
-
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
- invalidate_domains, flush_domains);
-#endif
- trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
- invalidate_domains, flush_domains);
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), mm.idle_work.work);
- if (flush_domains & I915_GEM_DOMAIN_CPU)
- drm_agp_chipset_flush(dev);
-
- if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
- /*
- * read/write caches:
- *
- * I915_GEM_DOMAIN_RENDER is always invalidated, but is
- * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
- * also flushed at 2d versus 3d pipeline switches.
- *
- * read-only caches:
- *
- * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
- * MI_READ_FLUSH is set, and is always flushed on 965.
- *
- * I915_GEM_DOMAIN_COMMAND may not exist?
- *
- * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
- * invalidated when MI_EXE_FLUSH is set.
- *
- * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
- * invalidated with every MI_FLUSH.
- *
- * TLBs:
- *
- * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
- * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
- * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
- * are flushed at any MI_FLUSH.
- */
-
- cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- if ((invalidate_domains|flush_domains) &
- I915_GEM_DOMAIN_RENDER)
- cmd &= ~MI_NO_WRITE_FLUSH;
- if (!IS_I965G(dev)) {
- /*
- * On the 965, the sampler cache always gets flushed
- * and this bit is reserved.
- */
- if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
- cmd |= MI_READ_FLUSH;
- }
- if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
- cmd |= MI_EXE_FLUSH;
-
-#if WATCH_EXEC
- DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
-#endif
- BEGIN_LP_RING(2);
- OUT_RING(cmd);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
- }
+ intel_mark_idle(dev_priv->dev);
}
/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
+ * Ensures that an object will eventually get non-busy by flushing any required
+ * write domains, emitting any outstanding lazy request and retiring any
+ * completed requests.
*/
static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
int ret;
- /* This function only exists to support waiting for existing rendering,
- * not for emitting required flushes.
- */
- BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
-
- /* If there is rendering queued on the buffer being evicted, wait for
- * it.
- */
- if (obj_priv->active) {
-#if WATCH_BUF
- DRM_INFO("%s: object %p wait for seqno %08x\n",
- __func__, obj, obj_priv->last_rendering_seqno);
-#endif
- ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
- if (ret != 0)
+ if (obj->active) {
+ ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
+ if (ret)
return ret;
+
+ i915_gem_retire_requests_ring(obj->ring);
}
return 0;
}
/**
- * Unbinds an object from the GTT aperture.
+ * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Returns 0 if successful, else an error is returned and the remaining time is
+ * written back to the timeout parameter.
+ * -ETIME: object is still busy after timeout
+ * -ERESTARTSYS: signal interrupted the wait
+ * -ENOENT: object doesn't exist
+ * Also possible, but rare:
+ * -EAGAIN: GPU wedged
+ * -ENOMEM: out of memory
+ * -ENODEV: Internal IRQ fail
+ * -E?: The add request failed
+ *
+ * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
+ * non-zero timeout parameter the wait ioctl will wait for the given number of
+ * nanoseconds on an object becoming unbusy. Since the wait itself does so
+ * without holding struct_mutex the object may become re-busied before this
+ * function completes. A similar but shorter race condition exists in the
+ * busy ioctl.
*/
int
-i915_gem_object_unbind(struct drm_gem_object *obj)
+i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_wait *args = data;
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *ring = NULL;
+ struct timespec timeout_stack, *timeout = NULL;
+ unsigned reset_counter;
+ u32 seqno = 0;
int ret = 0;
-#if WATCH_BUF
- DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
- DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
-#endif
- if (obj_priv->gtt_space == NULL)
- return 0;
+ if (args->timeout_ns >= 0) {
+ timeout_stack = ns_to_timespec(args->timeout_ns);
+ timeout = &timeout_stack;
+ }
- if (obj_priv->pin_count != 0) {
- DRM_ERROR("Attempting to unbind pinned buffer\n");
- return -EINVAL;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
+ if (&obj->base == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ return -ENOENT;
}
- /* blow away mappings if mapped through GTT */
- i915_gem_release_mmap(obj);
+ /* Need to make sure the object gets inactive eventually. */
+ ret = i915_gem_object_flush_active(obj);
+ if (ret)
+ goto out;
+
+ if (obj->active) {
+ seqno = obj->last_read_seqno;
+ ring = obj->ring;
+ }
+
+ if (seqno == 0)
+ goto out;
- /* Move the object to the CPU domain to ensure that
- * any possible CPU writes while it's not in the GTT
- * are flushed when we go to remap it. This will
- * also ensure that all pending GPU writes are finished
- * before we unbind.
+ /* Do this after OLR check to make sure we make forward progress polling
+	 * on this IOCTL with a 0 timeout (like the busy ioctl).
*/
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("set_domain failed: %d\n", ret);
- return ret;
+ if (!args->timeout_ns) {
+ ret = -ETIME;
+ goto out;
}
- BUG_ON(obj_priv->active);
+ drm_gem_object_unreference(&obj->base);
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ mutex_unlock(&dev->struct_mutex);
- /* release the fence reg _after_ flushing */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- i915_gem_clear_fence_reg(obj);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
+ if (timeout)
+ args->timeout_ns = timespec_to_ns(timeout);
+ return ret;
- if (obj_priv->agp_mem != NULL) {
- drm_unbind_agp(obj_priv->agp_mem);
- drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
- obj_priv->agp_mem = NULL;
- }
+out:
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
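
From userspace the ioctl is reachable through libdrm; a timeout of 0 gives busy-ioctl semantics, and on -ETIME the kernel writes the remaining time back into timeout_ns. A minimal sketch, assuming a GEM handle obtained elsewhere and the libdrm headers on the include path:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Wait (up to timeout_ns) for the buffer behind `handle` to go idle. */
static int wait_bo(int fd, uint32_t handle, int64_t timeout_ns)
{
	struct drm_i915_gem_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.bo_handle = handle;	/* GEM handle, e.g. from GEM_CREATE */
	wait.timeout_ns = timeout_ns;	/* 0 == just poll busyness */

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
}
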
- i915_gem_object_put_pages(obj);
- BUG_ON(obj_priv->pages_refcount);
+/**
+ * i915_gem_object_sync - sync an object to a ring.
+ *
+ * @obj: object which may be in use on another ring.
+ * @to: ring we wish to use the object on. May be NULL.
+ *
+ * This code is meant to abstract object synchronization with the GPU.
+ * Calling with NULL implies synchronizing the object with the CPU
+ * rather than a particular GPU ring.
+ *
+ * Returns 0 if successful, else propagates up the lower layer error.
+ */
+int
+i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *to)
+{
+ struct intel_engine_cs *from = obj->ring;
+ u32 seqno;
+ int ret, idx;
- if (obj_priv->gtt_space) {
- atomic_dec(&dev->gtt_count);
- atomic_sub(obj->size, &dev->gtt_memory);
+ if (from == NULL || to == from)
+ return 0;
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
- }
+ if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
+ return i915_gem_object_wait_rendering(obj, false);
- /* Remove ourselves from the LRU list if present. */
- if (!list_empty(&obj_priv->list))
- list_del_init(&obj_priv->list);
+ idx = intel_ring_sync_index(from, to);
- if (i915_gem_object_is_purgeable(obj_priv))
- i915_gem_object_truncate(obj);
+ seqno = obj->last_read_seqno;
+ if (seqno <= from->semaphore.sync_seqno[idx])
+ return 0;
- trace_i915_gem_object_unbind(obj);
+ ret = i915_gem_check_olr(obj->ring, seqno);
+ if (ret)
+ return ret;
- return 0;
+ trace_i915_gem_ring_sync_to(from, to, seqno);
+ ret = to->semaphore.sync_to(to, from, seqno);
+ if (!ret)
+ /* We use last_read_seqno because sync_to()
+ * might have just caused seqno wrap under
+ * the radar.
+ */
+ from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
+
+ return ret;
}
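
The early return above acts as a sync-point cache: a semaphore wait is only emitted when the object's seqno is newer than what this (from, to) ring pair last synchronised on. A simplified sketch of that decision, with hypothetical types standing in for the driver's engine structures:

#include <stdbool.h>
#include <stdint.h>

struct engine {				/* hypothetical, for illustration */
	uint32_t sync_seqno[4];		/* last seqno synced, per target ring */
};

static bool needs_semaphore(const struct engine *from, int idx,
			    uint32_t obj_seqno)
{
	/* Already waited on something at least as new: skip the wait. */
	return obj_seqno > from->sync_seqno[idx];
}
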
-static struct drm_gem_object *
-i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *best = NULL;
- struct drm_gem_object *first = NULL;
-
- /* Try to find the smallest clean object */
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
- if (obj->size >= min_size) {
- if ((!obj_priv->dirty ||
- i915_gem_object_is_purgeable(obj_priv)) &&
- (!best || obj->size < best->size)) {
- best = obj;
- if (best->size == min_size)
- return best;
- }
- if (!first)
- first = obj;
- }
- }
+static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+{
+ u32 old_write_domain, old_read_domains;
+
+ /* Force a pagefault for domain tracking on next user access */
+ i915_gem_release_mmap(obj);
+
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ return;
- return best ? best : first;
+ /* Wait for any direct GTT access to complete */
+ mb();
+
+ old_read_domains = obj->base.read_domains;
+ old_write_domain = obj->base.write_domain;
+
+ obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
}
-static int
-i915_gem_evict_everything(struct drm_device *dev)
+int i915_vma_unbind(struct i915_vma *vma)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t seqno;
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- bool lists_empty;
- spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
- spin_unlock(&dev_priv->mm.active_list_lock);
+ if (list_empty(&vma->vma_link))
+ return 0;
- if (lists_empty)
- return -ENOSPC;
+ if (!drm_mm_node_allocated(&vma->node)) {
+ i915_gem_vma_destroy(vma);
+ return 0;
+ }
- /* Flush everything (on to the inactive lists) and evict */
- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
- if (seqno == 0)
- return -ENOMEM;
+ if (vma->pin_count)
+ return -EBUSY;
- ret = i915_wait_request(dev, seqno);
- if (ret)
- return ret;
+ BUG_ON(obj->pages == NULL);
- ret = i915_gem_evict_from_inactive_list(dev);
+ ret = i915_gem_object_finish_gpu(obj);
if (ret)
return ret;
+ /* Continue on if we fail due to EIO, the GPU is hung so we
+	 * should be safe and we need to clean up or else we might
+ * cause memory corruption through use-after-free.
+ */
- spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
- spin_unlock(&dev_priv->mm.active_list_lock);
- BUG_ON(!lists_empty);
+ if (i915_is_ggtt(vma->vm)) {
+ i915_gem_object_finish_gtt(obj);
- return 0;
-}
+ /* release the fence reg _after_ flushing */
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ return ret;
+ }
-static int
-i915_gem_evict_something(struct drm_device *dev, int min_size)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- int ret;
+ trace_i915_vma_unbind(vma);
- for (;;) {
- i915_gem_retire_requests(dev);
+ vma->unbind_vma(vma);
- /* If there's an inactive buffer available now, grab it
- * and be done.
- */
- obj = i915_gem_find_inactive_object(dev, min_size);
- if (obj) {
- struct drm_i915_gem_object *obj_priv;
+ i915_gem_gtt_finish_object(obj);
-#if WATCH_LRU
- DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
- obj_priv = obj->driver_private;
- BUG_ON(obj_priv->pin_count != 0);
- BUG_ON(obj_priv->active);
+ list_del_init(&vma->mm_list);
+ /* Avoid an unnecessary call to unbind on rebind. */
+ if (i915_is_ggtt(vma->vm))
+ obj->map_and_fenceable = true;
- /* Wait on the rendering and unbind the buffer. */
- return i915_gem_object_unbind(obj);
- }
+ drm_mm_remove_node(&vma->node);
+ i915_gem_vma_destroy(vma);
- /* If we didn't get anything, but the ring is still processing
- * things, wait for the next to finish and hopefully leave us
- * a buffer to evict.
- */
- if (!list_empty(&dev_priv->mm.request_list)) {
- struct drm_i915_gem_request *request;
+ /* Since the unbound list is global, only move to that list if
+ * no more VMAs exist. */
+ if (list_empty(&obj->vma_list))
+ list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
- request = list_first_entry(&dev_priv->mm.request_list,
- struct drm_i915_gem_request,
- list);
+	/* And finally, now that the object is completely decoupled from this vma,
+ * we can drop its hold on the backing storage and allow it to be
+ * reaped by the shrinker.
+ */
+ i915_gem_object_unpin_pages(obj);
- ret = i915_wait_request(dev, request->seqno);
- if (ret)
- return ret;
+ return 0;
+}
- continue;
- }
+int i915_gpu_idle(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int ret, i;
- /* If we didn't have anything on the request list but there
- * are buffers awaiting a flush, emit one and try again.
- * When we wait on it, those buffers waiting for that flush
- * will get moved to inactive.
- */
- if (!list_empty(&dev_priv->mm.flushing_list)) {
- struct drm_i915_gem_object *obj_priv;
+ /* Flush everything onto the inactive list. */
+ for_each_ring(ring, dev_priv, i) {
+ ret = i915_switch_context(ring, ring->default_context);
+ if (ret)
+ return ret;
- /* Find an object that we can immediately reuse */
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
- obj = obj_priv->obj;
- if (obj->size >= min_size)
- break;
+ ret = intel_ring_idle(ring);
+ if (ret)
+ return ret;
+ }
- obj = NULL;
- }
+ return 0;
+}
- if (obj != NULL) {
- uint32_t seqno;
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int fence_reg;
+ int fence_pitch_shift;
- i915_gem_flush(dev,
- obj->write_domain,
- obj->write_domain);
- seqno = i915_add_request(dev, NULL, obj->write_domain);
- if (seqno == 0)
- return -ENOMEM;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ fence_reg = FENCE_REG_SANDYBRIDGE_0;
+ fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+ } else {
+ fence_reg = FENCE_REG_965_0;
+ fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+ }
- ret = i915_wait_request(dev, seqno);
- if (ret)
- return ret;
+ fence_reg += reg * 8;
- continue;
- }
- }
+ /* To w/a incoherency with non-atomic 64-bit register updates,
+ * we split the 64-bit update into two 32-bit writes. In order
+ * for a partial fence not to be evaluated between writes, we
+	 * precede the update with a write to turn off the fence register,
+ * and only enable the fence as the last step.
+ *
+ * For extra levels of paranoia, we make sure each step lands
+ * before applying the next step.
+ */
+ I915_WRITE(fence_reg, 0);
+ POSTING_READ(fence_reg);
+
+ if (obj) {
+ u32 size = i915_gem_obj_ggtt_size(obj);
+ uint64_t val;
+
+ val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
+ 0xfffff000) << 32;
+ val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
+ val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
+
+ I915_WRITE(fence_reg + 4, val >> 32);
+ POSTING_READ(fence_reg + 4);
+
+ I915_WRITE(fence_reg + 0, val);
+ POSTING_READ(fence_reg);
+ } else {
+ I915_WRITE(fence_reg + 4, 0);
+ POSTING_READ(fence_reg + 4);
+ }
+}
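
The disable-then-two-writes dance above guards against the fence being sampled half-updated: the register is first zeroed (clearing the VALID bit), the high dword is written, and only the final low-dword write re-enables the fence, with each step posted before the next. Sketched with hypothetical mmio helpers:

#include <stdint.h>

void mmio_write32(uint32_t reg, uint32_t val);	/* hypothetical MMIO write */
uint32_t mmio_read32(uint32_t reg);		/* hypothetical posting read */

/* VALID lives in bit 0 of the low dword; the fence is off while it is 0. */
static void fence_update64(uint32_t reg, uint64_t val)
{
	mmio_write32(reg, 0);			/* disable the fence */
	mmio_read32(reg);			/* post before the next step */
	mmio_write32(reg + 4, val >> 32);	/* high dword first */
	mmio_read32(reg + 4);
	mmio_write32(reg, (uint32_t)val);	/* low dword re-enables it */
	mmio_read32(reg);
}
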
- /* If we didn't do any of the above, there's no single buffer
- * large enough to swap out for the new one, so just evict
- * everything and start again. (This should be rare.)
- */
- if (!list_empty (&dev_priv->mm.inactive_list))
- return i915_gem_evict_from_inactive_list(dev);
+static void i915_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+
+ if (obj) {
+ u32 size = i915_gem_obj_ggtt_size(obj);
+ int pitch_val;
+ int tile_width;
+
+ WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+ "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
+
+ if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+ tile_width = 128;
else
- return i915_gem_evict_everything(dev);
- }
+ tile_width = 512;
+
+ /* Note: pitch better be a power of two tile widths */
+ pitch_val = obj->stride / tile_width;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = i915_gem_obj_ggtt_offset(obj);
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I915_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+ } else
+ val = 0;
+
+ if (reg < 8)
+ reg = FENCE_REG_830_0 + reg * 4;
+ else
+ reg = FENCE_REG_945_8 + (reg - 8) * 4;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
}
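
The pitch field above stores the base-2 log of the stride in tile widths, which ffs() yields for power-of-two values. A standalone check of that encoding, with illustrative values:

#include <assert.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	int stride = 2048, tile_width = 512;	/* X-tiled stride, 4 tiles wide */
	int pitch_val = ffs(stride / tile_width) - 1;

	assert(pitch_val == 2);	/* log2(2048 / 512) == 2 */
	return 0;
}
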
-int
-i915_gem_object_get_pages(struct drm_gem_object *obj)
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int page_count, i;
- struct address_space *mapping;
- struct inode *inode;
- struct page *page;
- int ret;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val;
- if (obj_priv->pages_refcount++ != 0)
- return 0;
+ if (obj) {
+ u32 size = i915_gem_obj_ggtt_size(obj);
+ uint32_t pitch_val;
+
+ WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+ "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+ i915_gem_obj_ggtt_offset(obj), size);
+
+ pitch_val = obj->stride / 128;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = i915_gem_obj_ggtt_offset(obj);
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I830_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+ } else
+ val = 0;
+
+ I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+ POSTING_READ(FENCE_REG_830_0 + reg * 4);
+}
- /* Get the list of pages out of our struct file. They'll be pinned
- * at this point until we release them.
- */
- page_count = obj->size / PAGE_SIZE;
- BUG_ON(obj_priv->pages != NULL);
- obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
- if (obj_priv->pages == NULL) {
- obj_priv->pages_refcount--;
- return -ENOMEM;
- }
+static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
+{
+ return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+}
- inode = obj->filp->f_path.dentry->d_inode;
- mapping = inode->i_mapping;
- for (i = 0; i < page_count; i++) {
- page = read_mapping_page(mapping, i, NULL);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- i915_gem_object_put_pages(obj);
- return ret;
- }
- obj_priv->pages[i] = page;
- }
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
- if (obj_priv->tiling_mode != I915_TILING_NONE)
- i915_gem_object_do_bit_17_swizzle(obj);
+ /* Ensure that all CPU reads are completed before installing a fence
+ * and all writes before removing the fence.
+ */
+ if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
+ mb();
+
+ WARN(obj && (!obj->stride || !obj->tiling_mode),
+ "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+ obj->stride, obj->tiling_mode);
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 8:
+ case 7:
+ case 6:
+ case 5:
+ case 4: i965_write_fence_reg(dev, reg, obj); break;
+ case 3: i915_write_fence_reg(dev, reg, obj); break;
+ case 2: i830_write_fence_reg(dev, reg, obj); break;
+ default: BUG();
+ }
+
+ /* And similarly be paranoid that no direct access to this region
+ * is reordered to before the fence is installed.
+ */
+ if (i915_gem_object_needs_mb(obj))
+ mb();
+}
- return 0;
+static inline int fence_number(struct drm_i915_private *dev_priv,
+ struct drm_i915_fence_reg *fence)
+{
+ return fence - dev_priv->fence_regs;
}
-static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_fence_reg *fence,
+ bool enable)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int regnum = obj_priv->fence_reg;
- uint64_t val;
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int reg = fence_number(dev_priv, fence);
- val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
- 0xfffff000) << 32;
- val |= obj_priv->gtt_offset & 0xfffff000;
- val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
- if (obj_priv->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
- val |= I965_FENCE_REG_VALID;
+ i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
- I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
+ if (enable) {
+ obj->fence_reg = reg;
+ fence->obj = obj;
+ list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+ } else {
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ fence->obj = NULL;
+ list_del_init(&fence->lru_list);
+ }
+ obj->fence_dirty = false;
}
-static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int
+i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int regnum = obj_priv->fence_reg;
- int tile_width;
- uint32_t fence_reg, val;
- uint32_t pitch_val;
+ if (obj->last_fenced_seqno) {
+ int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+ if (ret)
+ return ret;
- if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
- (obj_priv->gtt_offset & (obj->size - 1))) {
- WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
- __func__, obj_priv->gtt_offset, obj->size);
- return;
+ obj->last_fenced_seqno = 0;
}
- if (obj_priv->tiling_mode == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(dev))
- tile_width = 128;
- else
- tile_width = 512;
+ obj->fenced_gpu_access = false;
+ return 0;
+}
+
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_fence_reg *fence;
+ int ret;
- /* Note: pitch better be a power of two tile widths */
- pitch_val = obj_priv->stride / tile_width;
- pitch_val = ffs(pitch_val) - 1;
+ ret = i915_gem_object_wait_fence(obj);
+ if (ret)
+ return ret;
- val = obj_priv->gtt_offset;
- if (obj_priv->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I915_FENCE_SIZE_BITS(obj->size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
- val |= I830_FENCE_REG_VALID;
+ if (obj->fence_reg == I915_FENCE_REG_NONE)
+ return 0;
- if (regnum < 8)
- fence_reg = FENCE_REG_830_0 + (regnum * 4);
- else
- fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
- I915_WRITE(fence_reg, val);
+ fence = &dev_priv->fence_regs[obj->fence_reg];
+
+ if (WARN_ON(fence->pin_count))
+ return -EBUSY;
+
+ i915_gem_object_fence_lost(obj);
+ i915_gem_object_update_fence(obj, fence, false);
+
+ return 0;
}
-static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int regnum = obj_priv->fence_reg;
- uint32_t val;
- uint32_t pitch_val;
- uint32_t fence_size_bits;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_fence_reg *reg, *avail;
+ int i;
- if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
- (obj_priv->gtt_offset & (obj->size - 1))) {
- WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
- __func__, obj_priv->gtt_offset);
- return;
+ /* First try to find a free reg */
+ avail = NULL;
+ for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+ reg = &dev_priv->fence_regs[i];
+ if (!reg->obj)
+ return reg;
+
+ if (!reg->pin_count)
+ avail = reg;
}
- pitch_val = obj_priv->stride / 128;
- pitch_val = ffs(pitch_val) - 1;
- WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+ if (avail == NULL)
+ goto deadlock;
- val = obj_priv->gtt_offset;
- if (obj_priv->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
- WARN_ON(fence_size_bits & ~0x00000f00);
- val |= fence_size_bits;
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
- val |= I830_FENCE_REG_VALID;
+ /* None available, try to steal one or wait for a user to finish */
+ list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+ if (reg->pin_count)
+ continue;
+
+ return reg;
+ }
+
+deadlock:
+ /* Wait for completion of pending flips which consume fences */
+ if (intel_has_pending_fb_unpin(dev))
+ return ERR_PTR(-EAGAIN);
- I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+ return ERR_PTR(-EDEADLK);
}
/**
- * i915_gem_object_get_fence_reg - set up a fence reg for an object
+ * i915_gem_object_get_fence - set up fencing for an object
* @obj: object to map through a fence reg
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
- *
* This function walks the fence regs looking for a free one for @obj,
* stealing one if it can't find any.
*
* It then sets up the reg based on the object's properties: address, pitch
* and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
*/
int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- struct drm_i915_fence_reg *reg = NULL;
- struct drm_i915_gem_object *old_obj_priv = NULL;
- int i, ret, avail;
-
- /* Just update our place in the LRU if our fence is getting used. */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
- return 0;
- }
-
- switch (obj_priv->tiling_mode) {
- case I915_TILING_NONE:
- WARN(1, "allocating a fence for non-tiled object?\n");
- break;
- case I915_TILING_X:
- if (!obj_priv->stride)
- return -EINVAL;
- WARN((obj_priv->stride & (512 - 1)),
- "object 0x%08x is X tiled but has non-512B pitch\n",
- obj_priv->gtt_offset);
- break;
- case I915_TILING_Y:
- if (!obj_priv->stride)
- return -EINVAL;
- WARN((obj_priv->stride & (128 - 1)),
- "object 0x%08x is Y tiled but has non-128B pitch\n",
- obj_priv->gtt_offset);
- break;
- }
-
- /* First try to find a free reg */
- avail = 0;
- for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
- reg = &dev_priv->fence_regs[i];
- if (!reg->obj)
- break;
+ bool enable = obj->tiling_mode != I915_TILING_NONE;
+ struct drm_i915_fence_reg *reg;
+ int ret;
- old_obj_priv = reg->obj->driver_private;
- if (!old_obj_priv->pin_count)
- avail++;
+	/* Have we updated the tiling parameters on the object and so
+ * will need to serialise the write to the associated fence register?
+ */
+ if (obj->fence_dirty) {
+ ret = i915_gem_object_wait_fence(obj);
+ if (ret)
+ return ret;
}
- /* None available, try to steal one or wait for a user to finish */
- if (i == dev_priv->num_fence_regs) {
- struct drm_gem_object *old_obj = NULL;
-
- if (avail == 0)
- return -ENOSPC;
-
- list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
- fence_list) {
- old_obj = old_obj_priv->obj;
-
- if (old_obj_priv->pin_count)
- continue;
+ /* Just update our place in the LRU if our fence is getting reused. */
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ if (!obj->fence_dirty) {
+ list_move_tail(&reg->lru_list,
+ &dev_priv->mm.fence_list);
+ return 0;
+ }
+ } else if (enable) {
+ reg = i915_find_fence_reg(dev);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
- /* Take a reference, as otherwise the wait_rendering
- * below may cause the object to get freed out from
- * under us.
- */
- drm_gem_object_reference(old_obj);
+ if (reg->obj) {
+ struct drm_i915_gem_object *old = reg->obj;
- /* i915 uses fences for GPU access to tiled buffers */
- if (IS_I965G(dev) || !old_obj_priv->active)
- break;
-
- /* This brings the object to the head of the LRU if it
- * had been written to. The only way this should
- * result in us waiting longer than the expected
- * optimal amount of time is if there was a
- * fence-using buffer later that was read-only.
- */
- i915_gem_object_flush_gpu_write_domain(old_obj);
- ret = i915_gem_object_wait_rendering(old_obj);
- if (ret != 0) {
- drm_gem_object_unreference(old_obj);
+ ret = i915_gem_object_wait_fence(old);
+ if (ret)
return ret;
- }
- break;
+ i915_gem_object_fence_lost(old);
}
+ } else
+ return 0;
- /*
- * Zap this virtual mapping so we can set up a fence again
- * for this object next time we need it.
- */
- i915_gem_release_mmap(old_obj);
-
- i = old_obj_priv->fence_reg;
- reg = &dev_priv->fence_regs[i];
-
- old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
- list_del_init(&old_obj_priv->fence_list);
-
- drm_gem_object_unreference(old_obj);
- }
-
- obj_priv->fence_reg = i;
- list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
-
- reg->obj = obj;
-
- if (IS_I965G(dev))
- i965_write_fence_reg(reg);
- else if (IS_I9XX(dev))
- i915_write_fence_reg(reg);
- else
- i830_write_fence_reg(reg);
-
- trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+ i915_gem_object_update_fence(obj, reg, enable);
return 0;
}
-/**
- * i915_gem_clear_fence_reg - clear out fence register info
- * @obj: object to clear
- *
- * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj_priv.
- */
-static void
-i915_gem_clear_fence_reg(struct drm_gem_object *obj)
+static bool i915_gem_valid_gtt_space(struct drm_device *dev,
+ struct drm_mm_node *gtt_space,
+ unsigned long cache_level)
{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_mm_node *other;
- if (IS_I965G(dev))
- I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
- else {
- uint32_t fence_reg;
+ /* On non-LLC machines we have to be careful when putting differing
+ * types of snoopable memory together to avoid the prefetcher
+ * crossing memory domains and dying.
+ */
+ if (HAS_LLC(dev))
+ return true;
- if (obj_priv->fence_reg < 8)
- fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
- else
- fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
- 8) * 4;
+ if (!drm_mm_node_allocated(gtt_space))
+ return true;
- I915_WRITE(fence_reg, 0);
- }
+ if (list_empty(&gtt_space->node_list))
+ return true;
+
+ other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+ if (other->allocated && !other->hole_follows && other->color != cache_level)
+ return false;
- dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
- obj_priv->fence_reg = I915_FENCE_REG_NONE;
- list_del_init(&obj_priv->fence_list);
+ other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+ if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+ return false;
+
+ return true;
}
-/**
- * i915_gem_object_put_fence_reg - waits on outstanding fenced access
- * to the buffer to finish, and then resets the fence register.
- * @obj: tiled object holding a fence register.
- *
- * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj_priv.
- */
-int
-i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
+static void i915_gem_verify_gtt(struct drm_device *dev)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+#if WATCH_GTT
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
- if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
- return 0;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
+ if (obj->gtt_space == NULL) {
+ printk(KERN_ERR "object found on GTT list with no space reserved\n");
+ err++;
+ continue;
+ }
- /* On the i915, GPU access to tiled buffers is via a fence,
- * therefore we must wait for any outstanding access to complete
- * before clearing the fence.
- */
- if (!IS_I965G(dev)) {
- int ret;
+ if (obj->cache_level != obj->gtt_space->color) {
+ printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
+ i915_gem_obj_ggtt_offset(obj),
+ i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
+ obj->cache_level,
+ obj->gtt_space->color);
+ err++;
+ continue;
+ }
- i915_gem_object_flush_gpu_write_domain(obj);
- i915_gem_object_flush_gtt_write_domain(obj);
- ret = i915_gem_object_wait_rendering(obj);
- if (ret != 0)
- return ret;
+ if (!i915_gem_valid_gtt_space(dev,
+ obj->gtt_space,
+ obj->cache_level)) {
+ printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
+ i915_gem_obj_ggtt_offset(obj),
+ i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
+ obj->cache_level);
+ err++;
+ continue;
+ }
}
- i915_gem_clear_fence_reg (obj);
-
- return 0;
+ WARN_ON(err);
+#endif
}
/**
* Finds free space in the GTT aperture and binds the object there.
*/
-static int
-i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+static struct i915_vma *
+i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ unsigned alignment,
+ uint64_t flags)
{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- struct drm_mm_node *free_space;
- bool retry_alloc = false;
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 size, fence_size, fence_alignment, unfenced_alignment;
+ unsigned long start =
+ flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+ unsigned long end =
+ flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
+ struct i915_vma *vma;
int ret;
- if (obj_priv->madv != I915_MADV_WILLNEED) {
- DRM_ERROR("Attempting to bind a purgeable object\n");
- return -EINVAL;
- }
+ fence_size = i915_gem_get_gtt_size(dev,
+ obj->base.size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode, true);
+ unfenced_alignment =
+ i915_gem_get_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode, false);
if (alignment == 0)
- alignment = i915_gem_get_gtt_alignment(obj);
- if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
- DRM_ERROR("Invalid object alignment requested %u\n", alignment);
- return -EINVAL;
+ alignment = flags & PIN_MAPPABLE ? fence_alignment :
+ unfenced_alignment;
+ if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
+ DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
+ return ERR_PTR(-EINVAL);
}
- search_free:
- free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
- obj->size, alignment, 0);
- if (free_space != NULL) {
- obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
- alignment);
- if (obj_priv->gtt_space != NULL) {
- obj_priv->gtt_space->private = obj;
- obj_priv->gtt_offset = obj_priv->gtt_space->start;
- }
- }
- if (obj_priv->gtt_space == NULL) {
- /* If the gtt is empty and we're still having trouble
- * fitting our object in, we're out of memory.
- */
-#if WATCH_LRU
- DRM_INFO("%s: GTT full, evicting something\n", __func__);
-#endif
- ret = i915_gem_evict_something(dev, obj->size);
- if (ret)
- return ret;
+ size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
- goto search_free;
+ /* If the object is bigger than the entire aperture, reject it early
+ * before evicting everything in a vain attempt to find space.
+ */
+ if (obj->base.size > end) {
+ DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
+ obj->base.size,
+ flags & PIN_MAPPABLE ? "mappable" : "total",
+ end);
+ return ERR_PTR(-E2BIG);
}
-#if WATCH_BUF
- DRM_INFO("Binding object of size %zd at 0x%08x\n",
- obj->size, obj_priv->gtt_offset);
-#endif
- if (retry_alloc) {
- i915_gem_object_set_page_gfp_mask (obj,
- i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
- }
ret = i915_gem_object_get_pages(obj);
- if (retry_alloc) {
- i915_gem_object_set_page_gfp_mask (obj,
- i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
- }
- if (ret) {
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
-
- if (ret == -ENOMEM) {
- /* first try to clear up some space from the GTT */
- ret = i915_gem_evict_something(dev, obj->size);
- if (ret) {
- /* now try to shrink everyone else */
- if (! retry_alloc) {
- retry_alloc = true;
- goto search_free;
- }
+ if (ret)
+ return ERR_PTR(ret);
- return ret;
- }
+ i915_gem_object_pin_pages(obj);
+
+ vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+ if (IS_ERR(vma))
+ goto err_unpin;
+search_free:
+ ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
+ size, alignment,
+ obj->cache_level,
+ start, end,
+ DRM_MM_SEARCH_DEFAULT,
+ DRM_MM_CREATE_DEFAULT);
+ if (ret) {
+ ret = i915_gem_evict_something(dev, vm, size, alignment,
+ obj->cache_level,
+ start, end,
+ flags);
+ if (ret == 0)
goto search_free;
- }
- return ret;
+ goto err_free_vma;
+ }
+ if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
+ obj->cache_level))) {
+ ret = -EINVAL;
+ goto err_remove_node;
}
- /* Create an AGP memory structure pointing at our pages, and bind it
- * into the GTT.
- */
- obj_priv->agp_mem = drm_agp_bind_pages(dev,
- obj_priv->pages,
- obj->size >> PAGE_SHIFT,
- obj_priv->gtt_offset,
- obj_priv->agp_type);
- if (obj_priv->agp_mem == NULL) {
- i915_gem_object_put_pages(obj);
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
-
- ret = i915_gem_evict_something(dev, obj->size);
- if (ret)
- return ret;
+ ret = i915_gem_gtt_prepare_object(obj);
+ if (ret)
+ goto err_remove_node;
+
+ list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
+ list_add_tail(&vma->mm_list, &vm->inactive_list);
+
+ if (i915_is_ggtt(vm)) {
+ bool mappable, fenceable;
+
+ fenceable = (vma->node.size == fence_size &&
+ (vma->node.start & (fence_alignment - 1)) == 0);
- goto search_free;
+ mappable = (vma->node.start + obj->base.size <=
+ dev_priv->gtt.mappable_end);
+
+ obj->map_and_fenceable = mappable && fenceable;
}
- atomic_inc(&dev->gtt_count);
- atomic_add(obj->size, &dev->gtt_memory);
- /* Assert that the object is not currently in any GPU domain. As it
- * wasn't in the GTT, there shouldn't be any way it could have been in
- * a GPU cache
- */
- BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
- BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+ WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
- trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+ trace_i915_vma_bind(vma, flags);
+ vma->bind_vma(vma, obj->cache_level,
+ flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
- return 0;
+ i915_gem_verify_gtt(dev);
+ return vma;
+
+err_remove_node:
+ drm_mm_remove_node(&vma->node);
+err_free_vma:
+ i915_gem_vma_destroy(vma);
+ vma = ERR_PTR(ret);
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+ return vma;
}
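
The search_free loop above is the classic place-or-evict pattern: try to insert the node, and when the address space is full, evict enough to make room and retry. A sketch of the control flow, where arena_insert and arena_evict are hypothetical helpers standing in for drm_mm_insert_node_in_range_generic() and i915_gem_evict_something():

#include <errno.h>

struct arena;				/* hypothetical address space */
struct node { unsigned long size; };	/* hypothetical allocation request */

int arena_insert(struct arena *vm, struct node *node);	/* hypothetical */
int arena_evict(struct arena *vm, unsigned long size);	/* hypothetical */

static int place_with_eviction(struct arena *vm, struct node *node)
{
	for (;;) {
		int ret = arena_insert(vm, node);
		if (ret != -ENOSPC)
			return ret;	/* placed, or a hard failure */

		ret = arena_evict(vm, node->size);	/* free up space */
		if (ret)
			return ret;	/* nothing left to evict */
	}
}
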
-void
-i915_gem_clflush_object(struct drm_gem_object *obj)
+bool
+i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+ bool force)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
- if (obj_priv->pages == NULL)
- return;
-
- trace_i915_gem_object_clflush(obj);
-
- drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
-}
-
-/** Flushes any GPU write domain for the object if it's dirty. */
-static void
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- uint32_t seqno;
- uint32_t old_write_domain;
+ if (obj->pages == NULL)
+ return false;
- if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
- return;
+ /*
+ * Stolen memory is always coherent with the GPU as it is explicitly
+ * marked as wc by the system, or the system is cache-coherent.
+ */
+ if (obj->stolen)
+ return false;
+
+ /* If the GPU is snooping the contents of the CPU cache,
+ * we do not need to manually clear the CPU cache lines. However,
+ * the caches are only snooped when the render cache is
+ * flushed/invalidated. As we always have to emit invalidations
+ * and flushes when moving into and out of the RENDER domain, correct
+ * snooping behaviour occurs naturally as the result of our domain
+ * tracking.
+ */
+ if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ return false;
- /* Queue the GPU write cache flushing we need. */
- old_write_domain = obj->write_domain;
- i915_gem_flush(dev, 0, obj->write_domain);
- seqno = i915_add_request(dev, NULL, obj->write_domain);
- obj->write_domain = 0;
- i915_gem_object_move_to_active(obj, seqno);
+ trace_i915_gem_object_clflush(obj);
+ drm_clflush_sg(obj->pages);
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
+ return true;
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
-i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
uint32_t old_write_domain;
- if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+ if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
return;
- /* No actual flushing is required for the GTT write domain. Writes
+ /* No actual flushing is required for the GTT write domain. Writes
* to it immediately go to main memory as far as we know, so there's
* no chipset flush. It also doesn't land in render cache.
+ *
+ * However, we do have to enforce the order so that all writes through
+ * the GTT land before any writes to the device, such as updates to
+ * the GATT itself.
*/
- old_write_domain = obj->write_domain;
- obj->write_domain = 0;
+ wmb();
+
+ old_write_domain = obj->base.write_domain;
+ obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
+ obj->base.read_domains,
old_write_domain);
}
/** Flushes the CPU write domain for the object if it's dirty. */
static void
-i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
+ bool force)
{
- struct drm_device *dev = obj->dev;
uint32_t old_write_domain;
- if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
- i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- old_write_domain = obj->write_domain;
- obj->write_domain = 0;
+ if (i915_gem_clflush_object(obj, force))
+ i915_gem_chipset_flush(obj->base.dev);
+
+ old_write_domain = obj->base.write_domain;
+ obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
+ obj->base.read_domains,
old_write_domain);
}
-void
-i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
-{
- switch (obj->write_domain) {
- case I915_GEM_DOMAIN_GTT:
- i915_gem_object_flush_gtt_write_domain(obj);
- break;
- case I915_GEM_DOMAIN_CPU:
- i915_gem_object_flush_cpu_write_domain(obj);
- break;
- default:
- i915_gem_object_flush_gpu_write_domain(obj);
- break;
- }
-}
-
/**
* Moves a single object to the GTT read, and possibly write domain.
*
@@ -2793,656 +3572,387 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
* flushes to occur.
*/
int
-i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
uint32_t old_write_domain, old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
- if (obj_priv->gtt_space == NULL)
+ if (!i915_gem_obj_bound_any(obj))
return -EINVAL;
- i915_gem_object_flush_gpu_write_domain(obj);
- /* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
- if (ret != 0)
+ if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+ return 0;
+
+ ret = i915_gem_object_wait_rendering(obj, !write);
+ if (ret)
return ret;
- old_write_domain = obj->write_domain;
- old_read_domains = obj->read_domains;
+ i915_gem_object_retire(obj);
+ i915_gem_object_flush_cpu_write_domain(obj, false);
- /* If we're writing through the GTT domain, then CPU and GPU caches
- * will need to be invalidated at next use.
+ /* Serialise direct access to this object with the barriers for
+ * coherent writes from the GPU, by effectively invalidating the
+ * GTT domain upon first access.
*/
- if (write)
- obj->read_domains &= I915_GEM_DOMAIN_GTT;
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ mb();
- i915_gem_object_flush_cpu_write_domain(obj);
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
- obj->write_domain = I915_GEM_DOMAIN_GTT;
- obj_priv->dirty = 1;
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->dirty = 1;
}
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
+ /* And bump the LRU for this access */
+ if (i915_gem_object_is_inactive(obj)) {
+ struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+ if (vma)
+ list_move_tail(&vma->mm_list,
+ &dev_priv->gtt.base.inactive_list);
+
+ }
+
return 0;
}
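
Editor's note: the GTT/CPU domain bookkeeping above follows one fixed rule — a read access ORs the new domain into read_domains, while a write access collapses read_domains to that single domain and claims write_domain. A minimal standalone model of the rule (a sketch in plain C, not part of the patch; the bit values are illustrative only):

#include <stdint.h>

#define DOMAIN_CPU (1u << 0)	/* illustrative bit values only */
#define DOMAIN_GTT (1u << 1)

struct domains {
	uint32_t read;
	uint32_t write;
};

/* Models the transition performed by set_to_gtt/cpu_domain(). */
static void set_domain(struct domains *d, uint32_t domain, int write)
{
	d->read |= domain;		/* readers may coexist */
	if (write) {
		d->read = domain;	/* a writer invalidates everyone else */
		d->write = domain;
	}
}

Called with write set, this reproduces the (GTT, GTT) state installed above; called for a read, it leaves any other read domains intact.
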
-/**
- * Moves a single object to the CPU read, and possibly write domain.
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
- */
-static int
-i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
{
- uint32_t old_write_domain, old_read_domains;
+ struct drm_device *dev = obj->base.dev;
+ struct i915_vma *vma, *next;
int ret;
- i915_gem_object_flush_gpu_write_domain(obj);
- /* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
- if (ret != 0)
- return ret;
+ if (obj->cache_level == cache_level)
+ return 0;
- i915_gem_object_flush_gtt_write_domain(obj);
+ if (i915_gem_obj_is_pinned(obj)) {
+ DRM_DEBUG("can not change the cache level of pinned objects\n");
+ return -EBUSY;
+ }
- /* If we have a partially-valid cache of the object in the CPU,
- * finish invalidating it and free the per-page flags.
- */
- i915_gem_object_set_to_full_cpu_read_domain(obj);
+ list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+ if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (i915_gem_obj_bound_any(obj)) {
+ ret = i915_gem_object_finish_gpu(obj);
+ if (ret)
+ return ret;
- old_write_domain = obj->write_domain;
- old_read_domains = obj->read_domains;
+ i915_gem_object_finish_gtt(obj);
- /* Flush the CPU cache if it's still invalid. */
- if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
- i915_gem_clflush_object(obj);
+ /* Before SandyBridge, you could not use tiling or fence
+ * registers with snooped memory, so relinquish any fences
+ * currently pointing to our region in the aperture.
+ */
+ if (INTEL_INFO(dev)->gen < 6) {
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ return ret;
+ }
- obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (drm_mm_node_allocated(&vma->node))
+ vma->bind_vma(vma, cache_level,
+ obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
}
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ vma->node.color = cache_level;
+ obj->cache_level = cache_level;
- /* If we're writing through the CPU, then the GPU read domains will
- * need to be invalidated at next use.
- */
- if (write) {
- obj->read_domains &= I915_GEM_DOMAIN_CPU;
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- }
+ if (cpu_write_needs_clflush(obj)) {
+ u32 old_read_domains, old_write_domain;
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- old_write_domain);
+ /* If we're coming from LLC cached, then we haven't
+ * actually been tracking whether the data is in the
+ * CPU cache or not, since we only allow one bit set
+ * in obj->write_domain and have been skipping the clflushes.
+ * Just set it to the CPU cache for now.
+ */
+ i915_gem_object_retire(obj);
+ WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+
+ old_read_domains = obj->base.read_domains;
+ old_write_domain = obj->base.write_domain;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+ }
+
+ i915_gem_verify_gtt(dev);
return 0;
}
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invalidating though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Mapped to GTT
- * 4. Read by GPU
- * 5. Unmapped from GTT
- * 6. Freed
- *
- * Let's take these a step at a time
- *
- * 1. Allocated
- * Pages allocated from the kernel may still have
- * cache contents, so we set them to (CPU, CPU) always.
- * 2. Written by CPU (using pwrite)
- * The pwrite function calls set_domain (CPU, CPU) and
- * this function does nothing (as nothing changes)
- * 3. Mapped to GTT
- * This function asserts that the object is not
- * currently in any GPU-based read or write domains
- * 4. Read by GPU
- * i915_gem_execbuffer calls set_domain (COMMAND, 0).
- * As write_domain is zero, this function adds in the
- * current read domains (CPU+COMMAND, 0).
- * flush_domains is set to CPU.
- * invalidate_domains is set to COMMAND
- * clflush is run to get data out of the CPU caches
- * then i915_dev_set_domain calls i915_gem_flush to
- * emit an MI_FLUSH and drm_agp_chipset_flush
- * 5. Unmapped from GTT
- * i915_gem_object_unbind calls set_domain (CPU, CPU)
- * flush_domains and invalidate_domains end up both zero
- * so no flushing/invalidating happens
- * 6. Freed
- * yay, done
- *
- * Case 2: The shared render buffer
- *
- * 1. Allocated
- * 2. Mapped to GTT
- * 3. Read/written by GPU
- * 4. set_domain to (CPU,CPU)
- * 5. Read/written by CPU
- * 6. Read/written by GPU
- *
- * 1. Allocated
- * Same as last example, (CPU, CPU)
- * 2. Mapped to GTT
- * Nothing changes (assertions find that it is not in the GPU)
- * 3. Read/written by GPU
- * execbuffer calls set_domain (RENDER, RENDER)
- * flush_domains gets CPU
- * invalidate_domains gets GPU
- * clflush (obj)
- * MI_FLUSH and drm_agp_chipset_flush
- * 4. set_domain (CPU, CPU)
- * flush_domains gets GPU
- * invalidate_domains gets CPU
- * wait_rendering (obj) to make sure all drawing is complete.
- * This will include an MI_FLUSH to get the data from GPU
- * to memory
- * clflush (obj) to invalidate the CPU cache
- * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- * 5. Read/written by CPU
- * cache lines are loaded and dirtied
- * 6. Read/written by GPU
- * Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Read by GPU
- * 4. Updated (written) by CPU again
- * 5. Read by GPU
- *
- * 1. Allocated
- * (CPU, CPU)
- * 2. Written by CPU
- * (CPU, CPU)
- * 3. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- * 4. Updated (written) by CPU again
- * (CPU, CPU)
- * flush_domains = 0 (no previous write domain)
- * invalidate_domains = 0 (no new read domains)
- * 5. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- uint32_t invalidate_domains = 0;
- uint32_t flush_domains = 0;
- uint32_t old_read_domains;
+ struct drm_i915_gem_caching *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
- BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
- BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
- intel_mark_busy(dev, obj);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
-#if WATCH_BUF
- DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
- __func__, obj,
- obj->read_domains, obj->pending_read_domains,
- obj->write_domain, obj->pending_write_domain);
-#endif
- /*
- * If the object isn't moving to a new write domain,
- * let the object stay in multiple read domains
- */
- if (obj->pending_write_domain == 0)
- obj->pending_read_domains |= obj->read_domains;
- else
- obj_priv->dirty = 1;
+ switch (obj->cache_level) {
+ case I915_CACHE_LLC:
+ case I915_CACHE_L3_LLC:
+ args->caching = I915_CACHING_CACHED;
+ break;
- /*
- * Flush the current write domain if
- * the new read domains don't match. Invalidate
- * any read domains which differ from the old
- * write domain
- */
- if (obj->write_domain &&
- obj->write_domain != obj->pending_read_domains) {
- flush_domains |= obj->write_domain;
- invalidate_domains |=
- obj->pending_read_domains & ~obj->write_domain;
+ case I915_CACHE_WT:
+ args->caching = I915_CACHING_DISPLAY;
+ break;
+
+ default:
+ args->caching = I915_CACHING_NONE;
+ break;
}
- /*
- * Invalidate any read caches which may have
- * stale data. That is, any new read domains.
- */
- invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
-#if WATCH_BUF
- DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
- __func__, flush_domains, invalidate_domains);
-#endif
- i915_gem_clflush_object(obj);
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_caching *args = data;
+ struct drm_i915_gem_object *obj;
+ enum i915_cache_level level;
+ int ret;
+
+ switch (args->caching) {
+ case I915_CACHING_NONE:
+ level = I915_CACHE_NONE;
+ break;
+ case I915_CACHING_CACHED:
+ level = I915_CACHE_LLC;
+ break;
+ case I915_CACHING_DISPLAY:
+ level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
+ break;
+ default:
+ return -EINVAL;
}
- old_read_domains = obj->read_domains;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
- /* The actual obj->write_domain will be updated with
- * pending_write_domain after we emit the accumulated flush for all
- * of our domain changes in execbuffers (which clears objects'
- * write_domains). So if we have a current write domain that we
- * aren't changing, set pending_write_domain to that.
- */
- if (flush_domains == 0 && obj->pending_write_domain == 0)
- obj->pending_write_domain = obj->write_domain;
- obj->read_domains = obj->pending_read_domains;
-
- dev->invalidate_domains |= invalidate_domains;
- dev->flush_domains |= flush_domains;
-#if WATCH_BUF
- DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
- __func__,
- obj->read_domains, obj->write_domain,
- dev->invalidate_domains, dev->flush_domains);
-#endif
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- obj->write_domain);
+ ret = i915_gem_object_set_cache_level(obj, level);
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
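
Editor's note: both new ioctls are reachable from userspace through the standard DRM command path. A hedged usage sketch, assuming libdrm (drmIoctl() and the uapi header via `pkg-config libdrm`); error handling trimmed:

#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Change the caching mode of a GEM object, e.g. to I915_CACHING_NONE. */
static int set_object_caching(int fd, uint32_t handle, uint32_t caching)
{
	struct drm_i915_gem_caching arg = {
		.handle = handle,
		.caching = caching,
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
}

/* Read back what the kernel currently reports for the object. */
static int get_object_caching(int fd, uint32_t handle, uint32_t *caching)
{
	struct drm_i915_gem_caching arg = { .handle = handle };
	int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg);

	if (ret == 0)
		*caching = arg.caching;
	return ret;
}
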
-/**
- * Moves the object from a partially CPU read to a full one.
- *
- * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
- * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
- */
-static void
-i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
+static bool is_pin_display(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct i915_vma *vma;
- if (!obj_priv->page_cpu_valid)
- return;
+ if (list_empty(&obj->vma_list))
+ return false;
- /* If we're partially in the CPU read domain, finish moving it in.
- */
- if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
- int i;
+ vma = i915_gem_obj_to_ggtt(obj);
+ if (!vma)
+ return false;
- for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
- if (obj_priv->page_cpu_valid[i])
- continue;
- drm_clflush_pages(obj_priv->pages + i, 1);
- }
- }
-
- /* Free the page_cpu_valid mappings which are now stale, whether
- * or not we've got I915_GEM_DOMAIN_CPU.
+ /* There are 3 sources that pin objects:
+ * 1. The display engine (scanouts, sprites, cursors);
+ * 2. Reservations for execbuffer;
+ * 3. The user.
+ *
+ * We can ignore reservations as we hold the struct_mutex and
+ * are only called outside of the reservation path. The user
+ * can only increment pin_count once, and so if after
+ * subtracting the potential reference by the user, any pin_count
+ * remains, it must be due to another use by the display engine.
*/
- kfree(obj_priv->page_cpu_valid);
- obj_priv->page_cpu_valid = NULL;
+ return vma->pin_count - !!obj->user_pin_count;
}
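
Editor's note: the final expression encodes the comment directly — at most one pin can belong to the user, so any count left after discounting it must be a display pin. In isolation (a plain-C model, not from the patch):

/* Nonzero iff pins remain after discounting the one possible user pin. */
static int has_display_pin(unsigned int pin_count,
			   unsigned long user_pin_count)
{
	return pin_count - (user_pin_count ? 1 : 0);
}
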
-/**
- * Set the CPU read domain on a range of the object.
- *
- * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
- * not entirely valid. The page_cpu_valid member of the object flags which
- * pages have been flushed, and will be respected by
- * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
- * of the whole object.
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
+/*
+ * Prepare buffer for display plane (scanout, cursors, etc).
+ * Can be called from an uninterruptible phase (modesetting) and allows
+ * any flushes to be pipelined (for pageflips).
*/
-static int
-i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
- uint64_t offset, uint64_t size)
+int
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
+ struct intel_engine_cs *pipelined)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- uint32_t old_read_domains;
- int i, ret;
-
- if (offset == 0 && size == obj->size)
- return i915_gem_object_set_to_cpu_domain(obj, 0);
+ u32 old_read_domains, old_write_domain;
+ bool was_pin_display;
+ int ret;
- i915_gem_object_flush_gpu_write_domain(obj);
- /* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
- if (ret != 0)
- return ret;
- i915_gem_object_flush_gtt_write_domain(obj);
+ if (pipelined != obj->ring) {
+ ret = i915_gem_object_sync(obj, pipelined);
+ if (ret)
+ return ret;
+ }
- /* If we're already fully in the CPU read domain, we're done. */
- if (obj_priv->page_cpu_valid == NULL &&
- (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
- return 0;
+ /* Mark the pin_display early so that we account for the
+ * display coherency whilst setting up the cache domains.
+ */
+ was_pin_display = obj->pin_display;
+ obj->pin_display = true;
- /* Otherwise, create/clear the per-page CPU read domain flag if we're
- * newly adding I915_GEM_DOMAIN_CPU
+ /* The display engine is not coherent with the LLC cache on gen6. As
+ * a result, we make sure that the pinning that is about to occur is
+ * done with uncached PTEs. This is the lowest common denominator for all
+ * chipsets.
+ *
+ * However for gen6+, we could do better by using the GFDT bit instead
+ * of uncaching, which would allow us to flush all the LLC-cached data
+ * with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
- if (obj_priv->page_cpu_valid == NULL) {
- obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
- GFP_KERNEL);
- if (obj_priv->page_cpu_valid == NULL)
- return -ENOMEM;
- } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
- memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
-
- /* Flush the cache on any pages that are still invalid from the CPU's
- * perspective.
+ ret = i915_gem_object_set_cache_level(obj,
+ HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
+ if (ret)
+ goto err_unpin_display;
+
+ /* As the user may map the buffer once pinned in the display plane
+ * (e.g. libkms for the bootup splash), we have to ensure that we
+ * always use map_and_fenceable for all scanout buffers.
*/
- for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
- i++) {
- if (obj_priv->page_cpu_valid[i])
- continue;
+ ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
+ if (ret)
+ goto err_unpin_display;
- drm_clflush_pages(obj_priv->pages + i, 1);
+ i915_gem_object_flush_cpu_write_domain(obj, true);
- obj_priv->page_cpu_valid[i] = 1;
- }
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
-
- old_read_domains = obj->read_domains;
- obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = 0;
+ obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
- obj->write_domain);
+ old_write_domain);
return 0;
+
+err_unpin_display:
+ WARN_ON(was_pin_display != is_pin_display(obj));
+ obj->pin_display = was_pin_display;
+ return ret;
}
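
Editor's note: the error path restores pin_display to its prior value rather than clearing it, since the object may already be pinned for display by another user. A stripped-down model of that mark-early/unwind shape (hypothetical helpers standing in for set_cache_level() and ggtt_pin(); plain C):

#include <stdbool.h>

struct obj_state { bool pin_display; };

/* Placeholders for the real cache-level and pinning steps. */
static int make_uncached(struct obj_state *o) { (void)o; return 0; }
static int pin_mappable(struct obj_state *o)  { (void)o; return 0; }

static int pin_for_display(struct obj_state *o)
{
	bool was_pin_display = o->pin_display;
	int ret;

	/* Mark early so coherency tracking sees the display pin. */
	o->pin_display = true;

	ret = make_uncached(o);
	if (ret)
		goto err;
	ret = pin_mappable(o);
	if (ret)
		goto err;
	return 0;

err:
	o->pin_display = was_pin_display;
	return ret;
}
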
-/**
- * Pin an object to the GTT and evaluate the relocations landing in it.
- */
-static int
-i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object2 *entry,
- struct drm_i915_gem_relocation_entry *relocs)
-{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int i, ret;
- void __iomem *reloc_page;
- bool need_fence;
+void
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_ggtt_unpin(obj);
+ obj->pin_display = is_pin_display(obj);
+}
- need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj_priv->tiling_mode != I915_TILING_NONE;
+int
+i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
+{
+ int ret;
- /* Check fence reg constraints and rebind if necessary */
- if (need_fence && !i915_obj_fenceable(dev, obj))
- i915_gem_object_unbind(obj);
+ if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
+ return 0;
- /* Choose the GTT offset for our buffer and put it there. */
- ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
+ ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
- /*
- * Pre-965 chips need a fence register set up in order to
- * properly handle blits to/from tiled surfaces.
- */
- if (need_fence) {
- ret = i915_gem_object_get_fence_reg(obj);
- if (ret != 0) {
- if (ret != -EBUSY && ret != -ERESTARTSYS)
- DRM_ERROR("Failure to install fence: %d\n",
- ret);
- i915_gem_object_unpin(obj);
- return ret;
- }
- }
-
- entry->offset = obj_priv->gtt_offset;
-
- /* Apply the relocations, using the GTT aperture to avoid cache
- * flushing requirements.
- */
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
- struct drm_gem_object *target_obj;
- struct drm_i915_gem_object *target_obj_priv;
- uint32_t reloc_val, reloc_offset;
- uint32_t __iomem *reloc_entry;
-
- target_obj = drm_gem_object_lookup(obj->dev, file_priv,
- reloc->target_handle);
- if (target_obj == NULL) {
- i915_gem_object_unpin(obj);
- return -EBADF;
- }
- target_obj_priv = target_obj->driver_private;
-
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc->offset,
- (int) reloc->target_handle,
- (int) reloc->read_domains,
- (int) reloc->write_domain,
- (int) target_obj_priv->gtt_offset,
- (int) reloc->presumed_offset,
- reloc->delta);
-#endif
-
- /* The target buffer should have appeared before us in the
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (target_obj_priv->gtt_space == NULL) {
- DRM_ERROR("No GTT space found for object %d\n",
- reloc->target_handle);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
-
- /* Validate that the target is in a valid r/w GPU domain */
- if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
- reloc->read_domains & I915_GEM_DOMAIN_CPU) {
- DRM_ERROR("reloc with read/write CPU domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->read_domains,
- reloc->write_domain);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
- if (reloc->write_domain && target_obj->pending_write_domain &&
- reloc->write_domain != target_obj->pending_write_domain) {
- DRM_ERROR("Write domain conflict: "
- "obj %p target %d offset %d "
- "new %08x old %08x\n",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->write_domain,
- target_obj->pending_write_domain);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
-
- target_obj->pending_read_domains |= reloc->read_domains;
- target_obj->pending_write_domain |= reloc->write_domain;
-
- /* If the relocation already has the right value in it, no
- * more work needs to be done.
- */
- if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
- drm_gem_object_unreference(target_obj);
- continue;
- }
-
- /* Check that the relocation address is valid... */
- if (reloc->offset > obj->size - 4) {
- DRM_ERROR("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset, (int) obj->size);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
- if (reloc->offset & 3) {
- DRM_ERROR("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
-
- /* and points to somewhere within the target object. */
- if (reloc->delta >= target_obj->size) {
- DRM_ERROR("Relocation beyond target object bounds: "
- "obj %p target %d delta %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->delta, (int) target_obj->size);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
- if (ret != 0) {
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
-
- /* Map the page containing the relocation we're going to
- * perform.
- */
- reloc_offset = obj_priv->gtt_offset + reloc->offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- (reloc_offset &
- ~(PAGE_SIZE - 1)));
- reloc_entry = (uint32_t __iomem *)(reloc_page +
- (reloc_offset & (PAGE_SIZE - 1)));
- reloc_val = target_obj_priv->gtt_offset + reloc->delta;
-
-#if WATCH_BUF
- DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
- obj, (unsigned int) reloc->offset,
- readl(reloc_entry), reloc_val);
-#endif
- writel(reloc_val, reloc_entry);
- io_mapping_unmap_atomic(reloc_page);
-
- /* The updated presumed offset for this entry will be
- * copied back out to the user.
- */
- reloc->presumed_offset = target_obj_priv->gtt_offset;
-
- drm_gem_object_unreference(target_obj);
- }
-
-#if WATCH_BUF
- if (0)
- i915_gem_dump_object(obj, 128, __func__, ~0);
-#endif
+ /* Ensure that we invalidate the GPU's caches and TLBs. */
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
return 0;
}
-/** Dispatch a batchbuffer to the ring
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
*/
-static int
-i915_dispatch_gem_execbuffer(struct drm_device *dev,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+int
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int nbox = exec->num_cliprects;
- int i = 0, count;
- uint32_t exec_start, exec_len;
- RING_LOCALS;
+ uint32_t old_write_domain, old_read_domains;
+ int ret;
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- exec_len = (uint32_t) exec->batch_len;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+ return 0;
- trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
+ ret = i915_gem_object_wait_rendering(obj, !write);
+ if (ret)
+ return ret;
- count = nbox ? nbox : 1;
+ i915_gem_object_retire(obj);
+ i915_gem_object_flush_gtt_write_domain(obj);
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- int ret = i915_emit_box(dev, cliprects, i,
- exec->DR1, exec->DR4);
- if (ret)
- return ret;
- }
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
- if (IS_I830(dev) || IS_845G(dev)) {
- BEGIN_LP_RING(4);
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(exec_start | MI_BATCH_NON_SECURE);
- OUT_RING(exec_start + exec_len - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(2);
- if (IS_I965G(dev)) {
- OUT_RING(MI_BATCH_BUFFER_START |
- (2 << 6) |
- MI_BATCH_NON_SECURE_I965);
- OUT_RING(exec_start);
- } else {
- OUT_RING(MI_BATCH_BUFFER_START |
- (2 << 6));
- OUT_RING(exec_start | MI_BATCH_NON_SECURE);
- }
- ADVANCE_LP_RING();
- }
+ /* Flush the CPU cache if it's still invalid. */
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ i915_gem_clflush_object(obj, false);
+
+ obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
}
- /* XXX breadcrumb */
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+
+ /* If we're writing through the CPU, then the GPU read domains will
+ * need to be invalidated at next use.
+ */
+ if (write) {
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+
return 0;
}
@@ -3457,808 +3967,292 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
* relatively low latency when blocking on a particular request to finish.
*/
static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
- int ret = 0;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+ struct drm_i915_gem_request *request;
+ struct intel_engine_cs *ring = NULL;
+ unsigned reset_counter;
+ u32 seqno = 0;
+ int ret;
- mutex_lock(&dev->struct_mutex);
- while (!list_empty(&i915_file_priv->mm.request_list)) {
- struct drm_i915_gem_request *request;
+ ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
+ if (ret)
+ return ret;
- request = list_first_entry(&i915_file_priv->mm.request_list,
- struct drm_i915_gem_request,
- client_list);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
+ if (ret)
+ return ret;
+ spin_lock(&file_priv->mm.lock);
+ list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
- ret = i915_wait_request(dev, request->seqno);
- if (ret != 0)
- break;
- }
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-static int
-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
- uint32_t buffer_count,
- struct drm_i915_gem_relocation_entry **relocs)
-{
- uint32_t reloc_count = 0, reloc_index = 0, i;
- int ret;
-
- *relocs = NULL;
- for (i = 0; i < buffer_count; i++) {
- if (reloc_count + exec_list[i].relocation_count < reloc_count)
- return -EINVAL;
- reloc_count += exec_list[i].relocation_count;
+ ring = request->ring;
+ seqno = request->seqno;
}
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ spin_unlock(&file_priv->mm.lock);
- *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
- if (*relocs == NULL) {
- DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
- return -ENOMEM;
- }
-
- for (i = 0; i < buffer_count; i++) {
- struct drm_i915_gem_relocation_entry __user *user_relocs;
-
- user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
-
- ret = copy_from_user(&(*relocs)[reloc_index],
- user_relocs,
- exec_list[i].relocation_count *
- sizeof(**relocs));
- if (ret != 0) {
- drm_free_large(*relocs);
- *relocs = NULL;
- return -EFAULT;
- }
-
- reloc_index += exec_list[i].relocation_count;
- }
-
- return 0;
-}
-
-static int
-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
- uint32_t buffer_count,
- struct drm_i915_gem_relocation_entry *relocs)
-{
- uint32_t reloc_count = 0, i;
- int ret = 0;
-
- for (i = 0; i < buffer_count; i++) {
- struct drm_i915_gem_relocation_entry __user *user_relocs;
- int unwritten;
-
- user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
-
- unwritten = copy_to_user(user_relocs,
- &relocs[reloc_count],
- exec_list[i].relocation_count *
- sizeof(*relocs));
-
- if (unwritten) {
- ret = -EFAULT;
- goto err;
- }
-
- reloc_count += exec_list[i].relocation_count;
- }
+ if (seqno == 0)
+ return 0;
-err:
- drm_free_large(relocs);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
+ if (ret == 0)
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
return ret;
}
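
Editor's note: from userspace this path is driven by DRM_IOCTL_I915_GEM_THROTTLE, which carries no argument structure. A minimal caller (sketch, assuming libdrm):

#include <xf86drm.h>
#include <i915_drm.h>

/* Block until none of this client's requests older than ~20ms remain. */
static int gem_throttle(int fd)
{
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
}
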
-static int
-i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
- uint64_t exec_offset)
+static bool
+i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
{
- uint32_t exec_start, exec_len;
+ struct drm_i915_gem_object *obj = vma->obj;
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- exec_len = (uint32_t) exec->batch_len;
+ if (alignment &&
+ vma->node.start & (alignment - 1))
+ return true;
- if ((exec_start | exec_len) & 0x7)
- return -EINVAL;
+ if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+ return true;
- if (!exec_start)
- return -EINVAL;
+ if (flags & PIN_OFFSET_BIAS &&
+ vma->node.start < (flags & PIN_OFFSET_MASK))
+ return true;
- return 0;
+ return false;
}
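
Editor's note: the alignment test relies on alignment being a power of two, so that `start & (alignment - 1)` extracts the misaligned low bits. A standalone model of the three checks (plain C; the PIN_* values here are illustrative stand-ins for the kernel flags):

#include <stdbool.h>
#include <stdint.h>

#define PIN_MAPPABLE	(1ull << 0)	/* illustrative values only */
#define PIN_OFFSET_BIAS	(1ull << 1)
#define PIN_OFFSET_MASK	(~0ull << 12)

static bool vma_misplaced(uint64_t start, bool map_and_fenceable,
			  uint32_t alignment, uint64_t flags)
{
	if (alignment && (start & (alignment - 1)))
		return true;	/* not aligned as requested */
	if ((flags & PIN_MAPPABLE) && !map_and_fenceable)
		return true;	/* must be CPU-mappable but is not */
	if ((flags & PIN_OFFSET_BIAS) && start < (flags & PIN_OFFSET_MASK))
		return true;	/* bound below the requested bias */
	return false;
}
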
-static int
-i915_gem_wait_for_pending_flip(struct drm_device *dev,
- struct drm_gem_object **object_list,
- int count)
+int
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- DEFINE_WAIT(wait);
- int i, ret = 0;
-
- for (;;) {
- prepare_to_wait(&dev_priv->pending_flip_queue,
- &wait, TASK_INTERRUPTIBLE);
- for (i = 0; i < count; i++) {
- obj_priv = object_list[i]->driver_private;
- if (atomic_read(&obj_priv->pending_flip) > 0)
- break;
- }
- if (i == count)
- break;
-
- if (!signal_pending(current)) {
- mutex_unlock(&dev->struct_mutex);
- schedule();
- mutex_lock(&dev->struct_mutex);
- continue;
- }
- ret = -ERESTARTSYS;
- break;
- }
- finish_wait(&dev_priv->pending_flip_queue, &wait);
-
- return ret;
-}
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct i915_vma *vma;
+ int ret;
-int
-i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv,
- struct drm_i915_gem_execbuffer2 *args,
- struct drm_i915_gem_exec_object2 *exec_list)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object **object_list = NULL;
- struct drm_gem_object *batch_obj;
- struct drm_i915_gem_object *obj_priv;
- struct drm_clip_rect *cliprects = NULL;
- struct drm_i915_gem_relocation_entry *relocs;
- int ret = 0, ret2, i, pinned = 0;
- uint64_t exec_offset;
- uint32_t seqno, flush_domains, reloc_index;
- int pin_tries, flips;
-
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
+ if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
+ return -ENODEV;
- if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
return -EINVAL;
- }
- object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
- if (object_list == NULL) {
- DRM_ERROR("Failed to allocate object list for %d buffers\n",
- args->buffer_count);
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- if (args->num_cliprects != 0) {
- cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
- GFP_KERNEL);
- if (cliprects == NULL)
- goto pre_mutex_err;
-
- ret = copy_from_user(cliprects,
- (struct drm_clip_rect __user *)
- (uintptr_t) args->cliprects_ptr,
- sizeof(*cliprects) * args->num_cliprects);
- if (ret != 0) {
- DRM_ERROR("copy %d cliprects failed: %d\n",
- args->num_cliprects, ret);
- goto pre_mutex_err;
- }
- }
-
- ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
- &relocs);
- if (ret != 0)
- goto pre_mutex_err;
-
- mutex_lock(&dev->struct_mutex);
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
- if (atomic_read(&dev_priv->mm.wedged)) {
- mutex_unlock(&dev->struct_mutex);
- ret = -EIO;
- goto pre_mutex_err;
- }
-
- if (dev_priv->mm.suspended) {
- mutex_unlock(&dev->struct_mutex);
- ret = -EBUSY;
- goto pre_mutex_err;
- }
-
- /* Look up object handles */
- flips = 0;
- for (i = 0; i < args->buffer_count; i++) {
- object_list[i] = drm_gem_object_lookup(dev, file_priv,
- exec_list[i].handle);
- if (object_list[i] == NULL) {
- DRM_ERROR("Invalid object handle %d at index %d\n",
- exec_list[i].handle, i);
- ret = -EBADF;
- goto err;
- }
-
- obj_priv = object_list[i]->driver_private;
- if (obj_priv->in_execbuffer) {
- DRM_ERROR("Object %p appears more than once in object list\n",
- object_list[i]);
- ret = -EBADF;
- goto err;
- }
- obj_priv->in_execbuffer = true;
- flips += atomic_read(&obj_priv->pending_flip);
- }
-
- if (flips > 0) {
- ret = i915_gem_wait_for_pending_flip(dev, object_list,
- args->buffer_count);
- if (ret)
- goto err;
- }
-
- /* Pin and relocate */
- for (pin_tries = 0; ; pin_tries++) {
- ret = 0;
- reloc_index = 0;
-
- for (i = 0; i < args->buffer_count; i++) {
- object_list[i]->pending_read_domains = 0;
- object_list[i]->pending_write_domain = 0;
- ret = i915_gem_object_pin_and_relocate(object_list[i],
- file_priv,
- &exec_list[i],
- &relocs[reloc_index]);
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (vma) {
+ if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+ return -EBUSY;
+
+ if (i915_vma_misplaced(vma, alignment, flags)) {
+ WARN(vma->pin_count,
+ "bo is already pinned with incorrect alignment:"
+ " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
+ " obj->map_and_fenceable=%d\n",
+ i915_gem_obj_offset(obj, vm), alignment,
+ !!(flags & PIN_MAPPABLE),
+ obj->map_and_fenceable);
+ ret = i915_vma_unbind(vma);
if (ret)
- break;
- pinned = i + 1;
- reloc_index += exec_list[i].relocation_count;
- }
- /* success */
- if (ret == 0)
- break;
-
- /* error other than GTT full, or we've already tried again */
- if (ret != -ENOSPC || pin_tries >= 1) {
- if (ret != -ERESTARTSYS) {
- unsigned long long total_size = 0;
- for (i = 0; i < args->buffer_count; i++)
- total_size += object_list[i]->size;
- DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
- pinned+1, args->buffer_count,
- total_size, ret);
- DRM_ERROR("%d objects [%d pinned], "
- "%d object bytes [%d pinned], "
- "%d/%d gtt bytes\n",
- atomic_read(&dev->object_count),
- atomic_read(&dev->pin_count),
- atomic_read(&dev->object_memory),
- atomic_read(&dev->pin_memory),
- atomic_read(&dev->gtt_memory),
- dev->gtt_total);
- }
- goto err;
- }
-
- /* unpin all of our buffers */
- for (i = 0; i < pinned; i++)
- i915_gem_object_unpin(object_list[i]);
- pinned = 0;
-
- /* evict everyone we can from the aperture */
- ret = i915_gem_evict_everything(dev);
- if (ret && ret != -ENOSPC)
- goto err;
- }
-
- /* Set the pending read domains for the batch buffer to COMMAND */
- batch_obj = object_list[args->buffer_count-1];
- if (batch_obj->pending_write_domain) {
- DRM_ERROR("Attempting to use self-modifying batch buffer\n");
- ret = -EINVAL;
- goto err;
- }
- batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
-
- /* Sanity check the batch buffer, prior to moving objects */
- exec_offset = exec_list[args->buffer_count - 1].offset;
- ret = i915_gem_check_execbuffer(args, exec_offset);
- if (ret != 0) {
- DRM_ERROR("execbuf with invalid offset/length\n");
- goto err;
- }
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- /* Zero the global flush/invalidate flags. These
- * will be modified as new domains are computed
- * for each object
- */
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
-
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
-
- /* Compute new gpu domains and update invalidate/flush */
- i915_gem_object_set_to_gpu_domain(obj);
- }
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- dev->invalidate_domains,
- dev->flush_domains);
-#endif
- i915_gem_flush(dev,
- dev->invalidate_domains,
- dev->flush_domains);
- if (dev->flush_domains)
- (void)i915_add_request(dev, file_priv,
- dev->flush_domains);
- }
-
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
- uint32_t old_write_domain = obj->write_domain;
-
- obj->write_domain = obj->pending_write_domain;
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
-#if WATCH_COHERENCY
- for (i = 0; i < args->buffer_count; i++) {
- i915_gem_object_check_coherency(object_list[i],
- exec_list[i].handle);
- }
-#endif
-
-#if WATCH_EXEC
- i915_gem_dump_object(batch_obj,
- args->batch_len,
- __func__,
- ~0);
-#endif
-
- /* Exec the batchbuffer */
- ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
- if (ret) {
- DRM_ERROR("dispatch failed %d\n", ret);
- goto err;
- }
-
- /*
- * Ensure that the commands in the batch buffer are
- * finished before the interrupt fires
- */
- flush_domains = i915_retire_commands(dev);
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- /*
- * Get a seqno representing the execution of the current buffer,
- * which we can wait on. We would like to mitigate these interrupts,
- * likely by only creating seqnos occasionally (so that we have
- * *some* interrupts representing completion of buffers that we can
- * wait on when trying to clear up gtt space).
- */
- seqno = i915_add_request(dev, file_priv, flush_domains);
- BUG_ON(seqno == 0);
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
-
- i915_gem_object_move_to_active(obj, seqno);
-#if WATCH_LRU
- DRM_INFO("%s: move to exec list %p\n", __func__, obj);
-#endif
- }
-#if WATCH_LRU
- i915_dump_lru(dev, __func__);
-#endif
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
-err:
- for (i = 0; i < pinned; i++)
- i915_gem_object_unpin(object_list[i]);
+ return ret;
- for (i = 0; i < args->buffer_count; i++) {
- if (object_list[i]) {
- obj_priv = object_list[i]->driver_private;
- obj_priv->in_execbuffer = false;
+ vma = NULL;
}
- drm_gem_object_unreference(object_list[i]);
}
- mutex_unlock(&dev->struct_mutex);
-
- /* Copy the updated relocations out regardless of current error
- * state. Failure to update the relocs would mean that the next
- * time userland calls execbuf, it would do so with presumed offset
- * state that didn't match the actual object state.
- */
- ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
- relocs);
- if (ret2 != 0) {
- DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
-
- if (ret == 0)
- ret = ret2;
+ if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
+ vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
}
-pre_mutex_err:
- drm_free_large(object_list);
- kfree(cliprects);
-
- return ret;
-}
-
-/*
- * Legacy execbuffer just creates an exec2 list from the original exec object
- * list array and passes it to the real function.
- */
-int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_execbuffer *args = data;
- struct drm_i915_gem_execbuffer2 exec2;
- struct drm_i915_gem_exec_object *exec_list = NULL;
- struct drm_i915_gem_exec_object2 *exec2_list = NULL;
- int ret, i;
-
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
+ if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
+ vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
- if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
- return -EINVAL;
- }
+ vma->pin_count++;
+ if (flags & PIN_MAPPABLE)
+ obj->pin_mappable |= true;
- /* Copy in the exec list from userland */
- exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
- exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
- if (exec_list == NULL || exec2_list == NULL) {
- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
- return -ENOMEM;
- }
- ret = copy_from_user(exec_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec_list) * args->buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
- return -EFAULT;
- }
-
- for (i = 0; i < args->buffer_count; i++) {
- exec2_list[i].handle = exec_list[i].handle;
- exec2_list[i].relocation_count = exec_list[i].relocation_count;
- exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
- exec2_list[i].alignment = exec_list[i].alignment;
- exec2_list[i].offset = exec_list[i].offset;
- if (!IS_I965G(dev))
- exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
- else
- exec2_list[i].flags = 0;
- }
-
- exec2.buffers_ptr = args->buffers_ptr;
- exec2.buffer_count = args->buffer_count;
- exec2.batch_start_offset = args->batch_start_offset;
- exec2.batch_len = args->batch_len;
- exec2.DR1 = args->DR1;
- exec2.DR4 = args->DR4;
- exec2.num_cliprects = args->num_cliprects;
- exec2.cliprects_ptr = args->cliprects_ptr;
- exec2.flags = 0;
-
- ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
- if (!ret) {
- /* Copy the new buffer offsets back to the user's exec list. */
- for (i = 0; i < args->buffer_count; i++)
- exec_list[i].offset = exec2_list[i].offset;
- /* ... and back out to userspace */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- exec_list,
- sizeof(*exec_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
- }
- } else {
- DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret);
- }
-
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
- return ret;
+ return 0;
}
-int
-i915_gem_execbuffer2(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+void
+i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_execbuffer2 *args = data;
- struct drm_i915_gem_exec_object2 *exec2_list = NULL;
- int ret;
-
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
- if (args->buffer_count < 1) {
- DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
- return -EINVAL;
- }
-
- exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
- if (exec2_list == NULL) {
- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
- return -ENOMEM;
- }
- ret = copy_from_user(exec2_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- drm_free_large(exec2_list);
- return -EFAULT;
- }
+ struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
- ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
- if (!ret) {
- /* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- exec2_list,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
- }
- }
+ BUG_ON(!vma);
+ BUG_ON(vma->pin_count == 0);
+ BUG_ON(!i915_gem_obj_ggtt_bound(obj));
- drm_free_large(exec2_list);
- return ret;
+ if (--vma->pin_count == 0)
+ obj->pin_mappable = false;
}
-int
-i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+bool
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int ret;
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
- if (obj_priv->gtt_space == NULL) {
- ret = i915_gem_object_bind_to_gtt(obj, alignment);
- if (ret)
- return ret;
- }
-
- obj_priv->pin_count++;
-
- /* If the object is not active and not pending a flush,
- * remove it from the inactive list
- */
- if (obj_priv->pin_count == 1) {
- atomic_inc(&dev->pin_count);
- atomic_add(obj->size, &dev->pin_memory);
- if (!obj_priv->active &&
- (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
- !list_empty(&obj_priv->list))
- list_del_init(&obj_priv->list);
- }
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- return 0;
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
+
+ WARN_ON(!ggtt_vma ||
+ dev_priv->fence_regs[obj->fence_reg].pin_count >
+ ggtt_vma->pin_count);
+ dev_priv->fence_regs[obj->fence_reg].pin_count++;
+ return true;
+ } else
+ return false;
}
void
-i915_gem_object_unpin(struct drm_gem_object *obj)
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
- obj_priv->pin_count--;
- BUG_ON(obj_priv->pin_count < 0);
- BUG_ON(obj_priv->gtt_space == NULL);
-
- /* If the object is no longer pinned, and is
- * neither active nor being flushed, then stick it on
- * the inactive list
- */
- if (obj_priv->pin_count == 0) {
- if (!obj_priv->active &&
- (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
- list_move_tail(&obj_priv->list,
- &dev_priv->mm.inactive_list);
- atomic_dec(&dev->pin_count);
- atomic_sub(obj->size, &dev->pin_memory);
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
+ dev_priv->fence_regs[obj->fence_reg].pin_count--;
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
}
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pin *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
- mutex_lock(&dev->struct_mutex);
+ if (INTEL_INFO(dev)->gen >= 6)
+ return -ENODEV;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
- args->handle);
- mutex_unlock(&dev->struct_mutex);
- return -EBADF;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
}
- obj_priv = obj->driver_private;
- if (obj_priv->madv != I915_MADV_WILLNEED) {
- DRM_ERROR("Attempting to pin a purgeable buffer\n");
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ if (obj->madv != I915_MADV_WILLNEED) {
+ DRM_DEBUG("Attempting to pin a purgeable buffer\n");
+ ret = -EFAULT;
+ goto out;
}
- if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
- DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
+ if (obj->pin_filp != NULL && obj->pin_filp != file) {
+ DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
args->handle);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- obj_priv->user_pin_count++;
- obj_priv->pin_filp = file_priv;
- if (obj_priv->user_pin_count == 1) {
- ret = i915_gem_object_pin(obj, args->alignment);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (obj->user_pin_count == ULONG_MAX) {
+ ret = -EBUSY;
+ goto out;
}
- /* XXX - flush the CPU caches for pinned objects
- * as the X server doesn't manage domains yet
- */
- i915_gem_object_flush_cpu_write_domain(obj);
- args->offset = obj_priv->gtt_offset;
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ if (obj->user_pin_count == 0) {
+ ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
+ if (ret)
+ goto out;
+ }
- return 0;
+ obj->user_pin_count++;
+ obj->pin_filp = file;
+
+ args->offset = i915_gem_obj_ggtt_offset(obj);
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
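
Editor's note: userspace reaches this through the legacy pin ioctl; per the gen check added above it now fails with -ENODEV on gen6+. The returned offset is the object's GGTT address. Sketch, assuming libdrm:

#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Pin a GEM object into the mappable GGTT and report its offset. */
static int pin_object(int fd, uint32_t handle, uint64_t alignment,
		      uint64_t *offset)
{
	struct drm_i915_gem_pin arg = {
		.handle = handle,
		.alignment = alignment,
	};
	int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_PIN, &arg);

	if (ret == 0)
		*offset = arg.offset;
	return ret;
}
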
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pin *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
+ int ret;
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
- args->handle);
- mutex_unlock(&dev->struct_mutex);
- return -EBADF;
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
}
- obj_priv = obj->driver_private;
- if (obj_priv->pin_filp != file_priv) {
- DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
+ if (obj->pin_filp != file) {
+ DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- obj_priv->user_pin_count--;
- if (obj_priv->user_pin_count == 0) {
- obj_priv->pin_filp = NULL;
- i915_gem_object_unpin(obj);
+ obj->user_pin_count--;
+ if (obj->user_pin_count == 0) {
+ obj->pin_filp = NULL;
+ i915_gem_object_ggtt_unpin(obj);
}
- drm_gem_object_unreference(obj);
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
mutex_unlock(&dev->struct_mutex);
- return 0;
+ return ret;
}
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_busy *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
+ int ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
- args->handle);
- return -EBADF;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
}
- mutex_lock(&dev->struct_mutex);
- /* Update the active list for the hardware's current position.
- * Otherwise this only updates on a delayed timer or when irqs are
- * actually unmasked, and our working set ends up being larger than
- * required.
+ /* Count all active objects as busy, even if they are currently not used
+ * by the gpu. Users of this interface expect objects to eventually
+ * become non-busy without any further actions, therefore emit any
+ * necessary flushes here.
*/
- i915_gem_retire_requests(dev);
+ ret = i915_gem_object_flush_active(obj);
- obj_priv = obj->driver_private;
- /* Don't count being on the flushing list against the object being
- * done. Otherwise, a buffer left on the flushing list but not getting
- * flushed (because nobody's flushing that domain) won't ever return
- * unbusy and get reused by libdrm's bo cache. The other expected
- * consumer of this interface, OpenGL's occlusion queries, also specs
- * that the objects get unbusy "eventually" without any interference.
- */
- args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
+ args->busy = obj->active;
+ if (obj->ring) {
+ BUILD_BUG_ON(I915_NUM_RINGS > 16);
+ args->busy |= intel_ring_flag(obj->ring) << 16;
+ }
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
+unlock:
mutex_unlock(&dev->struct_mutex);
- return 0;
+ return ret;
}
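
Editor's note: the busy result now packs two facts — bit 0 reports activity, and the upper half carries the ring the object last executed on. Decoding it from userspace (sketch, assuming libdrm):

#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Returns 1 if busy, 0 if idle, -1 on error; ring flag in *ring_flag. */
static int object_is_busy(int fd, uint32_t handle, unsigned *ring_flag)
{
	struct drm_i915_gem_busy arg = { .handle = handle };

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &arg))
		return -1;

	*ring_flag = arg.busy >> 16;	/* intel_ring_flag() of last ring */
	return arg.busy & 1;		/* object still active? */
}
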
int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- return i915_gem_ring_throttle(dev, file_priv);
+ return i915_gem_ring_throttle(dev, file_priv);
}
int
@@ -4266,8 +4260,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_madvise *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
+ int ret;
switch (args->madv) {
case I915_MADV_DONTNEED:
@@ -4277,479 +4271,544 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
- args->handle);
- return -EBADF;
- }
-
- mutex_lock(&dev->struct_mutex);
- obj_priv = obj->driver_private;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
- if (obj_priv->pin_count) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
- DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
- return -EINVAL;
+ if (i915_gem_obj_is_pinned(obj)) {
+ ret = -EINVAL;
+ goto out;
}
- if (obj_priv->madv != __I915_MADV_PURGED)
- obj_priv->madv = args->madv;
+ if (obj->madv != __I915_MADV_PURGED)
+ obj->madv = args->madv;
- /* if the object is no longer bound, discard its backing storage */
- if (i915_gem_object_is_purgeable(obj_priv) &&
- obj_priv->gtt_space == NULL)
+ /* if the object is no longer attached, discard its backing storage */
+ if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
i915_gem_object_truncate(obj);
- args->retained = obj_priv->madv != __I915_MADV_PURGED;
+ args->retained = obj->madv != __I915_MADV_PURGED;
- drm_gem_object_unreference(obj);
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
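
Editor's note: the madvise ioctl lets a buffer cache (libdrm's bo cache, for instance) mark idle buffers DONTNEED and re-arm them on reuse; `retained` reports whether the backing pages survived in the interim. Sketch, assuming libdrm:

#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Returns true if the object's backing storage is still intact. */
static bool madvise_object(int fd, uint32_t handle, uint32_t madv)
{
	struct drm_i915_gem_madvise arg = {
		.handle = handle,
		.madv = madv,	/* I915_MADV_WILLNEED or I915_MADV_DONTNEED */
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
		return false;

	return arg.retained;
}
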
- return 0;
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops)
+{
+ INIT_LIST_HEAD(&obj->global_list);
+ INIT_LIST_HEAD(&obj->ring_list);
+ INIT_LIST_HEAD(&obj->obj_exec_link);
+ INIT_LIST_HEAD(&obj->vma_list);
+
+ obj->ops = ops;
+
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ obj->madv = I915_MADV_WILLNEED;
+ /* Avoid an unnecessary call to unbind on the first bind. */
+ obj->map_and_fenceable = true;
+
+ i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}
-int i915_gem_init_object(struct drm_gem_object *obj)
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+ .get_pages = i915_gem_object_get_pages_gtt,
+ .put_pages = i915_gem_object_put_pages_gtt,
+};
+
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ size_t size)
{
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
+ struct address_space *mapping;
+ gfp_t mask;
- obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
- if (obj_priv == NULL)
- return -ENOMEM;
+ obj = i915_gem_object_alloc(dev);
+ if (obj == NULL)
+ return NULL;
- /*
- * We've just allocated pages from the kernel,
- * so they've just been written by the CPU with
- * zeros. They'll need to be clflushed before we
- * use them with the GPU.
- */
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- obj->read_domains = I915_GEM_DOMAIN_CPU;
+ if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+ i915_gem_object_free(obj);
+ return NULL;
+ }
- obj_priv->agp_type = AGP_USER_MEMORY;
+ mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+ if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+ /* 965gm cannot relocate objects above 4GiB. */
+ mask &= ~__GFP_HIGHMEM;
+ mask |= __GFP_DMA32;
+ }
+
+ mapping = file_inode(obj->base.filp)->i_mapping;
+ mapping_set_gfp_mask(mapping, mask);
+
+ i915_gem_object_init(obj, &i915_gem_object_ops);
- obj->driver_private = obj_priv;
- obj_priv->obj = obj;
- obj_priv->fence_reg = I915_FENCE_REG_NONE;
- INIT_LIST_HEAD(&obj_priv->list);
- INIT_LIST_HEAD(&obj_priv->fence_list);
- obj_priv->madv = I915_MADV_WILLNEED;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+
+ if (HAS_LLC(dev)) {
+ /* On some devices, we can have the GPU use the LLC (the CPU
+ * cache) for about a 10% performance improvement
+ * compared to uncached. Graphics requests other than
+ * display scanout are coherent with the CPU in
+ * accessing this cache. This means in this mode we
+ * don't need to clflush on the CPU side, and on the
+ * GPU side we only need to flush internal caches to
+ * get data visible to the CPU.
+ *
+ * However, we maintain the display planes as UC, and so
+ * need to rebind when first used as such.
+ */
+ obj->cache_level = I915_CACHE_LLC;
+ } else
+ obj->cache_level = I915_CACHE_NONE;
trace_i915_gem_object_create(obj);
- return 0;
+ return obj;
}
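A minimal in-kernel usage sketch of the allocator above (illustrative only; a real caller would go on to pin or map the object and must eventually drop its reference):

	struct drm_i915_gem_object *obj;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL)
		return -ENOMEM;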
-void i915_gem_free_object(struct drm_gem_object *obj)
+static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ /* If we are the last user of the backing storage (be it shmemfs
+ * pages or stolen etc), we know that the pages are going to be
+ * immediately released. In this case, we can then skip copying
+ * back the contents from the GPU.
+ */
+
+ if (obj->madv != I915_MADV_WILLNEED)
+ return false;
+
+ if (obj->base.filp == NULL)
+ return true;
+
+ /* At first glance, this looks racy, but then again so would
+ * userspace racing mmap against close. However, the first external
+ * reference to the filp can only be obtained through the
+ * i915_gem_mmap_ioctl() which safeguards us against the user
+ * acquiring such a reference whilst we are in the middle of
+ * freeing the object.
+ */
+ return atomic_long_read(&obj->base.filp->f_count) == 1;
+}
+
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_vma *vma, *next;
+
+ intel_runtime_pm_get(dev_priv);
trace_i915_gem_object_destroy(obj);
- while (obj_priv->pin_count > 0)
- i915_gem_object_unpin(obj);
+ list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+ int ret;
+
+ vma->pin_count = 0;
+ ret = i915_vma_unbind(vma);
+ if (WARN_ON(ret == -ERESTARTSYS)) {
+ bool was_interruptible;
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ WARN_ON(i915_vma_unbind(vma));
+
+ dev_priv->mm.interruptible = was_interruptible;
+ }
+ }
+
+ i915_gem_object_detach_phys(obj);
+
+ /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
+ * before progressing. */
+ if (obj->stolen)
+ i915_gem_object_unpin_pages(obj);
+
+ if (WARN_ON(obj->pages_pin_count))
+ obj->pages_pin_count = 0;
+ if (discard_backing_storage(obj))
+ obj->madv = I915_MADV_DONTNEED;
+ i915_gem_object_put_pages(obj);
+ i915_gem_object_free_mmap_offset(obj);
+ i915_gem_object_release_stolen(obj);
+
+ BUG_ON(obj->pages);
+
+ if (obj->base.import_attach)
+ drm_prime_gem_destroy(&obj->base, NULL);
- if (obj_priv->phys_obj)
- i915_gem_detach_phys_object(dev, obj);
+ if (obj->ops->release)
+ obj->ops->release(obj);
- i915_gem_object_unbind(obj);
+ drm_gem_object_release(&obj->base);
+ i915_gem_info_remove_obj(dev_priv, obj->base.size);
- if (obj_priv->mmap_offset)
- i915_gem_free_mmap_offset(obj);
+ kfree(obj->bit_17);
+ i915_gem_object_free(obj);
- kfree(obj_priv->page_cpu_valid);
- kfree(obj_priv->bit_17);
- kfree(obj->driver_private);
+ intel_runtime_pm_put(dev_priv);
}
-/** Unbinds all inactive objects. */
-static int
-i915_gem_evict_from_inactive_list(struct drm_device *dev)
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_vma *vma;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == vm)
+ return vma;
- while (!list_empty(&dev_priv->mm.inactive_list)) {
- struct drm_gem_object *obj;
- int ret;
+ return NULL;
+}
- obj = list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- list)->obj;
+void i915_gem_vma_destroy(struct i915_vma *vma)
+{
+ WARN_ON(vma->node.allocated);
- ret = i915_gem_object_unbind(obj);
- if (ret != 0) {
- DRM_ERROR("Error unbinding object: %d\n", ret);
- return ret;
- }
- }
+ /* Keep the vma as a placeholder in the execbuffer reservation lists */
+ if (!list_empty(&vma->exec_list))
+ return;
- return 0;
+ list_del(&vma->vma_link);
+
+ kfree(vma);
+}
+
+static void
+i915_gem_stop_ringbuffers(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i)
+ intel_stop_ring_buffer(ring);
}
int
-i915_gem_idle(struct drm_device *dev)
+i915_gem_suspend(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t seqno, cur_seqno, last_seqno;
- int stuck, ret;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
mutex_lock(&dev->struct_mutex);
+ if (dev_priv->ums.mm_suspended)
+ goto err;
- if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
- mutex_unlock(&dev->struct_mutex);
- return 0;
- }
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ goto err;
+
+ i915_gem_retire_requests(dev);
+
+ /* Under UMS, be paranoid and evict. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_gem_evict_everything(dev);
+
+ i915_kernel_lost_context(dev);
+ i915_gem_stop_ringbuffers(dev);
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
+ * And not confound ums.mm_suspended!
*/
- dev_priv->mm.suspended = 1;
- del_timer(&dev_priv->hangcheck_timer);
-
- /* Cancel the retire work handler, wait for it to finish if running
- */
+ dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
+ DRIVER_MODESET);
mutex_unlock(&dev->struct_mutex);
+
+ del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
- mutex_lock(&dev->struct_mutex);
+ cancel_delayed_work_sync(&dev_priv->mm.idle_work);
- i915_kernel_lost_context(dev);
+ return 0;
- /* Flush the GPU along with all non-CPU write domains
- */
- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+err:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
- if (seqno == 0) {
- mutex_unlock(&dev->struct_mutex);
- return -ENOMEM;
- }
+int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
+ u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
+ int i, ret;
- dev_priv->mm.waiting_gem_seqno = seqno;
- last_seqno = 0;
- stuck = 0;
- for (;;) {
- cur_seqno = i915_get_gem_seqno(dev);
- if (i915_seqno_passed(cur_seqno, seqno))
- break;
- if (last_seqno == cur_seqno) {
- if (stuck++ > 100) {
- DRM_ERROR("hardware wedged\n");
- atomic_set(&dev_priv->mm.wedged, 1);
- DRM_WAKEUP(&dev_priv->irq_queue);
- break;
- }
- }
- msleep(10);
- last_seqno = cur_seqno;
- }
- dev_priv->mm.waiting_gem_seqno = 0;
+ if (!HAS_L3_DPF(dev) || !remap_info)
+ return 0;
- i915_gem_retire_requests(dev);
+ ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+ if (ret)
+ return ret;
- spin_lock(&dev_priv->mm.active_list_lock);
- if (!atomic_read(&dev_priv->mm.wedged)) {
- /* Active and flushing should now be empty as we've
- * waited for a sequence higher than any pending execbuffer
- */
- WARN_ON(!list_empty(&dev_priv->mm.active_list));
- WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
- /* Request should now be empty as we've also waited
- * for the last request in the list
- */
- WARN_ON(!list_empty(&dev_priv->mm.request_list));
+ /*
+ * Note: We do not worry about the concurrent register cacheline hang
+ * here because no other code should access these registers other than
+ * at initialization time.
+ */
+ for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, reg_base + i);
+ intel_ring_emit(ring, remap_info[i/4]);
}
- /* Empty the active and flushing lists to inactive. If there's
- * anything left at this point, it means that we're wedged and
- * nothing good's going to happen by leaving them there. So strip
- * the GPU domains and just stuff them onto inactive.
- */
- while (!list_empty(&dev_priv->mm.active_list)) {
- struct drm_gem_object *obj;
- uint32_t old_write_domain;
+ intel_ring_advance(ring);
- obj = list_first_entry(&dev_priv->mm.active_list,
- struct drm_i915_gem_object,
- list)->obj;
- old_write_domain = obj->write_domain;
- obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj);
+ return ret;
+}
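Per register, the loop above emits one three-dword MI_LOAD_REGISTER_IMM packet; sketched out (with slice and i as in the function):

	/*
	 *   MI_LOAD_REGISTER_IMM(1)
	 *   GEN7_L3LOG_BASE + slice * 0x200 + i
	 *   remap_info[i / 4]
	 */

so a full slice costs GEN7_L3LOG_SIZE / 4 such packets, which matches the intel_ring_begin() reservation of GEN7_L3LOG_SIZE / 4 * 3 dwords.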
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
- spin_unlock(&dev_priv->mm.active_list_lock);
+void i915_gem_init_swizzling(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
- while (!list_empty(&dev_priv->mm.flushing_list)) {
- struct drm_gem_object *obj;
- uint32_t old_write_domain;
+ if (INTEL_INFO(dev)->gen < 5 ||
+ dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+ return;
- obj = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- list)->obj;
- old_write_domain = obj->write_domain;
- obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj);
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_TILE_SURFACE_SWIZZLING);
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
+ if (IS_GEN5(dev))
+ return;
+ I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
+ if (IS_GEN6(dev))
+ I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ else if (IS_GEN7(dev))
+ I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ else if (IS_GEN8(dev))
+ I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
+ else
+ BUG();
+}
- /* Move all inactive buffers out of the GTT. */
- ret = i915_gem_evict_from_inactive_list(dev);
- WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+static bool
+intel_enable_blt(struct drm_device *dev)
+{
+ if (!HAS_BLT(dev))
+ return false;
- i915_gem_cleanup_ringbuffer(dev);
- mutex_unlock(&dev->struct_mutex);
+ /* The blitter was dysfunctional on early prototypes */
+ if (IS_GEN6(dev) && dev->pdev->revision < 8) {
+ DRM_INFO("BLT not supported on this pre-production hardware;"
+ " graphics performance will be degraded.\n");
+ return false;
+ }
- return 0;
+ return true;
}
-static int
-i915_gem_init_hws(struct drm_device *dev)
+static int i915_gem_init_rings(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- /* If we need a physical address for the status page, it's already
- * initialized at driver load time.
- */
- if (!I915_NEED_GFX_HWS(dev))
- return 0;
+ ret = intel_init_render_ring_buffer(dev);
+ if (ret)
+ return ret;
- obj = drm_gem_object_alloc(dev, 4096);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate status page\n");
- return -ENOMEM;
+ if (HAS_BSD(dev)) {
+ ret = intel_init_bsd_ring_buffer(dev);
+ if (ret)
+ goto cleanup_render_ring;
}
- obj_priv = obj->driver_private;
- obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
- ret = i915_gem_object_pin(obj, 4096);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- return ret;
+ if (intel_enable_blt(dev)) {
+ ret = intel_init_blt_ring_buffer(dev);
+ if (ret)
+ goto cleanup_bsd_ring;
}
- dev_priv->status_gfx_addr = obj_priv->gtt_offset;
+ if (HAS_VEBOX(dev)) {
+ ret = intel_init_vebox_ring_buffer(dev);
+ if (ret)
+ goto cleanup_blt_ring;
+ }
- dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
- if (dev_priv->hw_status_page == NULL) {
- DRM_ERROR("Failed to map status page.\n");
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- return -EINVAL;
+ if (HAS_BSD2(dev)) {
+ ret = intel_init_bsd2_ring_buffer(dev);
+ if (ret)
+ goto cleanup_vebox_ring;
}
- dev_priv->hws_obj = obj;
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
- I915_READ(HWS_PGA); /* posting read */
- DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+
+ ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
+ if (ret)
+ goto cleanup_bsd2_ring;
return 0;
+
+cleanup_bsd2_ring:
+ intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
+cleanup_vebox_ring:
+ intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
+cleanup_blt_ring:
+ intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
+cleanup_bsd_ring:
+ intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
+cleanup_render_ring:
+ intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
+
+ return ret;
}
-static void
-i915_gem_cleanup_hws(struct drm_device *dev)
+int
+i915_gem_init_hw(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret, i;
- if (dev_priv->hws_obj == NULL)
- return;
+ if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+ return -EIO;
- obj = dev_priv->hws_obj;
- obj_priv = obj->driver_private;
+ if (dev_priv->ellc_size)
+ I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
+
+ if (IS_HASWELL(dev))
+ I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
+ LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
+
+ if (HAS_PCH_NOP(dev)) {
+ if (IS_IVYBRIDGE(dev)) {
+ u32 temp = I915_READ(GEN7_MSG_CTL);
+ temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
+ I915_WRITE(GEN7_MSG_CTL, temp);
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
+ temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
+ I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
+ }
+ }
- kunmap(obj_priv->pages[0]);
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- dev_priv->hws_obj = NULL;
+ i915_gem_init_swizzling(dev);
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
- dev_priv->hw_status_page = NULL;
+ ret = i915_gem_init_rings(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NUM_L3_SLICES(dev); i++)
+ i915_gem_l3_remap(&dev_priv->ring[RCS], i);
- /* Write high address into HWS_PGA when disabling. */
- I915_WRITE(HWS_PGA, 0x1ffff000);
+ /*
+ * XXX: Contexts should only be initialized once. Doing a switch to the
+ * default context, however, is something we'd like to do after
+ * reset or thaw (the latter may not actually be necessary for HW, but
+ * goes with our code better). Context switching requires rings (for
+ * the do_switch), but before enabling PPGTT. So don't move this.
+ */
+ ret = i915_gem_context_enable(dev_priv);
+ if (ret && ret != -EIO) {
+ DRM_ERROR("Context enable failed %d\n", ret);
+ i915_gem_cleanup_ringbuffer(dev);
+ }
+
+ return ret;
}
-int
-i915_gem_init_ringbuffer(struct drm_device *dev)
+int i915_gem_init(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- drm_i915_ring_buffer_t *ring = &dev_priv->ring;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- u32 head;
- ret = i915_gem_init_hws(dev);
- if (ret != 0)
- return ret;
+ mutex_lock(&dev->struct_mutex);
- obj = drm_gem_object_alloc(dev, 128 * 1024);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate ringbuffer\n");
- i915_gem_cleanup_hws(dev);
- return -ENOMEM;
+ if (IS_VALLEYVIEW(dev)) {
+ /* VLVA0 (potential hack), BIOS isn't actually waking us */
+ I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
+ if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
+ VLV_GTLC_ALLOWWAKEACK), 10))
+ DRM_DEBUG_DRIVER("allow wake ack timed out\n");
}
- obj_priv = obj->driver_private;
- ret = i915_gem_object_pin(obj, 4096);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- i915_gem_cleanup_hws(dev);
+ i915_gem_init_userptr(dev);
+ i915_gem_init_global_gtt(dev);
+
+ ret = i915_gem_context_init(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
- /* Set up the kernel mapping for the ring. */
- ring->Size = obj->size;
-
- ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
- ring->map.size = obj->size;
- ring->map.type = 0;
- ring->map.flags = 0;
- ring->map.mtrr = 0;
-
- drm_core_ioremap_wc(&ring->map, dev);
- if (ring->map.handle == NULL) {
- DRM_ERROR("Failed to map ringbuffer.\n");
- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- i915_gem_cleanup_hws(dev);
- return -EINVAL;
- }
- ring->ring_obj = obj;
- ring->virtual_start = ring->map.handle;
-
- /* Stop the ring if it's running. */
- I915_WRITE(PRB0_CTL, 0);
- I915_WRITE(PRB0_TAIL, 0);
- I915_WRITE(PRB0_HEAD, 0);
-
- /* Initialize the ring. */
- I915_WRITE(PRB0_START, obj_priv->gtt_offset);
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
- /* G45 ring initialization fails to reset head to zero */
- if (head != 0) {
- DRM_ERROR("Ring head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD),
- I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- I915_WRITE(PRB0_HEAD, 0);
-
- DRM_ERROR("Ring head forced to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD),
- I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- }
-
- I915_WRITE(PRB0_CTL,
- ((obj->size - 4096) & RING_NR_PAGES) |
- RING_NO_REPORT |
- RING_VALID);
-
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
- /* If the head is still not zero, the ring is dead */
- if (head != 0) {
- DRM_ERROR("Ring initialization failed "
- "ctl %08x head %08x tail %08x start %08x\n",
- I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD),
- I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- return -EIO;
+ ret = i915_gem_init_hw(dev);
+ if (ret == -EIO) {
+ /* Allow ring initialisation to fail by marking the GPU as
+ * wedged. But we only want to do this where the GPU is angry;
+ * for any other failure, such as an allocation failure, bail.
+ */
+ DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+ atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+ ret = 0;
}
+ mutex_unlock(&dev->struct_mutex);
- /* Update our cache of the ring state */
+ /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_kernel_lost_context(dev);
- else {
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
- }
-
- return 0;
+ dev_priv->dri1.allow_batchbuffer = 1;
+ return ret;
}
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (dev_priv->ring.ring_obj == NULL)
- return;
-
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
-
- i915_gem_object_unpin(dev_priv->ring.ring_obj);
- drm_gem_object_unreference(dev_priv->ring.ring_obj);
- dev_priv->ring.ring_obj = NULL;
- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int i;
- i915_gem_cleanup_hws(dev);
+ for_each_ring(ring, dev_priv, i)
+ intel_cleanup_ring_buffer(ring);
}
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- if (atomic_read(&dev_priv->mm.wedged)) {
+ if (i915_reset_in_progress(&dev_priv->gpu_error)) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
- atomic_set(&dev_priv->mm.wedged, 0);
+ atomic_set(&dev_priv->gpu_error.reset_counter, 0);
}
mutex_lock(&dev->struct_mutex);
- dev_priv->mm.suspended = 0;
+ dev_priv->ums.mm_suspended = 0;
- ret = i915_gem_init_ringbuffer(dev);
+ ret = i915_gem_init_hw(dev);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
- spin_lock(&dev_priv->mm.active_list_lock);
- BUG_ON(!list_empty(&dev_priv->mm.active_list));
- spin_unlock(&dev_priv->mm.active_list_lock);
+ BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
- BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- BUG_ON(!list_empty(&dev_priv->mm.request_list));
+ ret = drm_irq_install(dev, dev->pdev->irq);
+ if (ret)
+ goto cleanup_ringbuffer;
mutex_unlock(&dev->struct_mutex);
- drm_irq_install(dev);
-
return 0;
+
+cleanup_ringbuffer:
+ i915_gem_cleanup_ringbuffer(dev);
+ dev_priv->ums.mm_suspended = 1;
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
}
int
@@ -4759,8 +4818,11 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
+ mutex_lock(&dev->struct_mutex);
drm_irq_uninstall(dev);
- return i915_gem_idle(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return i915_gem_suspend(dev);
}
void
@@ -4771,358 +4833,388 @@ i915_gem_lastclose(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- ret = i915_gem_idle(dev);
+ ret = i915_gem_suspend(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
}
+static void
+init_ring_lists(struct intel_engine_cs *ring)
+{
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+}
+
+void i915_init_vm(struct drm_i915_private *dev_priv,
+ struct i915_address_space *vm)
+{
+ if (!i915_is_ggtt(vm))
+ drm_mm_init(&vm->mm, vm->start, vm->total);
+ vm->dev = dev_priv->dev;
+ INIT_LIST_HEAD(&vm->active_list);
+ INIT_LIST_HEAD(&vm->inactive_list);
+ INIT_LIST_HEAD(&vm->global_link);
+ list_add_tail(&vm->global_link, &dev_priv->vm_list);
+}
+
void
i915_gem_load(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- drm_i915_private_t *dev_priv = dev->dev_private;
- spin_lock_init(&dev_priv->mm.active_list_lock);
- INIT_LIST_HEAD(&dev_priv->mm.active_list);
- INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
- INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
- INIT_LIST_HEAD(&dev_priv->mm.request_list);
+ dev_priv->slab =
+ kmem_cache_create("i915_gem_object",
+ sizeof(struct drm_i915_gem_object), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+
+ INIT_LIST_HEAD(&dev_priv->vm_list);
+ i915_init_vm(dev_priv, &dev_priv->gtt.base);
+
+ INIT_LIST_HEAD(&dev_priv->context_list);
+ INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
+ INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ init_ring_lists(&dev_priv->ring[i]);
+ for (i = 0; i < I915_MAX_NUM_FENCES; i++)
+ INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
- dev_priv->mm.next_gem_seqno = 1;
+ INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+ i915_gem_idle_work_handler);
+ init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
+
+ /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
+ I915_WRITE(MI_ARB_STATE,
+ _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+ }
- spin_lock(&shrink_list_lock);
- list_add(&dev_priv->mm.shrink_list, &shrink_list);
- spin_unlock(&shrink_list_lock);
+ dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
/* Old X drivers will take 0-2 for front, back, depth buffers */
- dev_priv->fence_reg_start = 3;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ dev_priv->fence_reg_start = 3;
- if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
+ dev_priv->num_fence_regs = 32;
+ else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dev_priv->num_fence_regs = 16;
else
dev_priv->num_fence_regs = 8;
/* Initialize fence registers to zero */
- if (IS_I965G(dev)) {
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
- } else {
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
- }
+ INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ i915_gem_restore_fences(dev);
+
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
-}
-
-/*
- * Create a physically contiguous memory object for this object
- * e.g. for cursor + overlay regs
- */
-int i915_gem_init_phys_object(struct drm_device *dev,
- int id, int size)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_phys_object *phys_obj;
- int ret;
-
- if (dev_priv->mm.phys_objs[id - 1] || !size)
- return 0;
-
- phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
- if (!phys_obj)
- return -ENOMEM;
- phys_obj->id = id;
-
- phys_obj->handle = drm_pci_alloc(dev, size, 0);
- if (!phys_obj->handle) {
- ret = -ENOMEM;
- goto kfree_obj;
- }
-#ifdef CONFIG_X86
- set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
+ dev_priv->mm.interruptible = true;
- dev_priv->mm.phys_objs[id - 1] = phys_obj;
+ dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
+ dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
+ dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+ register_shrinker(&dev_priv->mm.shrinker);
- return 0;
-kfree_obj:
- kfree(phys_obj);
- return ret;
+ dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
+ register_oom_notifier(&dev_priv->mm.oom_notifier);
}
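The shrinker/OOM registration at the end of i915_gem_load() follows the generic pattern below — a minimal standalone sketch under the post-3.12 split count/scan shrinker API (the mycache_* names are hypothetical, and the callbacks deliberately report nothing reclaimable):

	#include <linux/module.h>
	#include <linux/shrinker.h>

	static unsigned long mycache_count(struct shrinker *s,
					   struct shrink_control *sc)
	{
		return 0;	/* nothing reclaimable in this sketch */
	}

	static unsigned long mycache_scan(struct shrinker *s,
					  struct shrink_control *sc)
	{
		return SHRINK_STOP;	/* nothing was freed */
	}

	static struct shrinker mycache_shrinker = {
		.count_objects = mycache_count,
		.scan_objects = mycache_scan,
		.seeks = DEFAULT_SEEKS,
	};

	static int __init mycache_init(void)
	{
		return register_shrinker(&mycache_shrinker);
	}

	static void __exit mycache_exit(void)
	{
		unregister_shrinker(&mycache_shrinker);
	}

	module_init(mycache_init);
	module_exit(mycache_exit);
	MODULE_LICENSE("GPL");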
-void i915_gem_free_phys_object(struct drm_device *dev, int id)
+void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_phys_object *phys_obj;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
- if (!dev_priv->mm.phys_objs[id - 1])
- return;
+ cancel_delayed_work_sync(&file_priv->mm.idle_work);
- phys_obj = dev_priv->mm.phys_objs[id - 1];
- if (phys_obj->cur_obj) {
- i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
- }
+ /* Clean up our request list when the client is going away, so that
+ * later retire_requests won't dereference our soon-to-be-gone
+ * file_priv.
+ */
+ spin_lock(&file_priv->mm.lock);
+ while (!list_empty(&file_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
-#ifdef CONFIG_X86
- set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
- drm_pci_free(dev, phys_obj->handle);
- kfree(phys_obj);
- dev_priv->mm.phys_objs[id - 1] = NULL;
+ request = list_first_entry(&file_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ client_list);
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ }
+ spin_unlock(&file_priv->mm.lock);
}
-void i915_gem_free_all_phys_object(struct drm_device *dev)
+static void
+i915_gem_file_idle_work_handler(struct work_struct *work)
{
- int i;
+ struct drm_i915_file_private *file_priv =
+ container_of(work, typeof(*file_priv), mm.idle_work.work);
- for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
- i915_gem_free_phys_object(dev, i);
+ atomic_set(&file_priv->rps_wait_boost, false);
}
-void i915_gem_detach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj)
+int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv;
- int i;
+ struct drm_i915_file_private *file_priv;
int ret;
- int page_count;
- obj_priv = obj->driver_private;
- if (!obj_priv->phys_obj)
- return;
+ DRM_DEBUG_DRIVER("\n");
- ret = i915_gem_object_get_pages(obj);
- if (ret)
- goto out;
+ file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
- page_count = obj->size / PAGE_SIZE;
+ file->driver_priv = file_priv;
+ file_priv->dev_priv = dev->dev_private;
+ file_priv->file = file;
- for (i = 0; i < page_count; i++) {
- char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
- char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+ spin_lock_init(&file_priv->mm.lock);
+ INIT_LIST_HEAD(&file_priv->mm.request_list);
+ INIT_DELAYED_WORK(&file_priv->mm.idle_work,
+ i915_gem_file_idle_work_handler);
- memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(dst, KM_USER0);
- }
- drm_clflush_pages(obj_priv->pages, page_count);
- drm_agp_chipset_flush(dev);
+ ret = i915_gem_context_open(dev, file);
+ if (ret)
+ kfree(file_priv);
- i915_gem_object_put_pages(obj);
-out:
- obj_priv->phys_obj->cur_obj = NULL;
- obj_priv->phys_obj = NULL;
+ return ret;
}
-int
-i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj, int id)
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- int ret = 0;
- int page_count;
- int i;
+ if (!mutex_is_locked(mutex))
+ return false;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+ return mutex->owner == task;
+#else
+ /* Since UP may be pre-empted, we cannot assume that we own the lock */
+ return false;
+#endif
+}
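The owner check matters because direct reclaim can re-enter the driver on the very thread that already holds struct_mutex; an illustrative call chain (assuming shmem-backed objects):

	i915_gem_object_get_pages_gtt()
	  -> shmem_read_mapping_page()		/* may allocate and reclaim */
	    -> i915_gem_shrinker_scan()		/* struct_mutex already held */

Taking the mutex again would deadlock, so i915_gem_shrinker_lock() below lets the scan proceed without reacquiring it when current is already the owner.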
- if (id > I915_MAX_PHYS_OBJECT)
- return -EINVAL;
+static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+{
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return false;
- obj_priv = obj->driver_private;
+ if (to_i915(dev)->mm.shrinker_no_lock_stealing)
+ return false;
- if (obj_priv->phys_obj) {
- if (obj_priv->phys_obj->id == id)
- return 0;
- i915_gem_detach_phys_object(dev, obj);
- }
+ *unlock = false;
+ } else
+ *unlock = true;
+ return true;
+}
- /* create a new object */
- if (!dev_priv->mm.phys_objs[id - 1]) {
- ret = i915_gem_init_phys_object(dev, id,
- obj->size);
- if (ret) {
- DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
- goto out;
- }
- }
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ int count = 0;
- /* bind to the object */
- obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
- obj_priv->phys_obj->cur_obj = obj;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (drm_mm_node_allocated(&vma->node))
+ count++;
- ret = i915_gem_object_get_pages(obj);
- if (ret) {
- DRM_ERROR("failed to get page list\n");
- goto out;
- }
+ return count;
+}
- page_count = obj->size / PAGE_SIZE;
+static unsigned long
+i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker, struct drm_i915_private, mm.shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj;
+ unsigned long count;
+ bool unlock;
+
+ if (!i915_gem_shrinker_lock(dev, &unlock))
+ return 0;
- for (i = 0; i < page_count; i++) {
- char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
- char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+ count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
+ if (obj->pages_pin_count == 0)
+ count += obj->base.size >> PAGE_SHIFT;
- memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(src, KM_USER0);
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!i915_gem_obj_is_pinned(obj) &&
+ obj->pages_pin_count == num_vma_bound(obj))
+ count += obj->base.size >> PAGE_SHIFT;
}
- i915_gem_object_put_pages(obj);
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
- return 0;
-out:
- return ret;
+ return count;
}
-static int
-i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+/* All the new VM stuff */
+unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- void *obj_addr;
- int ret;
- char __user *user_data;
+ struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct i915_vma *vma;
- user_data = (char __user *) (uintptr_t) args->data_ptr;
- obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+ if (!dev_priv->mm.aliasing_ppgtt ||
+ vm == &dev_priv->mm.aliasing_ppgtt->base)
+ vm = &dev_priv->gtt.base;
- DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
- ret = copy_from_user(obj_addr, user_data, args->size);
- if (ret)
- return -EFAULT;
+ BUG_ON(list_empty(&o->vma_list));
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (vma->vm == vm)
+ return vma->node.start;
- drm_agp_chipset_flush(dev);
- return 0;
+ }
+ return -1;
}
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+ struct i915_vma *vma;
- /* Clean up our request list when the client is going away, so that
- * later retire_requests won't dereference our soon-to-be-gone
- * file_priv.
- */
- mutex_lock(&dev->struct_mutex);
- while (!list_empty(&i915_file_priv->mm.request_list))
- list_del_init(i915_file_priv->mm.request_list.next);
- mutex_unlock(&dev->struct_mutex);
+ list_for_each_entry(vma, &o->vma_list, vma_link)
+ if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+ return true;
+
+ return false;
}
-static int
-i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
-{
- drm_i915_private_t *dev_priv, *next_dev;
- struct drm_i915_gem_object *obj_priv, *next_obj;
- int cnt = 0;
- int would_deadlock = 1;
-
- /* "fast-path" to count number of available objects */
- if (nr_to_scan == 0) {
- spin_lock(&shrink_list_lock);
- list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (mutex_trylock(&dev->struct_mutex)) {
- list_for_each_entry(obj_priv,
- &dev_priv->mm.inactive_list,
- list)
- cnt++;
- mutex_unlock(&dev->struct_mutex);
- }
- }
- spin_unlock(&shrink_list_lock);
+bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
+{
+ struct i915_vma *vma;
- return (cnt / 100) * sysctl_vfs_cache_pressure;
- }
+ list_for_each_entry(vma, &o->vma_list, vma_link)
+ if (drm_mm_node_allocated(&vma->node))
+ return true;
- spin_lock(&shrink_list_lock);
+ return false;
+}
- /* first scan for clean buffers */
- list_for_each_entry_safe(dev_priv, next_dev,
- &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
+unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
+{
+ struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct i915_vma *vma;
- if (! mutex_trylock(&dev->struct_mutex))
- continue;
+ if (!dev_priv->mm.aliasing_ppgtt ||
+ vm == &dev_priv->mm.aliasing_ppgtt->base)
+ vm = &dev_priv->gtt.base;
- spin_unlock(&shrink_list_lock);
+ BUG_ON(list_empty(&o->vma_list));
- i915_gem_retire_requests(dev);
+ list_for_each_entry(vma, &o->vma_list, vma_link)
+ if (vma->vm == vm)
+ return vma->node.size;
- list_for_each_entry_safe(obj_priv, next_obj,
- &dev_priv->mm.inactive_list,
- list) {
- if (i915_gem_object_is_purgeable(obj_priv)) {
- i915_gem_object_unbind(obj_priv->obj);
- if (--nr_to_scan <= 0)
- break;
- }
- }
+ return 0;
+}
- spin_lock(&shrink_list_lock);
+static unsigned long
+i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker, struct drm_i915_private, mm.shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ unsigned long freed;
+ bool unlock;
+
+ if (!i915_gem_shrinker_lock(dev, &unlock))
+ return SHRINK_STOP;
+
+ freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+ if (freed < sc->nr_to_scan)
+ freed += __i915_gem_shrink(dev_priv,
+ sc->nr_to_scan - freed,
+ false);
+ if (unlock)
mutex_unlock(&dev->struct_mutex);
- would_deadlock = 0;
+ return freed;
+}
- if (nr_to_scan <= 0)
- break;
- }
+static int
+i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(nb, struct drm_i915_private, mm.oom_notifier);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj;
+ unsigned long timeout = msecs_to_jiffies(5000) + 1;
+ unsigned long pinned, bound, unbound, freed;
+ bool was_interruptible;
+ bool unlock;
- /* second pass, evict/count anything still on the inactive list */
- list_for_each_entry_safe(dev_priv, next_dev,
- &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
+ while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout)
+ schedule_timeout_killable(1);
+ if (timeout == 0) {
+ pr_err("Unable to purge GPU memory due lock contention.\n");
+ return NOTIFY_DONE;
+ }
- if (! mutex_trylock(&dev->struct_mutex))
- continue;
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
- spin_unlock(&shrink_list_lock);
+ freed = i915_gem_shrink_all(dev_priv);
- list_for_each_entry_safe(obj_priv, next_obj,
- &dev_priv->mm.inactive_list,
- list) {
- if (nr_to_scan > 0) {
- i915_gem_object_unbind(obj_priv->obj);
- nr_to_scan--;
- } else
- cnt++;
- }
+ dev_priv->mm.interruptible = was_interruptible;
- spin_lock(&shrink_list_lock);
- mutex_unlock(&dev->struct_mutex);
+ /* Because we may be allocating inside our own driver, we cannot
+ * assert that there are no objects with pinned pages that are not
+ * being pointed to by hardware.
+ */
+ unbound = bound = pinned = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ if (!obj->base.filp) /* not backed by a freeable object */
+ continue;
- would_deadlock = 0;
+ if (obj->pages_pin_count)
+ pinned += obj->base.size;
+ else
+ unbound += obj->base.size;
}
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!obj->base.filp)
+ continue;
- spin_unlock(&shrink_list_lock);
+ if (obj->pages_pin_count)
+ pinned += obj->base.size;
+ else
+ bound += obj->base.size;
+ }
- if (would_deadlock)
- return -1;
- else if (cnt > 0)
- return (cnt / 100) * sysctl_vfs_cache_pressure;
- else
- return 0;
-}
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
-static struct shrinker shrinker = {
- .shrink = i915_gem_shrink,
- .seeks = DEFAULT_SEEKS,
-};
+ pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+ freed, pinned);
+ if (unbound || bound)
+ pr_err("%lu and %lu bytes still available in the "
+ "bound and unbound GPU page lists.\n",
+ bound, unbound);
-__init void
-i915_gem_shrinker_init(void)
-{
- register_shrinker(&shrinker);
+ *(unsigned long *)ptr += freed;
+ return NOTIFY_DONE;
}
-__exit void
-i915_gem_shrinker_exit(void)
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
- unregister_shrinker(&shrinker);
+ struct i915_vma *vma;
+
+ /* This WARN has probably outlived its usefulness (callers already
+ * WARN if they don't find the GGTT vma they expect). When removing,
+ * remember to remove the pre-check in is_pin_display() as well */
+ if (WARN_ON(list_empty(&obj->vma_list)))
+ return NULL;
+
+ vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
+ if (vma->vm != obj_to_ggtt(obj))
+ return NULL;
+
+ return vma;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
new file mode 100644
index 00000000000..a5ddf3bce9c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -0,0 +1,810 @@
+/*
+ * Copyright © 2011-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+/*
+ * This file implements HW context support. On gen5+ a HW context consists of an
+ * opaque GPU object which is referenced at times of context saves and restores.
+ * With RC6 enabled, the context is also referenced as the GPU enters and exits
+ * RC6 (the GPU has its own internal power context, except on gen5). Though
+ * something like a context does exist for the media ring, the code only
+ * supports contexts for the render ring.
+ *
+ * In software, there is a distinction between contexts created by the user,
+ * and the default HW context. The default HW context is used by GPU clients
+ * that do not request setup of their own hardware context. The default
+ * context's state is never restored to help prevent programming errors. This
+ * would happen if a client ran and piggy-backed off another client's GPU state.
+ * The default context only exists to give the GPU some offset to load as the
+ * current context to invoke a save of the context we actually care about. In fact, the
+ * code could likely be constructed, albeit in a more complicated fashion, to
+ * never use the default context, though that limits the driver's ability to
+ * swap out, and/or destroy other contexts.
+ *
+ * All other contexts are created as a request by the GPU client. These contexts
+ * store GPU state, and thus allow GPU clients to not re-emit state (and
+ * potentially query certain state) at any time. The kernel driver makes
+ * certain that the appropriate commands are inserted.
+ *
+ * The context life cycle is semi-complicated in that context BOs may live
+ * longer than the context itself because of the way the hardware and object
+ * tracking work. Below is a very crude representation of the state machine
+ * describing the context life.
+ * refcount pincount active
+ * S0: initial state 0 0 0
+ * S1: context created 1 0 0
+ * S2: context is currently running 2 1 X
+ * S3: GPU referenced, but not current 2 0 1
+ * S4: context is current, but destroyed 1 1 0
+ * S5: like S3, but destroyed 1 0 1
+ *
+ * The most common (but not all) transitions:
+ * S0->S1: client creates a context
+ * S1->S2: client submits execbuf with context
+ * S2->S3: another client submits execbuf with context
+ * S3->S1: context object was retired
+ * S3->S2: client submits another execbuf
+ * S2->S4: context destroy called with current context
+ * S3->S5->S0: destroy path
+ * S4->S5->S0: destroy path on current context
+ *
+ * There are two confusing terms used above:
+ * The "current context" means the context which is currently running on the
+ * GPU. The GPU has loaded its state already and has stored away the gtt
+ * offset of the BO. The GPU is not actively referencing the data at this
+ * offset, but it will on the next context switch. The only way to avoid this
+ * is to do a GPU reset.
+ *
+ * An "active context' is one which was previously the "current context" and is
+ * on the active list waiting for the next context switch to occur. Until this
+ * happens, the object must remain at the same gtt offset. It is therefore
+ * possible for a context to be destroyed while it is still active.
+ *
+ */
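The S0->S1 step above is driven from userspace through the context-create ioctl; a minimal, illustrative sketch (assumes an open i915 DRM fd and libdrm's i915_drm.h; gem_context_create is not part of the driver):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static uint32_t gem_context_create(int fd)
	{
		struct drm_i915_gem_context_create create = { 0 };

		if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
			return 0;	/* 0 is the default context, i.e. failure */

		/* S1: refcount 1, pincount 0, not active */
		return create.ctx_id;
	}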
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+/* This is a HW constraint. The value below is the largest known requirement
+ * I've seen in a spec to date, and that was a workaround for a non-shipping
+ * part. It should be safe to decrease this, but it's more future-proof as is.
+ */
+#define GEN6_CONTEXT_ALIGN (64<<10)
+#define GEN7_CONTEXT_ALIGN 4096
+
+static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &ppgtt->base;
+
+ if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
+ (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
+ ppgtt->base.cleanup(&ppgtt->base);
+ return;
+ }
+
+ /*
+ * Make sure vmas are unbound before we take down the drm_mm
+ *
+ * FIXME: Proper refcounting should take care of this, this shouldn't be
+ * needed at all.
+ */
+ if (!list_empty(&vm->active_list)) {
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &vm->active_list, mm_list)
+ if (WARN_ON(list_empty(&vma->vma_link) ||
+ list_is_singular(&vma->vma_link)))
+ break;
+
+ i915_gem_evict_vm(&ppgtt->base, true);
+ } else {
+ i915_gem_retire_requests(dev);
+ i915_gem_evict_vm(&ppgtt->base, false);
+ }
+
+ ppgtt->base.cleanup(&ppgtt->base);
+}
+
+static void ppgtt_release(struct kref *kref)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(kref, struct i915_hw_ppgtt, ref);
+
+ do_ppgtt_cleanup(ppgtt);
+ kfree(ppgtt);
+}
+
+static size_t get_context_alignment(struct drm_device *dev)
+{
+ if (IS_GEN6(dev))
+ return GEN6_CONTEXT_ALIGN;
+
+ return GEN7_CONTEXT_ALIGN;
+}
+
+static int get_context_size(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+ u32 reg;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ reg = I915_READ(CXT_SIZE);
+ ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
+ break;
+ case 7:
+ reg = I915_READ(GEN7_CXT_SIZE);
+ if (IS_HASWELL(dev))
+ ret = HSW_CXT_TOTAL_SIZE;
+ else
+ ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
+ break;
+ case 8:
+ ret = GEN8_CXT_TOTAL_SIZE;
+ break;
+ default:
+ BUG();
+ }
+
+ return ret;
+}
+
+void i915_gem_context_free(struct kref *ctx_ref)
+{
+ struct intel_context *ctx = container_of(ctx_ref,
+ typeof(*ctx), ref);
+ struct i915_hw_ppgtt *ppgtt = NULL;
+
+ if (ctx->obj) {
+ /* We refcount even the aliasing PPGTT to keep the code symmetric */
+ if (USES_PPGTT(ctx->obj->base.dev))
+ ppgtt = ctx_to_ppgtt(ctx);
+
+ /* XXX: Free up the object before tearing down the address space, in
+ * case we're bound in the PPGTT */
+ drm_gem_object_unreference(&ctx->obj->base);
+ }
+
+ if (ppgtt)
+ kref_put(&ppgtt->ref, ppgtt_release);
+ list_del(&ctx->link);
+ kfree(ctx);
+}
+
+static struct i915_hw_ppgtt *
+create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
+{
+ struct i915_hw_ppgtt *ppgtt;
+ int ret;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ ret = i915_gem_init_ppgtt(dev, ppgtt);
+ if (ret) {
+ kfree(ppgtt);
+ return ERR_PTR(ret);
+ }
+
+ ppgtt->ctx = ctx;
+ return ppgtt;
+}
+
+static struct intel_context *
+__create_hw_context(struct drm_device *dev,
+ struct drm_i915_file_private *file_priv)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_context *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&ctx->ref);
+ list_add_tail(&ctx->link, &dev_priv->context_list);
+
+ if (dev_priv->hw_context_size) {
+ ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+ if (ctx->obj == NULL) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ /*
+ * Try to make the context utilize L3 as well as LLC.
+ *
+ * On VLV we don't have L3 controls in the PTEs so we
+ * shouldn't touch the cache level, especially as that
+ * would make the object snooped which might have a
+ * negative performance impact.
+ */
+ if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
+ ret = i915_gem_object_set_cache_level(ctx->obj,
+ I915_CACHE_L3_LLC);
+ /* Failure shouldn't ever happen this early */
+ if (WARN_ON(ret))
+ goto err_out;
+ }
+ }
+
+ /* Default context will never have a file_priv */
+ if (file_priv != NULL) {
+ ret = idr_alloc(&file_priv->context_idr, ctx,
+ DEFAULT_CONTEXT_ID, 0, GFP_KERNEL);
+ if (ret < 0)
+ goto err_out;
+ } else
+ ret = DEFAULT_CONTEXT_ID;
+
+ ctx->file_priv = file_priv;
+ ctx->id = ret;
+ /* NB: Mark all slices as needing a remap so that when the context first
+ * loads it will restore whatever remap state already exists. If there
+ * is no remap info, it will be a NOP. */
+ ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
+
+ return ctx;
+
+err_out:
+ i915_gem_context_unreference(ctx);
+ return ERR_PTR(ret);
+}
+
+/**
+ * The default context needs to exist per ring that uses contexts. It stores the
+ * context state of the GPU for applications that don't utilize HW contexts, as
+ * well as an idle case.
+ */
+static struct intel_context *
+i915_gem_create_context(struct drm_device *dev,
+ struct drm_i915_file_private *file_priv,
+ bool create_vm)
+{
+ const bool is_global_default_ctx = file_priv == NULL;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_context *ctx;
+ int ret = 0;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ ctx = __create_hw_context(dev, file_priv);
+ if (IS_ERR(ctx))
+ return ctx;
+
+ if (is_global_default_ctx && ctx->obj) {
+ /* We may need to do things with the shrinker which
+ * require us to immediately switch back to the default
+ * context. This can cause a problem as pinning the
+ * default context also requires GTT space which may not
+ * be available. To avoid this we always pin the default
+ * context.
+ */
+ ret = i915_gem_obj_ggtt_pin(ctx->obj,
+ get_context_alignment(dev), 0);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
+ goto err_destroy;
+ }
+ }
+
+ if (create_vm) {
+ struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
+
+ if (IS_ERR_OR_NULL(ppgtt)) {
+ DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
+ PTR_ERR(ppgtt));
+ ret = PTR_ERR(ppgtt);
+ goto err_unpin;
+ } else
+ ctx->vm = &ppgtt->base;
+
+ /* This case is reserved for the global default context and
+ * should only happen once. */
+ if (is_global_default_ctx) {
+ if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
+ ret = -EEXIST;
+ goto err_unpin;
+ }
+
+ dev_priv->mm.aliasing_ppgtt = ppgtt;
+ }
+ } else if (USES_PPGTT(dev)) {
+ /* For platforms which only have aliasing PPGTT, we fake the
+ * address space and refcounting. */
+ ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
+ kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
+ } else
+ ctx->vm = &dev_priv->gtt.base;
+
+ return ctx;
+
+err_unpin:
+ if (is_global_default_ctx && ctx->obj)
+ i915_gem_object_ggtt_unpin(ctx->obj);
+err_destroy:
+ i915_gem_context_unreference(ctx);
+ return ERR_PTR(ret);
+}
+
+void i915_gem_context_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Prevent the hardware from restoring the last context (which hung) on
+ * the next switch */
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct intel_engine_cs *ring = &dev_priv->ring[i];
+ struct intel_context *dctx = ring->default_context;
+
+ /* Do a fake switch to the default context */
+ if (ring->last_context == dctx)
+ continue;
+
+ if (!ring->last_context)
+ continue;
+
+ if (dctx->obj && i == RCS) {
+ WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
+ get_context_alignment(dev), 0));
+ /* Fake a finish/inactive */
+ dctx->obj->base.write_domain = 0;
+ dctx->obj->active = 0;
+ }
+
+ i915_gem_context_unreference(ring->last_context);
+ i915_gem_context_reference(dctx);
+ ring->last_context = dctx;
+ }
+}
+
+int i915_gem_context_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_context *ctx;
+ int i;
+
+ /* Init should only be called once per module load. Eventually the
+ * restriction on the context_disabled check can be loosened. */
+ if (WARN_ON(dev_priv->ring[RCS].default_context))
+ return 0;
+
+ if (HAS_HW_CONTEXTS(dev)) {
+ dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+ if (dev_priv->hw_context_size > (1<<20)) {
+ DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
+ dev_priv->hw_context_size);
+ dev_priv->hw_context_size = 0;
+ }
+ }
+
+ ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
+ if (IS_ERR(ctx)) {
+ DRM_ERROR("Failed to create default global context (error %ld)\n",
+ PTR_ERR(ctx));
+ return PTR_ERR(ctx);
+ }
+
+ /* NB: RCS will hold a ref for all rings */
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ dev_priv->ring[i].default_context = ctx;
+
+ DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake");
+ return 0;
+}
+
+void i915_gem_context_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_context *dctx = dev_priv->ring[RCS].default_context;
+ int i;
+
+ if (dctx->obj) {
+ /* The only known way to stop the gpu from accessing the hw context is
+ * to reset it. Do this as the very last operation to avoid confusing
+ * other code, leading to spurious errors. */
+ intel_gpu_reset(dev);
+
+ /* When default context is created and switched to, base object refcount
+ * will be 2 (+1 from object creation and +1 from do_switch()).
+ * i915_gem_context_fini() will be called after gpu_idle() has switched
+ * to default context. So we need to unreference the base object once
+ * to offset the do_switch part, so that i915_gem_context_unreference()
+ * can then free the base object correctly. */
+ WARN_ON(!dev_priv->ring[RCS].last_context);
+ if (dev_priv->ring[RCS].last_context == dctx) {
+ /* Fake switch to NULL context */
+ WARN_ON(dctx->obj->active);
+ i915_gem_object_ggtt_unpin(dctx->obj);
+ i915_gem_context_unreference(dctx);
+ dev_priv->ring[RCS].last_context = NULL;
+ }
+
+ i915_gem_object_ggtt_unpin(dctx->obj);
+ }
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct intel_engine_cs *ring = &dev_priv->ring[i];
+
+ if (ring->last_context)
+ i915_gem_context_unreference(ring->last_context);
+
+ ring->default_context = NULL;
+ ring->last_context = NULL;
+ }
+
+ i915_gem_context_unreference(dctx);
+}
+
+int i915_gem_context_enable(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *ring;
+ int ret, i;
+
+ /* This is the only place the aliasing PPGTT gets enabled, which means
+ * it has to happen before we bail on reset */
+ if (dev_priv->mm.aliasing_ppgtt) {
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ ppgtt->enable(ppgtt);
+ }
+
+ /* FIXME: We should make this work, even in reset */
+ if (i915_reset_in_progress(&dev_priv->gpu_error))
+ return 0;
+
+ BUG_ON(!dev_priv->ring[RCS].default_context);
+
+ for_each_ring(ring, dev_priv, i) {
+ ret = i915_switch_context(ring, ring->default_context);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int context_idr_cleanup(int id, void *p, void *data)
+{
+ struct intel_context *ctx = p;
+
+ i915_gem_context_unreference(ctx);
+ return 0;
+}
+
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct intel_context *ctx;
+
+ idr_init(&file_priv->context_idr);
+
+ mutex_lock(&dev->struct_mutex);
+ ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+ mutex_unlock(&dev->struct_mutex);
+
+ if (IS_ERR(ctx)) {
+ idr_destroy(&file_priv->context_idr);
+ return PTR_ERR(ctx);
+ }
+
+ return 0;
+}
+
+void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
+ idr_destroy(&file_priv->context_idr);
+}
+
+struct intel_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
+{
+ struct intel_context *ctx;
+
+ ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
+ if (!ctx)
+ return ERR_PTR(-ENOENT);
+
+ return ctx;
+}
+
+static inline int
+mi_set_context(struct intel_engine_cs *ring,
+ struct intel_context *new_context,
+ u32 hw_flags)
+{
+ int ret;
+
+ /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
+ * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
+ * explicitly, so we rely on the value at ring init, stored in
+ * itlb_before_ctx_switch.
+ */
+ if (IS_GEN6(ring->dev)) {
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
+ if (INTEL_INFO(ring->dev)->gen >= 7)
+ intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+ else
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_SET_CONTEXT);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ hw_flags);
+ /*
+ * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
+ * WaMiSetContext_Hang:snb,ivb,vlv
+ */
+ intel_ring_emit(ring, MI_NOOP);
+
+ if (INTEL_INFO(ring->dev)->gen >= 7)
+ intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+ else
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
+
+ return ret;
+}
+
+static int do_switch(struct intel_engine_cs *ring,
+ struct intel_context *to)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_context *from = ring->last_context;
+ struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
+ u32 hw_flags = 0;
+ bool uninitialized = false;
+ int ret, i;
+
+ if (from != NULL && ring == &dev_priv->ring[RCS]) {
+ BUG_ON(from->obj == NULL);
+ BUG_ON(!i915_gem_obj_is_pinned(from->obj));
+ }
+
+ if (from == to && from->last_ring == ring && !to->remap_slice)
+ return 0;
+
+ /* Trying to pin first makes error handling easier. */
+ if (ring == &dev_priv->ring[RCS]) {
+ ret = i915_gem_obj_ggtt_pin(to->obj,
+ get_context_alignment(ring->dev), 0);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Pin can switch back to the default context if we end up calling into
+ * evict_everything - as a last-ditch gtt defrag effort that also
+ * switches to the default context. Hence we need to reload from here.
+ */
+ from = ring->last_context;
+
+ if (USES_FULL_PPGTT(ring->dev)) {
+ ret = ppgtt->switch_mm(ppgtt, ring, false);
+ if (ret)
+ goto unpin_out;
+ }
+
+ if (ring != &dev_priv->ring[RCS]) {
+ if (from)
+ i915_gem_context_unreference(from);
+ goto done;
+ }
+
+ /*
+ * Clear this page out of any CPU caches for coherent swap-in/out. Note
+ * that thanks to write = false in this call and us not setting any gpu
+ * write domains when putting a context object onto the active list
+ * (when switching away from it), this won't block.
+ *
+ * XXX: We need a real interface to do this instead of trickery.
+ */
+ ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+ if (ret)
+ goto unpin_out;
+
+ if (!to->obj->has_global_gtt_mapping) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+ &dev_priv->gtt.base);
+ vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+ }
+
+ if (!to->is_initialized || i915_gem_context_is_default(to))
+ hw_flags |= MI_RESTORE_INHIBIT;
+
+ ret = mi_set_context(ring, to, hw_flags);
+ if (ret)
+ goto unpin_out;
+
+ for (i = 0; i < MAX_L3_SLICES; i++) {
+ if (!(to->remap_slice & (1<<i)))
+ continue;
+
+ ret = i915_gem_l3_remap(ring, i);
+ /* If it failed, try again next round */
+ if (ret)
+ DRM_DEBUG_DRIVER("L3 remapping failed\n");
+ else
+ to->remap_slice &= ~(1<<i);
+ }
+
+ /* The backing object for the context is done after switching to the
+ * *next* context. Therefore we cannot retire the previous context until
+ * the next context has already started running. In fact, the below code
+ * is a bit suboptimal because the retiring can occur simply after the
+ * MI_SET_CONTEXT instead of when the next seqno has completed.
+ */
+ if (from != NULL) {
+ from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
+ /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
+ * whole damn pipeline, we don't need to explicitly mark the
+ * object dirty. The only exception is that the context must be
+ * correct in case the object gets swapped out. Ideally we'd be
+ * able to defer doing this until we know the object would be
+ * swapped, but there is no way to do that yet.
+ */
+ from->obj->dirty = 1;
+ BUG_ON(from->obj->ring != ring);
+
+ /* obj is kept alive until the next request by its active ref */
+ i915_gem_object_ggtt_unpin(from->obj);
+ i915_gem_context_unreference(from);
+ }
+
+ uninitialized = !to->is_initialized && from == NULL;
+ to->is_initialized = true;
+
+done:
+ i915_gem_context_reference(to);
+ ring->last_context = to;
+ to->last_ring = ring;
+
+ if (uninitialized) {
+ ret = i915_gem_render_state_init(ring);
+ if (ret)
+ DRM_ERROR("init render state: %d\n", ret);
+ }
+
+ return 0;
+
+unpin_out:
+ if (ring->id == RCS)
+ i915_gem_object_ggtt_unpin(to->obj);
+ return ret;
+}
+
+/**
+ * i915_switch_context() - perform a GPU context switch.
+ * @ring: ring for which we'll execute the context switch
+ * @to: the context to switch to
+ *
+ * The context life cycle is simple. The context refcount is incremented and
+ * decremented by 1 on create and destroy. If the context is in use by the GPU,
+ * it will have a refcount > 1. This allows us to destroy the context abstract
+ * object while letting the normal object tracking destroy the backing BO.
+ */
+int i915_switch_context(struct intel_engine_cs *ring,
+ struct intel_context *to)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+
+ if (to->obj == NULL) { /* We have the fake context */
+ if (to != ring->last_context) {
+ i915_gem_context_reference(to);
+ if (ring->last_context)
+ i915_gem_context_unreference(ring->last_context);
+ ring->last_context = to;
+ }
+ return 0;
+ }
+
+ return do_switch(ring, to);
+}
+
+static bool hw_context_enabled(struct drm_device *dev)
+{
+ return to_i915(dev)->hw_context_size;
+}
+
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_context_create *args = data;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct intel_context *ctx;
+ int ret;
+
+ if (!hw_context_enabled(dev))
+ return -ENODEV;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ args->ctx_id = ctx->id;
+ DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
+
+ return 0;
+}
+
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_context_destroy *args = data;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct intel_context *ctx;
+ int ret;
+
+ if (args->ctx_id == DEFAULT_CONTEXT_ID)
+ return -ENOENT;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ if (IS_ERR(ctx)) {
+ mutex_unlock(&dev->struct_mutex);
+ return PTR_ERR(ctx);
+ }
+
+ idr_remove(&ctx->file_priv->context_idr, ctx->id);
+ i915_gem_context_unreference(ctx);
+ mutex_unlock(&dev->struct_mutex);
+
+ DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
+ return 0;
+}
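
For reference, the create/destroy ioctls above are reached from userspace through
the uapi structs in include/uapi/drm/i915_drm.h. A minimal sketch, assuming an
already-open render-capable DRM fd and with error handling trimmed (the header
path below follows the usual libdrm include layout):

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Returns the new context id, or 0 on failure; 0 itself is
	 * DEFAULT_CONTEXT_ID and is never handed out by the create ioctl. */
	static __u32 create_hw_context(int fd)
	{
		struct drm_i915_gem_context_create create = { 0 };

		if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
			return 0;
		return create.ctx_id;
	}

	static void destroy_hw_context(int fd, __u32 ctx_id)
	{
		struct drm_i915_gem_context_destroy destroy = { .ctx_id = ctx_id };

		ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
	}
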
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index e602614bd3f..f462d1b51d9 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -25,179 +25,94 @@
*
*/
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
#include "i915_drv.h"
-#if WATCH_INACTIVE
-void
-i915_verify_inactive(struct drm_device *dev, char *file, int line)
+#if WATCH_LISTS
+int
+i915_verify_lists(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- obj = obj_priv->obj;
- if (obj_priv->pin_count || obj_priv->active ||
- (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)))
- DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
+ static int warned;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ if (warned)
+ return 0;
+
+ list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed render active %p\n", obj);
+ err++;
+ break;
+ } else if (!obj->active ||
+ (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
+ DRM_ERROR("invalid render active %p (a %d r %x)\n",
obj,
- obj_priv->pin_count, obj_priv->active,
- obj->write_domain, file, line);
- }
-}
-#endif /* WATCH_INACTIVE */
-
-
-#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
-static void
-i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
- uint32_t bias, uint32_t mark)
-{
- uint32_t *mem = kmap_atomic(page, KM_USER0);
- int i;
- for (i = start; i < end; i += 4)
- DRM_INFO("%08x: %08x%s\n",
- (int) (bias + i), mem[i / 4],
- (bias + i == mark) ? " ********" : "");
- kunmap_atomic(mem, KM_USER0);
- /* give syslog time to catch up */
- msleep(1);
-}
-
-void
-i915_gem_dump_object(struct drm_gem_object *obj, int len,
- const char *where, uint32_t mark)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int page;
-
- DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
- for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
- int page_len, chunk, chunk_len;
-
- page_len = len - page * PAGE_SIZE;
- if (page_len > PAGE_SIZE)
- page_len = PAGE_SIZE;
-
- for (chunk = 0; chunk < page_len; chunk += 128) {
- chunk_len = page_len - chunk;
- if (chunk_len > 128)
- chunk_len = 128;
- i915_gem_dump_page(obj_priv->pages[page],
- chunk, chunk + chunk_len,
- obj_priv->gtt_offset +
- page * PAGE_SIZE,
- mark);
+ obj->active,
+ obj->base.read_domains);
+ err++;
+ } else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
+ DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
+ obj,
+ obj->base.write_domain,
+ !list_empty(&obj->gpu_write_list));
+ err++;
}
}
-}
-#endif
-
-#if WATCH_LRU
-void
-i915_dump_lru(struct drm_device *dev, const char *where)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
-
- DRM_INFO("active list %s {\n", where);
- spin_lock(&dev_priv->mm.active_list_lock);
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
- list)
- {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- spin_unlock(&dev_priv->mm.active_list_lock);
- DRM_INFO("}\n");
- DRM_INFO("flushing list %s {\n", where);
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
- list)
- {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- DRM_INFO("}\n");
- DRM_INFO("inactive %s {\n", where);
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- DRM_INFO("}\n");
-}
-#endif
-
-#if WATCH_COHERENCY
-void
-i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int page;
- uint32_t *gtt_mapping;
- uint32_t *backing_map = NULL;
- int bad_count = 0;
-
- DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
- __func__, obj, obj_priv->gtt_offset, handle,
- obj->size / 1024);
-
- gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
- obj->size);
- if (gtt_mapping == NULL) {
- DRM_ERROR("failed to map GTT space\n");
- return;
+ list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed flushing %p\n", obj);
+ err++;
+ break;
+ } else if (!obj->active ||
+ (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
+ list_empty(&obj->gpu_write_list)) {
+ DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
+ obj,
+ obj->active,
+ obj->base.write_domain,
+ !list_empty(&obj->gpu_write_list));
+ err++;
+ }
}
- for (page = 0; page < obj->size / PAGE_SIZE; page++) {
- int i;
-
- backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
-
- if (backing_map == NULL) {
- DRM_ERROR("failed to map backing page\n");
- goto out;
+ list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed gpu write %p\n", obj);
+ err++;
+ break;
+ } else if (!obj->active ||
+ (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
+ DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
+ obj,
+ obj->active,
+ obj->base.write_domain);
+ err++;
}
+ }
- for (i = 0; i < PAGE_SIZE / 4; i++) {
- uint32_t cpuval = backing_map[i];
- uint32_t gttval = readl(gtt_mapping +
- page * 1024 + i);
-
- if (cpuval != gttval) {
- DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
- "0x%08x vs 0x%08x\n",
- (int)(obj_priv->gtt_offset +
- page * PAGE_SIZE + i * 4),
- cpuval, gttval);
- if (bad_count++ >= 8) {
- DRM_INFO("...\n");
- goto out;
- }
- }
+ list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed inactive %p\n", obj);
+ err++;
+ break;
+ } else if (obj->pin_count || obj->active ||
+ (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+ DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
+ obj,
+ obj->pin_count, obj->active,
+ obj->base.write_domain);
+ err++;
}
- kunmap_atomic(backing_map, KM_USER0);
- backing_map = NULL;
}
- out:
- if (backing_map != NULL)
- kunmap_atomic(backing_map, KM_USER0);
- iounmap(gtt_mapping);
-
- /* give syslog time to catch up */
- msleep(1);
-
- /* Directly flush the object, since we just loaded values with the CPU
- * from the backing pages and we don't want to disturb the cache
- * management that we're trying to observe.
- */
-
- i915_gem_clflush_object(obj);
+ return warned = err;
}
-#endif
+#endif /* WATCH_LISTS */
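
The checker above compiles away unless WATCH_LISTS is enabled. A sketch of the
usual wiring, assuming the stub macro lives in i915_drv.h per the common kernel
pattern (treat the exact location and guard value as assumptions):

	/* i915_drv.h (sketch): a disabled build keeps call sites cheap. */
	#define WATCH_LISTS 0

	#if WATCH_LISTS
	int i915_verify_lists(struct drm_device *dev);
	#else
	#define i915_verify_lists(dev) 0
	#endif

	/* A call site inside the GEM code can then assert list sanity: */
	WARN_ON(i915_verify_lists(dev));
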
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
new file mode 100644
index 00000000000..580aa42443e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -0,0 +1,313 @@
+/*
+ * Copyright 2012 Red Hat Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include "i915_drv.h"
+#include <linux/dma-buf.h>
+
+static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
+{
+ return to_intel_bo(buf->priv);
+}
+
+static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
+ struct sg_table *st;
+ struct scatterlist *src, *dst;
+ int ret, i;
+
+ ret = i915_mutex_lock_interruptible(obj->base.dev);
+ if (ret)
+ goto err;
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ goto err_unlock;
+
+ i915_gem_object_pin_pages(obj);
+
+ /* Copy sg so that we make an independent mapping */
+ st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto err_unpin;
+ }
+
+ ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+ if (ret)
+ goto err_free;
+
+ src = obj->pages->sgl;
+ dst = st->sgl;
+ for (i = 0; i < obj->pages->nents; i++) {
+ sg_set_page(dst, sg_page(src), src->length, 0);
+ dst = sg_next(dst);
+ src = sg_next(src);
+ }
+
+ if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+ ret = -ENOMEM;
+ goto err_free_sg;
+ }
+
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ return st;
+
+err_free_sg:
+ sg_free_table(st);
+err_free:
+ kfree(st);
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err_unlock:
+ mutex_unlock(&obj->base.dev->struct_mutex);
+err:
+ return ERR_PTR(ret);
+}
+
+static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sg,
+ enum dma_data_direction dir)
+{
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
+
+ mutex_lock(&obj->base.dev->struct_mutex);
+
+ dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ sg_free_table(sg);
+ kfree(sg);
+
+ i915_gem_object_unpin_pages(obj);
+
+ mutex_unlock(&obj->base.dev->struct_mutex);
+}
+
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ struct drm_device *dev = obj->base.dev;
+ struct sg_page_iter sg_iter;
+ struct page **pages;
+ int ret, i;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (obj->dma_buf_vmapping) {
+ obj->vmapping_count++;
+ goto out_unlock;
+ }
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ goto err;
+
+ i915_gem_object_pin_pages(obj);
+
+ ret = -ENOMEM;
+
+ pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+ if (pages == NULL)
+ goto err_unpin;
+
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
+ pages[i++] = sg_page_iter_page(&sg_iter);
+
+ obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
+ drm_free_large(pages);
+
+ if (!obj->dma_buf_vmapping)
+ goto err_unpin;
+
+ obj->vmapping_count = 1;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return obj->dma_buf_vmapping;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err:
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ struct drm_device *dev = obj->base.dev;
+
+ mutex_lock(&dev->struct_mutex);
+ if (--obj->vmapping_count == 0) {
+ vunmap(obj->dma_buf_vmapping);
+ obj->dma_buf_vmapping = NULL;
+
+ i915_gem_object_unpin_pages(obj);
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+}
+
+static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+{
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ struct drm_device *dev = obj->base.dev;
+ int ret;
+ bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, write);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+static const struct dma_buf_ops i915_dmabuf_ops = {
+ .map_dma_buf = i915_gem_map_dma_buf,
+ .unmap_dma_buf = i915_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .kmap = i915_gem_dmabuf_kmap,
+ .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
+ .kunmap = i915_gem_dmabuf_kunmap,
+ .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+ .mmap = i915_gem_dmabuf_mmap,
+ .vmap = i915_gem_dmabuf_vmap,
+ .vunmap = i915_gem_dmabuf_vunmap,
+ .begin_cpu_access = i915_gem_begin_cpu_access,
+};
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gem_obj, int flags)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+
+ if (obj->ops->dmabuf_export) {
+ int ret = obj->ops->dmabuf_export(obj);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
+}
+
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+ struct sg_table *sg;
+
+ sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg))
+ return PTR_ERR(sg);
+
+ obj->pages = sg;
+ obj->has_dma_mapping = true;
+ return 0;
+}
+
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+ dma_buf_unmap_attachment(obj->base.import_attach,
+ obj->pages, DMA_BIDIRECTIONAL);
+ obj->has_dma_mapping = false;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
+ .get_pages = i915_gem_object_get_pages_dmabuf,
+ .put_pages = i915_gem_object_put_pages_dmabuf,
+};
+
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ /* is this one of our own objects? */
+ if (dma_buf->ops == &i915_dmabuf_ops) {
+ obj = dma_buf_to_obj(dma_buf);
+ /* is it from our device? */
+ if (obj->base.dev == dev) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ drm_gem_object_reference(&obj->base);
+ return &obj->base;
+ }
+ }
+
+ /* need to attach */
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ get_dma_buf(dma_buf);
+
+ obj = i915_gem_object_alloc(dev);
+ if (obj == NULL) {
+ ret = -ENOMEM;
+ goto fail_detach;
+ }
+
+ drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
+ i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
+ obj->base.import_attach = attach;
+
+ return &obj->base;
+
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
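
These export/import hooks back the PRIME ioctls, so a buffer can be handed
between devices or processes as a dma-buf fd. A hedged userspace sketch using
libdrm's wrappers (open fds assumed, helper name hypothetical):

	#include <stdint.h>
	#include <unistd.h>
	#include <xf86drm.h>

	/* Export a GEM handle from one DRM fd and import it on another;
	 * returns 0 on success and stores the importer-side handle in *out. */
	int share_bo(int exporter_fd, uint32_t handle, int importer_fd,
		     uint32_t *out)
	{
		int prime_fd;

		if (drmPrimeHandleToFD(exporter_fd, handle, DRM_CLOEXEC, &prime_fd))
			return -1;
		if (drmPrimeFDToHandle(importer_fd, prime_fd, out)) {
			close(prime_fd);
			return -1;
		}
		close(prime_fd); /* both sides now hold their own references */
		return 0;
	}
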
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
new file mode 100644
index 00000000000..bbf4b12d842
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "i915_trace.h"
+
+static bool
+mark_free(struct i915_vma *vma, struct list_head *unwind)
+{
+ if (vma->pin_count)
+ return false;
+
+ if (WARN_ON(!list_empty(&vma->exec_list)))
+ return false;
+
+ list_add(&vma->exec_list, unwind);
+ return drm_mm_scan_add_block(&vma->node);
+}
+
+/**
+ * i915_gem_evict_something - Evict vmas to make room for binding a new one
+ * @dev: drm_device
+ * @vm: address space to evict from
+ * @min_size: size of the desired free space
+ * @alignment: alignment constraint of the desired free space
+ * @cache_level: cache_level for the desired space
+ * @start, @end: range (inclusive, exclusive) to search in
+ * @flags: eviction flags, e.g. PIN_NONBLOCK to skip active vmas
+ *
+ * This function will try to evict vmas until a free space satisfying the
+ * requirements is found. Callers must check first whether any such hole exists
+ * already before calling this function.
+ *
+ * This function is used by the object/vma binding code.
+ *
+ * To clarify: This is for freeing up virtual address space, not for freeing
+ * memory in e.g. the shrinker.
+ */
+int
+i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
+ int min_size, unsigned alignment, unsigned cache_level,
+ unsigned long start, unsigned long end,
+ unsigned flags)
+{
+ struct list_head eviction_list, unwind_list;
+ struct i915_vma *vma;
+ int ret = 0;
+ int pass = 0;
+
+ trace_i915_gem_evict(dev, min_size, alignment, flags);
+
+ /*
+ * The goal is to evict objects and amalgamate space in LRU order.
+ * The oldest idle objects reside on the inactive list, which is in
+ * retirement order. The next objects to retire are those on the (per
+ * ring) active list that do not have an outstanding flush. Once the
+ * hardware reports completion (the seqno is updated after the
+ * batchbuffer has been finished) the clean buffer objects would
+ * be retired to the inactive list. Any dirty objects would be added
+ * to the tail of the flushing list. So after processing the clean
+ * active objects we need to emit a MI_FLUSH to retire the flushing
+ * list, hence the retirement order of the flushing list is in
+ * advance of the dirty objects on the active lists.
+ *
+ * The retirement sequence is thus:
+ * 1. Inactive objects (already retired)
+ * 2. Clean active objects
+ * 3. Flushing list
+ * 4. Dirty active objects.
+ *
+ * On each list, the oldest objects lie at the HEAD with the freshest
+ * object on the TAIL.
+ */
+
+ INIT_LIST_HEAD(&unwind_list);
+ if (start != 0 || end != vm->total) {
+ drm_mm_init_scan_with_range(&vm->mm, min_size,
+ alignment, cache_level,
+ start, end);
+ } else
+ drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
+
+search_again:
+ /* First see if there is a large enough contiguous idle region... */
+ list_for_each_entry(vma, &vm->inactive_list, mm_list) {
+ if (mark_free(vma, &unwind_list))
+ goto found;
+ }
+
+ if (flags & PIN_NONBLOCK)
+ goto none;
+
+ /* Now merge in the soon-to-be-expired objects... */
+ list_for_each_entry(vma, &vm->active_list, mm_list) {
+ if (mark_free(vma, &unwind_list))
+ goto found;
+ }
+
+none:
+ /* Nothing found, clean up and bail out! */
+ while (!list_empty(&unwind_list)) {
+ vma = list_first_entry(&unwind_list,
+ struct i915_vma,
+ exec_list);
+ ret = drm_mm_scan_remove_block(&vma->node);
+ BUG_ON(ret);
+
+ list_del_init(&vma->exec_list);
+ }
+
+ /* Can we unpin some objects such as idle hw contexts,
+ * or pending flips?
+ */
+ if (flags & PIN_NONBLOCK)
+ return -ENOSPC;
+
+ /* Only idle the GPU and repeat the search once */
+ if (pass++ == 0) {
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(dev);
+ goto search_again;
+ }
+
+ /* If we still have pending pageflip completions, drop
+ * back to userspace to give our workqueues time to
+ * acquire our locks and unpin the old scanouts.
+ */
+ return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
+
+found:
+ /* drm_mm doesn't allow any other operations while
+ * scanning, therefore store the objects to be evicted
+ * on a temporary list. */
+ INIT_LIST_HEAD(&eviction_list);
+ while (!list_empty(&unwind_list)) {
+ vma = list_first_entry(&unwind_list,
+ struct i915_vma,
+ exec_list);
+ if (drm_mm_scan_remove_block(&vma->node)) {
+ list_move(&vma->exec_list, &eviction_list);
+ drm_gem_object_reference(&vma->obj->base);
+ continue;
+ }
+ list_del_init(&vma->exec_list);
+ }
+
+ /* Unbinding will emit any required flushes */
+ while (!list_empty(&eviction_list)) {
+ struct drm_gem_object *obj;
+ vma = list_first_entry(&eviction_list,
+ struct i915_vma,
+ exec_list);
+
+ obj = &vma->obj->base;
+ list_del_init(&vma->exec_list);
+ if (ret == 0)
+ ret = i915_vma_unbind(vma);
+
+ drm_gem_object_unreference(obj);
+ }
+
+ return ret;
+}
+
+/**
+ * i915_gem_evict_vm - Evict all idle vmas from a vm
+ *
+ * @vm: Address space to cleanse
+ * @do_idle: Boolean directing whether to idle first.
+ *
+ * This function evicts all idle vmas from a vm. If all unpinned vmas should be
+ * evicted, @do_idle needs to be set to true.
+ *
+ * This is used by the execbuf code as a last-ditch effort to defragment the
+ * address space.
+ *
+ * To clarify: This is for freeing up virtual address space, not for freeing
+ * memory in e.g. the shrinker.
+ */
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
+{
+ struct i915_vma *vma, *next;
+ int ret;
+
+ trace_i915_gem_evict_vm(vm);
+
+ if (do_idle) {
+ ret = i915_gpu_idle(vm->dev);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(vm->dev);
+ }
+
+ list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+ if (vma->pin_count == 0)
+ WARN_ON(i915_vma_unbind(vma));
+
+ return 0;
+}
+
+/**
+ * i915_gem_evict_everything - Try to evict all objects
+ * @dev: Device to evict objects for
+ *
+ * This function tries to evict all gem objects from all address spaces. Used
+ * by the shrinker as a last-ditch effort and for suspend, before releasing the
+ * backing storage of all unbound objects.
+ */
+int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm;
+ bool lists_empty = true;
+ int ret;
+
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ lists_empty = (list_empty(&vm->inactive_list) &&
+ list_empty(&vm->active_list));
+ if (!lists_empty)
+ break;
+ }
+
+ if (lists_empty)
+ return -ENOSPC;
+
+ trace_i915_gem_evict_everything(dev);
+
+ /* The gpu_idle will flush everything in the write domain to the
+ * active list. Then we must move everything off the active list
+ * with retire requests.
+ */
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(dev);
+
+ /* Having flushed everything, unbind() should never raise an error */
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ WARN_ON(i915_gem_evict_vm(vm, false));
+
+ return 0;
+}
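
i915_gem_evict_something() above is built around the drm_mm scan API; reduced
to its skeleton, the contract looks roughly like this (a sketch only, with the
real unwind/eviction-list bookkeeping elided):

	LIST_HEAD(unwind);
	struct i915_vma *vma;

	drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

	/* Feed candidates in LRU order until the scanner reports a hole. */
	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
		list_add(&vma->exec_list, &unwind);
		if (drm_mm_scan_add_block(&vma->node))
			break;
	}

	/* Every scanned node must be removed again, in LIFO order, before
	 * any other drm_mm operation; drm_mm_scan_remove_block() returns
	 * true only for the nodes whose unbinding produces the hole. */
	while (!list_empty(&unwind)) {
		vma = list_first_entry(&unwind, struct i915_vma, exec_list);
		if (drm_mm_scan_remove_block(&vma->node))
			; /* this vma gets evicted via i915_vma_unbind() */
		list_del_init(&vma->exec_list);
	}
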
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
new file mode 100644
index 00000000000..3a30133f93e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -0,0 +1,1548 @@
+/*
+ * Copyright © 2008,2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+#include <linux/dma_remapping.h>
+
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+
+#define BATCH_OFFSET_BIAS (256*1024)
+
+struct eb_vmas {
+ struct list_head vmas;
+ int and;
+ union {
+ struct i915_vma *lut[0];
+ struct hlist_head buckets[0];
+ };
+};
+
+static struct eb_vmas *
+eb_create(struct drm_i915_gem_execbuffer2 *args)
+{
+ struct eb_vmas *eb = NULL;
+
+ if (args->flags & I915_EXEC_HANDLE_LUT) {
+ unsigned size = args->buffer_count;
+ size *= sizeof(struct i915_vma *);
+ size += sizeof(struct eb_vmas);
+ eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ }
+
+ if (eb == NULL) {
+ unsigned size = args->buffer_count;
+ unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
+ while (count > 2*size)
+ count >>= 1;
+ eb = kzalloc(count*sizeof(struct hlist_head) +
+ sizeof(struct eb_vmas),
+ GFP_TEMPORARY);
+ if (eb == NULL)
+ return eb;
+
+ eb->and = count - 1;
+ } else
+ eb->and = -args->buffer_count;
+
+ INIT_LIST_HEAD(&eb->vmas);
+ return eb;
+}
+
+static void
+eb_reset(struct eb_vmas *eb)
+{
+ if (eb->and >= 0)
+ memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
+}
+
+static int
+eb_lookup_vmas(struct eb_vmas *eb,
+ struct drm_i915_gem_exec_object2 *exec,
+ const struct drm_i915_gem_execbuffer2 *args,
+ struct i915_address_space *vm,
+ struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct list_head objects;
+ int i, ret;
+
+ INIT_LIST_HEAD(&objects);
+ spin_lock(&file->table_lock);
+ /* Grab a reference to the object and release the lock so we can look up
+ * or create the VMA without using GFP_ATOMIC */
+ for (i = 0; i < args->buffer_count; i++) {
+ obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
+ if (obj == NULL) {
+ spin_unlock(&file->table_lock);
+ DRM_DEBUG("Invalid object handle %d at index %d\n",
+ exec[i].handle, i);
+ ret = -ENOENT;
+ goto err;
+ }
+
+ if (!list_empty(&obj->obj_exec_link)) {
+ spin_unlock(&file->table_lock);
+ DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
+ obj, exec[i].handle, i);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ drm_gem_object_reference(&obj->base);
+ list_add_tail(&obj->obj_exec_link, &objects);
+ }
+ spin_unlock(&file->table_lock);
+
+ i = 0;
+ while (!list_empty(&objects)) {
+ struct i915_vma *vma;
+ struct i915_address_space *bind_vm = vm;
+
+ if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
+ USES_FULL_PPGTT(vm->dev)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* If we have secure dispatch, or the userspace assures us that
+ * they know what they're doing, use the GGTT VM.
+ */
+ if ((args->flags & I915_EXEC_SECURE) &&
+ i == (args->buffer_count - 1))
+ bind_vm = &dev_priv->gtt.base;
+
+ obj = list_first_entry(&objects,
+ struct drm_i915_gem_object,
+ obj_exec_link);
+
+ /*
+ * NOTE: We can leak any vmas created here when something fails
+ * later on. But that's no issue since vma_unbind can deal with
+ * vmas which are not actually bound. And since only
+ * lookup_or_create exists as an interface to get at the vma
+ * from the (obj, vm) we don't run the risk of creating
+ * duplicated vmas for the same vm.
+ */
+ vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
+ if (IS_ERR(vma)) {
+ DRM_DEBUG("Failed to lookup VMA\n");
+ ret = PTR_ERR(vma);
+ goto err;
+ }
+
+ /* Transfer ownership from the objects list to the vmas list. */
+ list_add_tail(&vma->exec_list, &eb->vmas);
+ list_del_init(&obj->obj_exec_link);
+
+ vma->exec_entry = &exec[i];
+ if (eb->and < 0) {
+ eb->lut[i] = vma;
+ } else {
+ uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
+ vma->exec_handle = handle;
+ hlist_add_head(&vma->exec_node,
+ &eb->buckets[handle & eb->and]);
+ }
+ ++i;
+ }
+
+ return 0;
+
+
+err:
+ while (!list_empty(&objects)) {
+ obj = list_first_entry(&objects,
+ struct drm_i915_gem_object,
+ obj_exec_link);
+ list_del_init(&obj->obj_exec_link);
+ drm_gem_object_unreference(&obj->base);
+ }
+ /*
+ * Objects already transferred to the vmas list will be unreferenced by
+ * eb_destroy.
+ */
+
+ return ret;
+}
+
+static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
+{
+ if (eb->and < 0) {
+ if (handle >= -eb->and)
+ return NULL;
+ return eb->lut[handle];
+ } else {
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = &eb->buckets[handle & eb->and];
+ hlist_for_each(node, head) {
+ struct i915_vma *vma;
+
+ vma = hlist_entry(node, struct i915_vma, exec_node);
+ if (vma->exec_handle == handle)
+ return vma;
+ }
+ return NULL;
+ }
+}
+
+static void
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+ struct drm_i915_gem_exec_object2 *entry;
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return;
+
+ entry = vma->exec_entry;
+
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+ i915_gem_object_unpin_fence(obj);
+
+ if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+ vma->pin_count--;
+
+ entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
+{
+ while (!list_empty(&eb->vmas)) {
+ struct i915_vma *vma;
+
+ vma = list_first_entry(&eb->vmas,
+ struct i915_vma,
+ exec_list);
+ list_del_init(&vma->exec_list);
+ i915_gem_execbuffer_unreserve_vma(vma);
+ drm_gem_object_unreference(&vma->obj->base);
+ }
+ kfree(eb);
+}
+
+static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
+{
+ return (HAS_LLC(obj->base.dev) ||
+ obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+ !obj->map_and_fenceable ||
+ obj->cache_level != I915_CACHE_NONE);
+}
+
+static int
+relocate_entry_cpu(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_relocation_entry *reloc,
+ uint64_t target_offset)
+{
+ struct drm_device *dev = obj->base.dev;
+ uint32_t page_offset = offset_in_page(reloc->offset);
+ uint64_t delta = reloc->delta + target_offset;
+ char *vaddr;
+ int ret;
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret)
+ return ret;
+
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ reloc->offset >> PAGE_SHIFT));
+ *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+
+ if (page_offset == 0) {
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+ }
+
+ *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
+ }
+
+ kunmap_atomic(vaddr);
+
+ return 0;
+}
+
+static int
+relocate_entry_gtt(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_relocation_entry *reloc,
+ uint64_t target_offset)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint64_t delta = reloc->delta + target_offset;
+ uint32_t __iomem *reloc_entry;
+ void __iomem *reloc_page;
+ int ret;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ return ret;
+
+ /* Map the page containing the relocation we're going to perform. */
+ reloc->offset += i915_gem_obj_ggtt_offset(obj);
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ reloc->offset & PAGE_MASK);
+ reloc_entry = (uint32_t __iomem *)
+ (reloc_page + offset_in_page(reloc->offset));
+ iowrite32(lower_32_bits(delta), reloc_entry);
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ reloc_entry += 1;
+
+ if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
+ io_mapping_unmap_atomic(reloc_page);
+ reloc_page = io_mapping_map_atomic_wc(
+ dev_priv->gtt.mappable,
+ reloc->offset + sizeof(uint32_t));
+ reloc_entry = reloc_page;
+ }
+
+ iowrite32(upper_32_bits(delta), reloc_entry);
+ }
+
+ io_mapping_unmap_atomic(reloc_page);
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+ struct eb_vmas *eb,
+ struct drm_i915_gem_relocation_entry *reloc)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_gem_object *target_obj;
+ struct drm_i915_gem_object *target_i915_obj;
+ struct i915_vma *target_vma;
+ uint64_t target_offset;
+ int ret;
+
+ /* we already hold a reference to all valid objects */
+ target_vma = eb_get_vma(eb, reloc->target_handle);
+ if (unlikely(target_vma == NULL))
+ return -ENOENT;
+ target_i915_obj = target_vma->obj;
+ target_obj = &target_vma->obj->base;
+
+ target_offset = target_vma->node.start;
+
+ /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
+ * pipe_control writes because the gpu doesn't properly redirect them
+ * through the ppgtt for non_secure batchbuffers. */
+ if (unlikely(IS_GEN6(dev) &&
+ reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
+ !target_i915_obj->has_global_gtt_mapping)) {
+ struct i915_vma *vma =
+ list_first_entry(&target_i915_obj->vma_list,
+ typeof(*vma), vma_link);
+ vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
+ }
+
+ /* Validate that the target is in a valid r/w GPU domain */
+ if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
+ DRM_DEBUG("reloc with multiple write domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ return -EINVAL;
+ }
+ if (unlikely((reloc->write_domain | reloc->read_domains)
+ & ~I915_GEM_GPU_DOMAINS)) {
+ DRM_DEBUG("reloc with read/write non-GPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ return -EINVAL;
+ }
+
+ target_obj->pending_read_domains |= reloc->read_domains;
+ target_obj->pending_write_domain |= reloc->write_domain;
+
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
+ */
+ if (target_offset == reloc->presumed_offset)
+ return 0;
+
+ /* Check that the relocation address is valid... */
+ if (unlikely(reloc->offset >
+ obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
+ DRM_DEBUG("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ (int) obj->base.size);
+ return -EINVAL;
+ }
+ if (unlikely(reloc->offset & 3)) {
+ DRM_DEBUG("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset);
+ return -EINVAL;
+ }
+
+ /* We can't wait for rendering with pagefaults disabled */
+ if (obj->active && in_atomic())
+ return -EFAULT;
+
+ if (use_cpu_reloc(obj))
+ ret = relocate_entry_cpu(obj, reloc, target_offset);
+ else
+ ret = relocate_entry_gtt(obj, reloc, target_offset);
+
+ if (ret)
+ return ret;
+
+ /* and update the user's relocation entry */
+ reloc->presumed_offset = target_offset;
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
+ struct eb_vmas *eb)
+{
+#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
+ struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+ int remain, ret;
+
+ user_relocs = to_user_ptr(entry->relocs_ptr);
+
+ remain = entry->relocation_count;
+ while (remain) {
+ struct drm_i915_gem_relocation_entry *r = stack_reloc;
+ int count = remain;
+ if (count > ARRAY_SIZE(stack_reloc))
+ count = ARRAY_SIZE(stack_reloc);
+ remain -= count;
+
+ if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
+ return -EFAULT;
+
+ do {
+ u64 offset = r->presumed_offset;
+
+ ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
+ if (ret)
+ return ret;
+
+ if (r->presumed_offset != offset &&
+ __copy_to_user_inatomic(&user_relocs->presumed_offset,
+ &r->presumed_offset,
+ sizeof(r->presumed_offset))) {
+ return -EFAULT;
+ }
+
+ user_relocs++;
+ r++;
+ } while (--count);
+ }
+
+ return 0;
+#undef N_RELOC
+}
+
+static int
+i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
+ struct eb_vmas *eb,
+ struct drm_i915_gem_relocation_entry *relocs)
+{
+ const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+ int i, ret;
+
+ for (i = 0; i < entry->relocation_count; i++) {
+ ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct eb_vmas *eb)
+{
+ struct i915_vma *vma;
+ int ret = 0;
+
+ /* This is the fast path and we cannot handle a pagefault whilst
+ * holding the struct mutex lest the user pass in the relocations
+ * contained within a mmapped bo; in such a case the page
+ * fault handler would call i915_gem_fault() and we would try to
+ * acquire the struct mutex again. Obviously this is bad and so
+ * lockdep complains vehemently.
+ */
+ pagefault_disable();
+ list_for_each_entry(vma, &eb->vmas, exec_list) {
+ ret = i915_gem_execbuffer_relocate_vma(vma, eb);
+ if (ret)
+ break;
+ }
+ pagefault_enable();
+
+ return ret;
+}
+
+static int
+need_reloc_mappable(struct i915_vma *vma)
+{
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+ return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
+ i915_is_ggtt(vma->vm);
+}
+
+static int
+i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
+ struct intel_engine_cs *ring,
+ bool *need_reloc)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+ bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+ bool need_fence;
+ uint64_t flags;
+ int ret;
+
+ flags = 0;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+ if (need_fence || need_reloc_mappable(vma))
+ flags |= PIN_MAPPABLE;
+
+ if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+ flags |= PIN_GLOBAL;
+ if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+ flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+
+ ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
+ if (ret)
+ return ret;
+
+ entry->flags |= __EXEC_OBJECT_HAS_PIN;
+
+ if (has_fenced_gpu_access) {
+ if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ return ret;
+
+ if (i915_gem_object_pin_fence(obj))
+ entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+
+ obj->pending_fenced_gpu_access = true;
+ }
+ }
+
+ if (entry->offset != vma->node.start) {
+ entry->offset = vma->node.start;
+ *need_reloc = true;
+ }
+
+ if (entry->flags & EXEC_OBJECT_WRITE) {
+ obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
+ obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
+ }
+
+ return 0;
+}
+
+static bool
+eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+{
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+ struct drm_i915_gem_object *obj = vma->obj;
+ bool need_fence, need_mappable;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+ need_mappable = need_fence || need_reloc_mappable(vma);
+
+ WARN_ON((need_mappable || need_fence) &&
+ !i915_is_ggtt(vma->vm));
+
+ if (entry->alignment &&
+ vma->node.start & (entry->alignment - 1))
+ return true;
+
+ if (need_mappable && !obj->map_and_fenceable)
+ return true;
+
+ if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+ vma->node.start < BATCH_OFFSET_BIAS)
+ return true;
+
+ return false;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
+ struct list_head *vmas,
+ bool *need_relocs)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ struct i915_address_space *vm;
+ struct list_head ordered_vmas;
+ bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+ int retry;
+
+ if (list_empty(vmas))
+ return 0;
+
+ i915_gem_retire_requests_ring(ring);
+
+ vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
+
+ INIT_LIST_HEAD(&ordered_vmas);
+ while (!list_empty(vmas)) {
+ struct drm_i915_gem_exec_object2 *entry;
+ bool need_fence, need_mappable;
+
+ vma = list_first_entry(vmas, struct i915_vma, exec_list);
+ obj = vma->obj;
+ entry = vma->exec_entry;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+ need_mappable = need_fence || need_reloc_mappable(vma);
+
+ if (need_mappable)
+ list_move(&vma->exec_list, &ordered_vmas);
+ else
+ list_move_tail(&vma->exec_list, &ordered_vmas);
+
+ obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
+ obj->base.pending_write_domain = 0;
+ obj->pending_fenced_gpu_access = false;
+ }
+ list_splice(&ordered_vmas, vmas);
+
+ /* Attempt to pin all of the buffers into the GTT.
+ * This is done in 3 phases:
+ *
+ * 1a. Unbind all objects that do not match the GTT constraints for
+ * the execbuffer (fenceable, mappable, alignment etc).
+ * 1b. Increment pin count for already bound objects.
+ * 2. Bind new objects.
+ * 3. Decrement pin count.
+ *
+ * This avoids unnecessary unbinding of later objects in order to make
+ * room for the earlier objects *unless* we need to defragment.
+ */
+ retry = 0;
+ do {
+ int ret = 0;
+
+ /* Unbind any ill-fitting objects or pin. */
+ list_for_each_entry(vma, vmas, exec_list) {
+ if (!drm_mm_node_allocated(&vma->node))
+ continue;
+
+ if (eb_vma_misplaced(vma, has_fenced_gpu_access))
+ ret = i915_vma_unbind(vma);
+ else
+ ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+ if (ret)
+ goto err;
+ }
+
+ /* Bind fresh objects */
+ list_for_each_entry(vma, vmas, exec_list) {
+ if (drm_mm_node_allocated(&vma->node))
+ continue;
+
+ ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+ if (ret)
+ goto err;
+ }
+
+err:
+ if (ret != -ENOSPC || retry++)
+ return ret;
+
+ /* Decrement pin count for bound objects */
+ list_for_each_entry(vma, vmas, exec_list)
+ i915_gem_execbuffer_unreserve_vma(vma);
+
+ ret = i915_gem_evict_vm(vm, true);
+ if (ret)
+ return ret;
+ } while (1);
+}
+
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct drm_file *file,
+ struct intel_engine_cs *ring,
+ struct eb_vmas *eb,
+ struct drm_i915_gem_exec_object2 *exec)
+{
+ struct drm_i915_gem_relocation_entry *reloc;
+ struct i915_address_space *vm;
+ struct i915_vma *vma;
+ bool need_relocs;
+ int *reloc_offset;
+ int i, total, ret;
+ unsigned count = args->buffer_count;
+
+ if (WARN_ON(list_empty(&eb->vmas)))
+ return 0;
+
+ vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
+
+ /* We may process another execbuffer during the unlock... */
+ while (!list_empty(&eb->vmas)) {
+ vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
+ list_del_init(&vma->exec_list);
+ i915_gem_execbuffer_unreserve_vma(vma);
+ drm_gem_object_unreference(&vma->obj->base);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ total = 0;
+ for (i = 0; i < count; i++)
+ total += exec[i].relocation_count;
+
+ reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
+ reloc = drm_malloc_ab(total, sizeof(*reloc));
+ if (reloc == NULL || reloc_offset == NULL) {
+ drm_free_large(reloc);
+ drm_free_large(reloc_offset);
+ mutex_lock(&dev->struct_mutex);
+ return -ENOMEM;
+ }
+
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+ u64 invalid_offset = (u64)-1;
+ int j;
+
+ user_relocs = to_user_ptr(exec[i].relocs_ptr);
+
+ if (copy_from_user(reloc+total, user_relocs,
+ exec[i].relocation_count * sizeof(*reloc))) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ /* As we do not update the known relocation offsets after
+ * relocating (due to the complexities in lock handling),
+ * we need to mark them as invalid now so that we force the
+ * relocation processing next time. Just in case the target
+ * object is evicted and then rebound into its old
+ * presumed_offset before the next execbuffer - if that
+ * happened we would make the mistake of assuming that the
+ * relocations were valid.
+ */
+ for (j = 0; j < exec[i].relocation_count; j++) {
+ if (__copy_to_user(&user_relocs[j].presumed_offset,
+ &invalid_offset,
+ sizeof(invalid_offset))) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+ }
+
+ reloc_offset[i] = total;
+ total += exec[i].relocation_count;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ /* reacquire the objects */
+ eb_reset(eb);
+ ret = eb_lookup_vmas(eb, exec, args, vm, file);
+ if (ret)
+ goto err;
+
+ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
+ ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+ if (ret)
+ goto err;
+
+ list_for_each_entry(vma, &eb->vmas, exec_list) {
+ int offset = vma->exec_entry - exec;
+ ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
+ reloc + reloc_offset[offset]);
+ if (ret)
+ goto err;
+ }
+
+ /* Leave the user relocations as they are; this is the painfully slow path,
+ * and we want to avoid the complication of dropping the lock whilst
+ * having buffers reserved in the aperture and so causing spurious
+ * ENOSPC for random operations.
+ */
+
+err:
+ drm_free_large(reloc);
+ drm_free_large(reloc_offset);
+ return ret;
+}
+
+static int
+i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+ struct list_head *vmas)
+{
+ struct i915_vma *vma;
+ uint32_t flush_domains = 0;
+ bool flush_chipset = false;
+ int ret;
+
+ list_for_each_entry(vma, vmas, exec_list) {
+ struct drm_i915_gem_object *obj = vma->obj;
+ ret = i915_gem_object_sync(obj, ring);
+ if (ret)
+ return ret;
+
+ if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+ flush_chipset |= i915_gem_clflush_object(obj, false);
+
+ flush_domains |= obj->base.write_domain;
+ }
+
+ if (flush_chipset)
+ i915_gem_chipset_flush(ring->dev);
+
+ if (flush_domains & I915_GEM_DOMAIN_GTT)
+ wmb();
+
+ /* Unconditionally invalidate gpu caches and ensure that we do flush
+ * any residual writes from the previous batch.
+ */
+ return intel_ring_invalidate_all_caches(ring);
+}
+
+static bool
+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+{
+ if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
+ return false;
+
+ return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
+}
+
+static int
+validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+ int count)
+{
+ int i;
+ unsigned relocs_total = 0;
+ unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+
+ for (i = 0; i < count; i++) {
+ char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
+ int length; /* limited by fault_in_pages_readable() */
+
+ if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
+ return -EINVAL;
+
+ /* First check for malicious input causing overflow in
+ * the worst case where we need to allocate the entire
+ * relocation tree as a single array.
+ */
+ if (exec[i].relocation_count > relocs_max - relocs_total)
+ return -EINVAL;
+ relocs_total += exec[i].relocation_count;
+
+ length = exec[i].relocation_count *
+ sizeof(struct drm_i915_gem_relocation_entry);
+ /*
+ * We must check that the entire relocation array is safe
+ * to read, but since we may need to update the presumed
+ * offsets during execution, check for full write access.
+ */
+ if (!access_ok(VERIFY_WRITE, ptr, length))
+ return -EFAULT;
+
+ if (likely(!i915.prefault_disable)) {
+ if (fault_in_multipages_readable(ptr, length))
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static struct intel_context *
+i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
+ struct intel_engine_cs *ring, const u32 ctx_id)
+{
+ struct intel_context *ctx = NULL;
+ struct i915_ctx_hang_stats *hs;
+
+ if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
+ return ERR_PTR(-EINVAL);
+
+ ctx = i915_gem_context_get(file->driver_priv, ctx_id);
+ if (IS_ERR(ctx))
+ return ctx;
+
+ hs = &ctx->hang_stats;
+ if (hs->banned) {
+ DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
+ return ERR_PTR(-EIO);
+ }
+
+ return ctx;
+}
+
+static void
+i915_gem_execbuffer_move_to_active(struct list_head *vmas,
+ struct intel_engine_cs *ring)
+{
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, vmas, exec_list) {
+ struct drm_i915_gem_object *obj = vma->obj;
+ u32 old_read = obj->base.read_domains;
+ u32 old_write = obj->base.write_domain;
+
+ obj->base.write_domain = obj->base.pending_write_domain;
+ if (obj->base.write_domain == 0)
+ obj->base.pending_read_domains |= obj->base.read_domains;
+ obj->base.read_domains = obj->base.pending_read_domains;
+ obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
+
+ i915_vma_move_to_active(vma, ring);
+ if (obj->base.write_domain) {
+ obj->dirty = 1;
+ obj->last_write_seqno = intel_ring_get_seqno(ring);
+ /* check for potential scanout */
+ if (i915_gem_obj_ggtt_bound(obj) &&
+ i915_gem_obj_to_ggtt(obj)->pin_count)
+ intel_mark_fb_busy(obj, ring);
+
+ /* update for the implicit flush after a batch */
+ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+ }
+
+ trace_i915_gem_object_change_domain(obj, old_read, old_write);
+ }
+}
+
+static void
+i915_gem_execbuffer_retire_commands(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *obj)
+{
+ /* Unconditionally force add_request to emit a full flush. */
+ ring->gpu_caches_dirty = true;
+
+ /* Add a breadcrumb for the completion of the batch buffer */
+ (void)__i915_add_request(ring, file, obj, NULL);
+}
+
+static int
+i915_reset_gen7_sol_offsets(struct drm_device *dev,
+ struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret, i;
+
+ if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
+ DRM_DEBUG("sol reset is gen7/rcs only\n");
+ return -EINVAL;
+ }
+
+ ret = intel_ring_begin(ring, 4 * 3);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 4; i++) {
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
+ intel_ring_emit(ring, 0);
+ }
+
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+/**
+ * Find a BSD ring on which to dispatch the given BSD command.
+ * The ring ID is returned.
+ */
+static int gen8_dispatch_bsd_ring(struct drm_device *dev,
+ struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ /* Check whether the file_priv is using one ring */
+ if (file_priv->bsd_ring)
+ return file_priv->bsd_ring->id;
+ else {
+ /* If no, use the ping-pong mechanism to select one ring */
+ int ring_id;
+
+ mutex_lock(&dev->struct_mutex);
+ if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
+ ring_id = VCS;
+ dev_priv->mm.bsd_ring_dispatch_index = 1;
+ } else {
+ ring_id = VCS2;
+ dev_priv->mm.bsd_ring_dispatch_index = 0;
+ }
+ file_priv->bsd_ring = &dev_priv->ring[ring_id];
+ mutex_unlock(&dev->struct_mutex);
+ return ring_id;
+ }
+}
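
The policy above is sticky round-robin: a client keeps the first BSD ring
it is handed, and unassigned clients alternate between the two instances.
The same policy in isolation, with illustrative names of our choosing:

	/* Sticky round-robin: keep the first assignment; new clients
	 * ping-pong between ring IDs 1 and 2. */
	struct client {
		int bsd_ring;	/* 0 = not yet assigned */
	};

	static int pick_bsd_ring(struct client *c, int *next_idx)
	{
		if (!c->bsd_ring) {
			c->bsd_ring = *next_idx ? 2 : 1;
			*next_idx = !*next_idx;
		}
		return c->bsd_ring;
	}
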
+
+static struct drm_i915_gem_object *
+eb_get_batch(struct eb_vmas *eb)
+{
+ struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+ /*
+ * SNA is doing fancy tricks with compressing batch buffers, which leads
+ * to negative relocation deltas. Usually that works out ok since the
+ * relocate address is still positive, except when the batch is placed
+ * very low in the GTT. Ensure this doesn't happen.
+ *
+ * Note that actual hangs have only been observed on gen7, but for
+ * paranoia do it everywhere.
+ */
+ vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+ return vma->obj;
+}
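
To see why the bias matters, take a batch bound at a very low GTT offset
with a negative relocation delta: the 32-bit sum wraps to the top of the
address space. A tiny worked example (values invented for illustration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t batch_gtt = 0x1000;	/* batch placed very low */
		int32_t delta = -0x2000;	/* negative reloc delta */

		/* prints 0xfffff000: wrapped, not a sensible target */
		printf("reloc target = 0x%08x\n",
		       batch_gtt + (uint32_t)delta);
		return 0;
	}
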
+
+static int
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct drm_i915_gem_exec_object2 *exec)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct eb_vmas *eb;
+ struct drm_i915_gem_object *batch_obj;
+ struct drm_clip_rect *cliprects = NULL;
+ struct intel_engine_cs *ring;
+ struct intel_context *ctx;
+ struct i915_address_space *vm;
+ const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
+ u64 exec_start = args->batch_start_offset, exec_len;
+ u32 mask, flags;
+ int ret, mode, i;
+ bool need_relocs;
+
+ if (!i915_gem_check_execbuffer(args))
+ return -EINVAL;
+
+ ret = validate_exec_list(exec, args->buffer_count);
+ if (ret)
+ return ret;
+
+ flags = 0;
+ if (args->flags & I915_EXEC_SECURE) {
+ if (!file->is_master || !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ flags |= I915_DISPATCH_SECURE;
+ }
+ if (args->flags & I915_EXEC_IS_PINNED)
+ flags |= I915_DISPATCH_PINNED;
+
+ if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
+ DRM_DEBUG("execbuf with unknown ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return -EINVAL;
+ }
+
+ if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
+ ring = &dev_priv->ring[RCS];
+ else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
+ if (HAS_BSD2(dev)) {
+ int ring_id;
+ ring_id = gen8_dispatch_bsd_ring(dev, file);
+ ring = &dev_priv->ring[ring_id];
+ } else
+ ring = &dev_priv->ring[VCS];
+ } else
+ ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
+
+ if (!intel_ring_initialized(ring)) {
+ DRM_DEBUG("execbuf with invalid ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return -EINVAL;
+ }
+
+ mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+ mask = I915_EXEC_CONSTANTS_MASK;
+ switch (mode) {
+ case I915_EXEC_CONSTANTS_REL_GENERAL:
+ case I915_EXEC_CONSTANTS_ABSOLUTE:
+ case I915_EXEC_CONSTANTS_REL_SURFACE:
+ if (mode != 0 && ring != &dev_priv->ring[RCS]) {
+ DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
+ return -EINVAL;
+ }
+
+ if (mode != dev_priv->relative_constants_mode) {
+ if (INTEL_INFO(dev)->gen < 4) {
+ DRM_DEBUG("no rel constants on pre-gen4\n");
+ return -EINVAL;
+ }
+
+ if (INTEL_INFO(dev)->gen > 5 &&
+ mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+ DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
+ return -EINVAL;
+ }
+
+ /* The HW changed the meaning on this bit on gen6 */
+ if (INTEL_INFO(dev)->gen >= 6)
+ mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+ }
+ break;
+ default:
+ DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
+ return -EINVAL;
+ }
+
+ if (args->buffer_count < 1) {
+ DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ if (args->num_cliprects != 0) {
+ if (ring != &dev_priv->ring[RCS]) {
+ DRM_DEBUG("clip rectangles are only valid with the render ring\n");
+ return -EINVAL;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+ return -EINVAL;
+ }
+
+ if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
+ DRM_DEBUG("execbuf with %u cliprects\n",
+ args->num_cliprects);
+ return -EINVAL;
+ }
+
+ cliprects = kcalloc(args->num_cliprects,
+ sizeof(*cliprects),
+ GFP_KERNEL);
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+
+ if (copy_from_user(cliprects,
+ to_user_ptr(args->cliprects_ptr),
+ sizeof(*cliprects)*args->num_cliprects)) {
+ ret = -EFAULT;
+ goto pre_mutex_err;
+ }
+ } else {
+ if (args->DR4 == 0xffffffff) {
+ DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
+ args->DR4 = 0;
+ }
+
+ if (args->DR1 || args->DR4 || args->cliprects_ptr) {
+ DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
+ return -EINVAL;
+ }
+ }
+
+ intel_runtime_pm_get(dev_priv);
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto pre_mutex_err;
+
+ if (dev_priv->ums.mm_suspended) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = -EBUSY;
+ goto pre_mutex_err;
+ }
+
+ ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
+ if (IS_ERR(ctx)) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = PTR_ERR(ctx);
+ goto pre_mutex_err;
+ }
+
+ i915_gem_context_reference(ctx);
+
+ vm = ctx->vm;
+ if (!USES_FULL_PPGTT(dev))
+ vm = &dev_priv->gtt.base;
+
+ eb = eb_create(args);
+ if (eb == NULL) {
+ i915_gem_context_unreference(ctx);
+ mutex_unlock(&dev->struct_mutex);
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+
+ /* Look up object handles */
+ ret = eb_lookup_vmas(eb, exec, args, vm, file);
+ if (ret)
+ goto err;
+
+ /* take note of the batch buffer before we might reorder the lists */
+ batch_obj = eb_get_batch(eb);
+
+ /* Move the objects en-masse into the GTT, evicting if necessary. */
+ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
+ ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+ if (ret)
+ goto err;
+
+ /* The objects are in their final locations, apply the relocations. */
+ if (need_relocs)
+ ret = i915_gem_execbuffer_relocate(eb);
+ if (ret) {
+ if (ret == -EFAULT) {
+ ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
+ eb, exec);
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ }
+ if (ret)
+ goto err;
+ }
+
+ /* Set the pending read domains for the batch buffer to COMMAND */
+ if (batch_obj->base.pending_write_domain) {
+ DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
+ if (i915_needs_cmd_parser(ring)) {
+ ret = i915_parse_cmds(ring,
+ batch_obj,
+ args->batch_start_offset,
+ file->is_master);
+ if (ret)
+ goto err;
+
+ /*
+ * XXX: Actually do this when enabling batch copy...
+ *
+ * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
+ * from MI_BATCH_BUFFER_START commands issued in the
+ * dispatch_execbuffer implementations. We specifically don't
+ * want that set when the command parser is enabled.
+ */
+ }
+
+ /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+ * batch" bit. Hence we need to pin secure batches into the global gtt.
+ * hsw should have this fixed, but bdw mucks it up again. */
+ if (flags & I915_DISPATCH_SECURE &&
+ !batch_obj->has_global_gtt_mapping) {
+ /* When we have multiple VMs, we'll need to make sure that we
+ * allocate space first */
+ struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
+ BUG_ON(!vma);
+ vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
+ }
+
+ if (flags & I915_DISPATCH_SECURE)
+ exec_start += i915_gem_obj_ggtt_offset(batch_obj);
+ else
+ exec_start += i915_gem_obj_offset(batch_obj, vm);
+
+ ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
+ if (ret)
+ goto err;
+
+ ret = i915_switch_context(ring, ctx);
+ if (ret)
+ goto err;
+
+ if (ring == &dev_priv->ring[RCS] &&
+ mode != dev_priv->relative_constants_mode) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto err;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, INSTPM);
+ intel_ring_emit(ring, mask << 16 | mode);
+ intel_ring_advance(ring);
+
+ dev_priv->relative_constants_mode = mode;
+ }
+
+ if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+ ret = i915_reset_gen7_sol_offsets(dev, ring);
+ if (ret)
+ goto err;
+ }
+
+ exec_len = args->batch_len;
+ if (cliprects) {
+ for (i = 0; i < args->num_cliprects; i++) {
+ ret = i915_emit_box(dev, &cliprects[i],
+ args->DR1, args->DR4);
+ if (ret)
+ goto err;
+
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len,
+ flags);
+ if (ret)
+ goto err;
+ }
+ } else {
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len,
+ flags);
+ if (ret)
+ goto err;
+ }
+
+ trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+ i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
+ i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+
+err:
+ /* the request owns the ref now */
+ i915_gem_context_unreference(ctx);
+ eb_destroy(eb);
+
+ mutex_unlock(&dev->struct_mutex);
+
+pre_mutex_err:
+ kfree(cliprects);
+
+ /* intel_gpu_busy should also get a ref, so it will free when the device
+ * is really idle. */
+ intel_runtime_pm_put(dev_priv);
+ return ret;
+}
+
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_execbuffer2 exec2;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret, i;
+
+ if (args->buffer_count < 1) {
+ DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ /* Copy in the exec list from userland */
+ exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec_list == NULL || exec2_list == NULL) {
+ DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec_list,
+ to_user_ptr(args->buffers_ptr),
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_DEBUG("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < args->buffer_count; i++) {
+ exec2_list[i].handle = exec_list[i].handle;
+ exec2_list[i].relocation_count = exec_list[i].relocation_count;
+ exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+ exec2_list[i].alignment = exec_list[i].alignment;
+ exec2_list[i].offset = exec_list[i].offset;
+ if (INTEL_INFO(dev)->gen < 4)
+ exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+ else
+ exec2_list[i].flags = 0;
+ }
+
+ exec2.buffers_ptr = args->buffers_ptr;
+ exec2.buffer_count = args->buffer_count;
+ exec2.batch_start_offset = args->batch_start_offset;
+ exec2.batch_len = args->batch_len;
+ exec2.DR1 = args->DR1;
+ exec2.DR4 = args->DR4;
+ exec2.num_cliprects = args->num_cliprects;
+ exec2.cliprects_ptr = args->cliprects_ptr;
+ exec2.flags = I915_EXEC_RENDER;
+ i915_execbuffer2_set_context_id(exec2, 0);
+
+ ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+ if (!ret) {
+ struct drm_i915_gem_exec_object __user *user_exec_list =
+ to_user_ptr(args->buffers_ptr);
+
+ /* Copy the new buffer offsets back to the user's exec list. */
+ for (i = 0; i < args->buffer_count; i++) {
+ ret = __copy_to_user(&user_exec_list[i].offset,
+ &exec2_list[i].offset,
+ sizeof(user_exec_list[i].offset));
+ if (ret) {
+ ret = -EFAULT;
+ DRM_DEBUG("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ break;
+ }
+ }
+ }
+
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_execbuffer2 *args = data;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret;
+
+ if (args->buffer_count < 1 ||
+ args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
+ DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ if (args->rsvd2 != 0) {
+		DRM_DEBUG("dirty rsvd2 field\n");
+ return -EINVAL;
+ }
+
+ exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
+ GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ if (exec2_list == NULL)
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list),
+ args->buffer_count);
+ if (exec2_list == NULL) {
+ DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec2_list,
+ to_user_ptr(args->buffers_ptr),
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_DEBUG("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ struct drm_i915_gem_exec_object2 *user_exec_list =
+ to_user_ptr(args->buffers_ptr);
+ int i;
+
+ for (i = 0; i < args->buffer_count; i++) {
+ ret = __copy_to_user(&user_exec_list[i].offset,
+ &exec2_list[i].offset,
+ sizeof(user_exec_list[i].offset));
+ if (ret) {
+ ret = -EFAULT;
+ DRM_DEBUG("failed to copy %d exec entries "
+ "back to user\n",
+ args->buffer_count);
+ break;
+ }
+ }
+ }
+
+ drm_free_large(exec2_list);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
new file mode 100644
index 00000000000..8b3cde70336
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -0,0 +1,2150 @@
+/*
+ * Copyright © 2010 Daniel Vetter
+ * Copyright © 2011-2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
+static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
+
+bool intel_enable_ppgtt(struct drm_device *dev, bool full)
+{
+ if (i915.enable_ppgtt == 0)
+ return false;
+
+ if (i915.enable_ppgtt == 1 && full)
+ return false;
+
+ return true;
+}
+
+static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+{
+ if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+ return 0;
+
+ if (enable_ppgtt == 1)
+ return 1;
+
+ if (enable_ppgtt == 2 && HAS_PPGTT(dev))
+ return 2;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Disable ppgtt on SNB if VT-d is on. */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+ DRM_INFO("Disabling PPGTT because VT-d is on\n");
+ return 0;
+ }
+#endif
+
+ return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
+}
+
+static void ppgtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+static void ppgtt_unbind_vma(struct i915_vma *vma);
+static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
+
+static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid)
+{
+ gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
+ pte |= addr;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ pte |= PPAT_UNCACHED_INDEX;
+ break;
+ case I915_CACHE_WT:
+ pte |= PPAT_DISPLAY_ELLC_INDEX;
+ break;
+ default:
+ pte |= PPAT_CACHED_INDEX;
+ break;
+ }
+
+ return pte;
+}
+
+static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
+ dma_addr_t addr,
+ enum i915_cache_level level)
+{
+ gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
+ pde |= addr;
+ if (level != I915_CACHE_NONE)
+ pde |= PPAT_CACHED_PDE_INDEX;
+ else
+ pde |= PPAT_UNCACHED_INDEX;
+ return pde;
+}
+
+static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid)
+{
+ gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return pte;
+}
+
+static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid)
+{
+ gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ pte |= GEN7_PTE_CACHE_L3_LLC;
+ break;
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return pte;
+}
+
+static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid)
+{
+ gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+ /* Mark the page as writeable. Other platforms don't have a
+ * setting for read-only/writable, so this matches that behavior.
+ */
+ pte |= BYT_PTE_WRITEABLE;
+
+ if (level != I915_CACHE_NONE)
+ pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
+
+ return pte;
+}
+
+static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid)
+{
+ gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ pte |= HSW_PTE_ADDR_ENCODE(addr);
+
+ if (level != I915_CACHE_NONE)
+ pte |= HSW_WB_LLC_AGE3;
+
+ return pte;
+}
+
+static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid)
+{
+ gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ pte |= HSW_PTE_ADDR_ENCODE(addr);
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ break;
+ case I915_CACHE_WT:
+ pte |= HSW_WT_ELLC_LLC_AGE3;
+ break;
+ default:
+ pte |= HSW_WB_ELLC_LLC_AGE3;
+ break;
+ }
+
+ return pte;
+}
+
+/* Broadwell Page Directory Pointer Descriptors */
+static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
+ uint64_t val, bool synchronous)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ int ret;
+
+ BUG_ON(entry >= 4);
+
+ if (synchronous) {
+ I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
+ I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
+ return 0;
+ }
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
+ intel_ring_emit(ring, (u32)(val >> 32));
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
+ intel_ring_emit(ring, (u32)(val));
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_engine_cs *ring,
+ bool synchronous)
+{
+ int i, ret;
+
+ /* bit of a hack to find the actual last used pd */
+ int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
+
+ for (i = used_pd - 1; i >= 0; i--) {
+ dma_addr_t addr = ppgtt->pd_dma_addr[i];
+ ret = gen8_write_pdp(ring, i, addr, synchronous);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length,
+ bool use_scratch)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ gen8_gtt_pte_t *pt_vaddr, scratch_pte;
+ unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
+ unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
+ unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
+ unsigned num_entries = length >> PAGE_SHIFT;
+ unsigned last_pte, i;
+
+ scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
+ I915_CACHE_LLC, use_scratch);
+
+ while (num_entries) {
+ struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
+
+ last_pte = pte + num_entries;
+ if (last_pte > GEN8_PTES_PER_PAGE)
+ last_pte = GEN8_PTES_PER_PAGE;
+
+ pt_vaddr = kmap_atomic(page_table);
+
+ for (i = pte; i < last_pte; i++) {
+ pt_vaddr[i] = scratch_pte;
+ num_entries--;
+ }
+
+ if (!HAS_LLC(ppgtt->base.dev))
+ drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
+ kunmap_atomic(pt_vaddr);
+
+ pte = 0;
+ if (++pde == GEN8_PDES_PER_PAGE) {
+ pdpe++;
+ pde = 0;
+ }
+ }
+}
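
The pdpe/pde/pte trio above is plain field extraction from the GPU virtual
address. A runnable sketch of the decomposition, with shift and mask
constants assumed to mirror the gen8 defines (4 KiB pages, 512-entry
tables, 4 legacy PDPs):

	#include <stdint.h>
	#include <stdio.h>

	#define EX_PTE_SHIFT	12	/* 4 KiB pages */
	#define EX_PDE_SHIFT	21	/* 512 PTEs per table */
	#define EX_PDPE_SHIFT	30	/* 512 PDEs per directory */
	#define EX_TABLE_MASK	0x1ff	/* 512 entries */
	#define EX_PDPE_MASK	0x3	/* 4 legacy PDPs */

	int main(void)
	{
		uint64_t va = 0x40123000;	/* example GPU address */

		printf("pdpe=%llu pde=%llu pte=%llu\n",
		       (unsigned long long)(va >> EX_PDPE_SHIFT & EX_PDPE_MASK),
		       (unsigned long long)(va >> EX_PDE_SHIFT & EX_TABLE_MASK),
		       (unsigned long long)(va >> EX_PTE_SHIFT & EX_TABLE_MASK));
		return 0;
	}
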
+
+static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
+ struct sg_table *pages,
+ uint64_t start,
+ enum i915_cache_level cache_level)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ gen8_gtt_pte_t *pt_vaddr;
+ unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
+ unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
+ unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
+ struct sg_page_iter sg_iter;
+
+ pt_vaddr = NULL;
+
+ for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+ if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
+ break;
+
+ if (pt_vaddr == NULL)
+ pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
+
+ pt_vaddr[pte] =
+ gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
+ cache_level, true);
+ if (++pte == GEN8_PTES_PER_PAGE) {
+ if (!HAS_LLC(ppgtt->base.dev))
+ drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
+ kunmap_atomic(pt_vaddr);
+ pt_vaddr = NULL;
+ if (++pde == GEN8_PDES_PER_PAGE) {
+ pdpe++;
+ pde = 0;
+ }
+ pte = 0;
+ }
+ }
+ if (pt_vaddr) {
+ if (!HAS_LLC(ppgtt->base.dev))
+ drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
+ kunmap_atomic(pt_vaddr);
+ }
+}
+
+static void gen8_free_page_tables(struct page **pt_pages)
+{
+ int i;
+
+ if (pt_pages == NULL)
+ return;
+
+ for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
+ if (pt_pages[i])
+ __free_pages(pt_pages[i], 0);
+}
+
+static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
+
+ for (i = 0; i < ppgtt->num_pd_pages; i++) {
+ gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
+ kfree(ppgtt->gen8_pt_pages[i]);
+ kfree(ppgtt->gen8_pt_dma_addr[i]);
+ }
+
+ __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
+}
+
+static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
+{
+ struct pci_dev *hwdev = ppgtt->base.dev->pdev;
+ int i, j;
+
+ for (i = 0; i < ppgtt->num_pd_pages; i++) {
+ /* TODO: In the future we'll support sparse mappings, so this
+ * will have to change. */
+ if (!ppgtt->pd_dma_addr[i])
+ continue;
+
+ pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+
+ for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+ dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+ if (addr)
+ pci_unmap_page(hwdev, addr, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ }
+ }
+}
+
+static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+
+ list_del(&vm->global_link);
+ drm_mm_takedown(&vm->mm);
+
+ gen8_ppgtt_unmap_pages(ppgtt);
+ gen8_ppgtt_free(ppgtt);
+}
+
+static struct page **__gen8_alloc_page_tables(void)
+{
+ struct page **pt_pages;
+ int i;
+
+ pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
+ if (!pt_pages)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
+ pt_pages[i] = alloc_page(GFP_KERNEL);
+ if (!pt_pages[i])
+ goto bail;
+ }
+
+ return pt_pages;
+
+bail:
+ gen8_free_page_tables(pt_pages);
+ kfree(pt_pages);
+ return ERR_PTR(-ENOMEM);
+}
+
+static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
+ const int max_pdp)
+{
+ struct page **pt_pages[GEN8_LEGACY_PDPS];
+ int i, ret;
+
+ for (i = 0; i < max_pdp; i++) {
+ pt_pages[i] = __gen8_alloc_page_tables();
+ if (IS_ERR(pt_pages[i])) {
+ ret = PTR_ERR(pt_pages[i]);
+ goto unwind_out;
+ }
+ }
+
+	/* NB: Avoid touching gen8_pt_pages until last to keep the allocation
+	 * "atomic" for cleanup purposes.
+ */
+ for (i = 0; i < max_pdp; i++)
+ ppgtt->gen8_pt_pages[i] = pt_pages[i];
+
+ return 0;
+
+unwind_out:
+ while (i--) {
+ gen8_free_page_tables(pt_pages[i]);
+ kfree(pt_pages[i]);
+ }
+
+ return ret;
+}
+
+static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
+
+ for (i = 0; i < ppgtt->num_pd_pages; i++) {
+ ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
+ sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!ppgtt->gen8_pt_dma_addr[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
+ const int max_pdp)
+{
+ ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
+ if (!ppgtt->pd_pages)
+ return -ENOMEM;
+
+ ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
+ BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+
+ return 0;
+}
+
+static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
+ const int max_pdp)
+{
+ int ret;
+
+ ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
+ if (ret)
+ return ret;
+
+ ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
+ if (ret) {
+ __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
+ return ret;
+ }
+
+ ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
+
+ ret = gen8_ppgtt_allocate_dma(ppgtt);
+ if (ret)
+ gen8_ppgtt_free(ppgtt);
+
+ return ret;
+}
+
+static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
+ const int pd)
+{
+ dma_addr_t pd_addr;
+ int ret;
+
+ pd_addr = pci_map_page(ppgtt->base.dev->pdev,
+ &ppgtt->pd_pages[pd], 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+
+ ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
+ if (ret)
+ return ret;
+
+ ppgtt->pd_dma_addr[pd] = pd_addr;
+
+ return 0;
+}
+
+static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
+ const int pd,
+ const int pt)
+{
+ dma_addr_t pt_addr;
+ struct page *p;
+ int ret;
+
+ p = ppgtt->gen8_pt_pages[pd][pt];
+ pt_addr = pci_map_page(ppgtt->base.dev->pdev,
+ p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
+ if (ret)
+ return ret;
+
+ ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
+
+ return 0;
+}
+
+/**
+ * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP
+ * registers, with a net effect resembling a 2-level page table in normal x86
+ * terms. Each PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of
+ * legacy 32b address space.
+ *
+ * FIXME: split allocation into smaller pieces. For now we only ever do this
+ * once, but with full PPGTT, the multiple contiguous allocations will be bad.
+ * TODO: Do something with the size parameter
+ */
+static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
+{
+ const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
+ const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+ int i, j, ret;
+
+ if (size % (1<<30))
+ DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
+
+ /* 1. Do all our allocations for page directories and page tables. */
+ ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
+ if (ret)
+ return ret;
+
+ /*
+ * 2. Create DMA mappings for the page directories and page tables.
+ */
+ for (i = 0; i < max_pdp; i++) {
+ ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
+ if (ret)
+ goto bail;
+
+ for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+ ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
+ if (ret)
+ goto bail;
+ }
+ }
+
+ /*
+	 * 3. Map all the page directory entries to point to the page tables
+ * we've allocated.
+ *
+ * For now, the PPGTT helper functions all require that the PDEs are
+ * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
+ * will never need to touch the PDEs again.
+ */
+ for (i = 0; i < max_pdp; i++) {
+ gen8_ppgtt_pde_t *pd_vaddr;
+ pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
+ for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+ dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+ pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
+ I915_CACHE_LLC);
+ }
+ if (!HAS_LLC(ppgtt->base.dev))
+ drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
+ kunmap_atomic(pd_vaddr);
+ }
+
+ ppgtt->enable = gen8_ppgtt_enable;
+ ppgtt->switch_mm = gen8_mm_switch;
+ ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+ ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
+ ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+ ppgtt->base.start = 0;
+ ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;
+
+ ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+
+ DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
+ ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
+ DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
+ ppgtt->num_pd_entries,
+ (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
+ return 0;
+
+bail:
+ gen8_ppgtt_unmap_pages(ppgtt);
+ gen8_ppgtt_free(ppgtt);
+ return ret;
+}
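
A back-of-envelope check of the sizing used above: with 4 KiB pages and
512-entry tables, one PDP spans 512 * 512 * 4096 bytes = 1 GiB, so the
four legacy PDPs cover the full 4 GiB 32-bit space; that is exactly the
DIV_ROUND_UP(size, 1 << 30) directory count. As a sketch:

	#include <stdio.h>

	int main(void)
	{
		/* one PDP: 512 PDEs x 512 PTEs x 4096-byte pages */
		unsigned long long per_pdp = 512ULL * 512 * 4096;

		printf("per PDP: %llu GiB, 4 PDPs: %llu GiB\n",
		       per_pdp >> 30, (4 * per_pdp) >> 30);
		return 0;
	}
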
+
+static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
+{
+ struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
+ struct i915_address_space *vm = &ppgtt->base;
+ gen6_gtt_pte_t __iomem *pd_addr;
+ gen6_gtt_pte_t scratch_pte;
+ uint32_t pd_entry;
+ int pte, pde;
+
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+
+ pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
+ ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+
+ seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
+ ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
+ for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
+ u32 expected;
+ gen6_gtt_pte_t *pt_vaddr;
+ dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
+ pd_entry = readl(pd_addr + pde);
+ expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
+
+ if (pd_entry != expected)
+ seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
+ pde,
+ pd_entry,
+ expected);
+ seq_printf(m, "\tPDE: %x\n", pd_entry);
+
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
+		for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte += 4) {
+ unsigned long va =
+ (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
+ (pte * PAGE_SIZE);
+ int i;
+ bool found = false;
+ for (i = 0; i < 4; i++)
+ if (pt_vaddr[pte + i] != scratch_pte)
+ found = true;
+ if (!found)
+ continue;
+
+ seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
+ for (i = 0; i < 4; i++) {
+ if (pt_vaddr[pte + i] != scratch_pte)
+ seq_printf(m, " %08x", pt_vaddr[pte + i]);
+ else
+ seq_puts(m, " SCRATCH ");
+ }
+ seq_puts(m, "\n");
+ }
+ kunmap_atomic(pt_vaddr);
+ }
+}
+
+static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
+ gen6_gtt_pte_t __iomem *pd_addr;
+ uint32_t pd_entry;
+ int i;
+
+ WARN_ON(ppgtt->pd_offset & 0x3f);
+	pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
+ ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+
+ pt_addr = ppgtt->pt_dma_addr[i];
+ pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+ pd_entry |= GEN6_PDE_VALID;
+
+ writel(pd_entry, pd_addr + i);
+ }
+ readl(pd_addr);
+}
+
+static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
+{
+ BUG_ON(ppgtt->pd_offset & 0x3f);
+
+ return (ppgtt->pd_offset / 64) << 16;
+}
+
+static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_engine_cs *ring,
+ bool synchronous)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* If we're in reset, we can assume the GPU is sufficiently idle to
+ * manually frob these bits. Ideally we could use the ring functions,
+ * except our error handling makes it quite difficult (can't use
+ * intel_ring_begin, ring->flush, or intel_ring_advance)
+ *
+ * FIXME: We should try not to special case reset
+ */
+ if (synchronous ||
+ i915_reset_in_progress(&dev_priv->gpu_error)) {
+ WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+ POSTING_READ(RING_PP_DIR_BASE(ring));
+ return 0;
+ }
+
+ /* NB: TLBs must be flushed and invalidated before a switch */
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+ intel_ring_emit(ring, PP_DIR_DCLV_2G);
+ intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+ intel_ring_emit(ring, get_pd_offset(ppgtt));
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_engine_cs *ring,
+ bool synchronous)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* If we're in reset, we can assume the GPU is sufficiently idle to
+ * manually frob these bits. Ideally we could use the ring functions,
+ * except our error handling makes it quite difficult (can't use
+ * intel_ring_begin, ring->flush, or intel_ring_advance)
+ *
+ * FIXME: We should try not to special case reset
+ */
+ if (synchronous ||
+ i915_reset_in_progress(&dev_priv->gpu_error)) {
+ WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+ POSTING_READ(RING_PP_DIR_BASE(ring));
+ return 0;
+ }
+
+ /* NB: TLBs must be flushed and invalidated before a switch */
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+ intel_ring_emit(ring, PP_DIR_DCLV_2G);
+ intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+ intel_ring_emit(ring, get_pd_offset(ppgtt));
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ /* XXX: RCS is the only one to auto invalidate the TLBs? */
+ if (ring->id != RCS) {
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_engine_cs *ring,
+ bool synchronous)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!synchronous)
+ return 0;
+
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+
+ POSTING_READ(RING_PP_DIR_DCLV(ring));
+
+ return 0;
+}
+
+static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int j, ret;
+
+ for_each_ring(ring, dev_priv, j) {
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+ /* We promise to do a switch later with FULL PPGTT. If this is
+ * aliasing, this is the one and only switch we'll do */
+ if (USES_FULL_PPGTT(dev))
+ continue;
+
+ ret = ppgtt->switch_mm(ppgtt, ring, true);
+ if (ret)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ for_each_ring(ring, dev_priv, j)
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
+ return ret;
+}
+
+static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ uint32_t ecochk, ecobits;
+ int i;
+
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+ ecochk = I915_READ(GAM_ECOCHK);
+ if (IS_HASWELL(dev)) {
+ ecochk |= ECOCHK_PPGTT_WB_HSW;
+ } else {
+ ecochk |= ECOCHK_PPGTT_LLC_IVB;
+ ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
+ }
+ I915_WRITE(GAM_ECOCHK, ecochk);
+
+ for_each_ring(ring, dev_priv, i) {
+ int ret;
+ /* GFX_MODE is per-ring on gen7+ */
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+ /* We promise to do a switch later with FULL PPGTT. If this is
+ * aliasing, this is the one and only switch we'll do */
+ if (USES_FULL_PPGTT(dev))
+ continue;
+
+ ret = ppgtt->switch_mm(ppgtt, ring, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ uint32_t ecochk, gab_ctl, ecobits;
+ int i;
+
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
+ ECOBITS_PPGTT_CACHE64B);
+
+ gab_ctl = I915_READ(GAB_CTL);
+ I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ ecochk = I915_READ(GAM_ECOCHK);
+ I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
+
+ I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+ for_each_ring(ring, dev_priv, i) {
+ int ret = ppgtt->switch_mm(ppgtt, ring, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* PPGTT support for Sandybridge/Gen6 and later */
+static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length,
+ bool use_scratch)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ gen6_gtt_pte_t *pt_vaddr, scratch_pte;
+ unsigned first_entry = start >> PAGE_SHIFT;
+ unsigned num_entries = length >> PAGE_SHIFT;
+ unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
+ unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned last_pte, i;
+
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+
+ while (num_entries) {
+ last_pte = first_pte + num_entries;
+ if (last_pte > I915_PPGTT_PT_ENTRIES)
+ last_pte = I915_PPGTT_PT_ENTRIES;
+
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+
+ for (i = first_pte; i < last_pte; i++)
+ pt_vaddr[i] = scratch_pte;
+
+ kunmap_atomic(pt_vaddr);
+
+ num_entries -= last_pte - first_pte;
+ first_pte = 0;
+ act_pt++;
+ }
+}
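
The clear loop above advances in page-table-sized chunks, clamping each
pass at the table boundary and resetting the intra-table index. The same
clamp-and-advance idiom reduced to its bones (ENTRIES stands in for
I915_PPGTT_PT_ENTRIES, 1024 on gen6 given 4-byte PTEs in a 4 KiB page):

	#define ENTRIES 1024

	static void walk_chunks(unsigned int first, unsigned int count)
	{
		unsigned int table = first / ENTRIES;
		unsigned int entry = first % ENTRIES;

		while (count) {
			unsigned int last = entry + count;

			if (last > ENTRIES)
				last = ENTRIES;
			/* operate on [entry, last) within 'table' */
			count -= last - entry;
			entry = 0;
			table++;
		}
	}
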
+
+static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
+ struct sg_table *pages,
+ uint64_t start,
+ enum i915_cache_level cache_level)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ gen6_gtt_pte_t *pt_vaddr;
+ unsigned first_entry = start >> PAGE_SHIFT;
+ unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
+ unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ struct sg_page_iter sg_iter;
+
+ pt_vaddr = NULL;
+ for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+ if (pt_vaddr == NULL)
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+
+ pt_vaddr[act_pte] =
+ vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
+ cache_level, true);
+ if (++act_pte == I915_PPGTT_PT_ENTRIES) {
+ kunmap_atomic(pt_vaddr);
+ pt_vaddr = NULL;
+ act_pt++;
+ act_pte = 0;
+ }
+ }
+ if (pt_vaddr)
+ kunmap_atomic(pt_vaddr);
+}
+
+static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
+
+ if (ppgtt->pt_dma_addr) {
+ for (i = 0; i < ppgtt->num_pd_entries; i++)
+ pci_unmap_page(ppgtt->base.dev->pdev,
+ ppgtt->pt_dma_addr[i],
+ 4096, PCI_DMA_BIDIRECTIONAL);
+ }
+}
+
+static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
+
+ kfree(ppgtt->pt_dma_addr);
+ for (i = 0; i < ppgtt->num_pd_entries; i++)
+ __free_page(ppgtt->pt_pages[i]);
+ kfree(ppgtt->pt_pages);
+}
+
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+
+ list_del(&vm->global_link);
+ drm_mm_takedown(&ppgtt->base.mm);
+ drm_mm_remove_node(&ppgtt->node);
+
+ gen6_ppgtt_unmap_pages(ppgtt);
+ gen6_ppgtt_free(ppgtt);
+}
+
+static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool retried = false;
+ int ret;
+
+	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
+ * allocator works in address space sizes, so it's multiplied by page
+ * size. We allocate at the top of the GTT to avoid fragmentation.
+ */
+ BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+alloc:
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
+ &ppgtt->node, GEN6_PD_SIZE,
+ GEN6_PD_ALIGN, 0,
+ 0, dev_priv->gtt.base.total,
+ DRM_MM_TOPDOWN);
+ if (ret == -ENOSPC && !retried) {
+ ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
+ GEN6_PD_SIZE, GEN6_PD_ALIGN,
+ I915_CACHE_NONE,
+ 0, dev_priv->gtt.base.total,
+ 0);
+ if (ret)
+ return ret;
+
+ retried = true;
+ goto alloc;
+ }
+
+ if (ppgtt->node.start < dev_priv->gtt.mappable_end)
+ DRM_DEBUG("Forced to use aperture for PDEs\n");
+
+ ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
+ return ret;
+}
+
+static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
+
+ ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
+ GFP_KERNEL);
+
+ if (!ppgtt->pt_pages)
+ return -ENOMEM;
+
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
+ if (!ppgtt->pt_pages[i]) {
+ gen6_ppgtt_free(ppgtt);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
+{
+ int ret;
+
+ ret = gen6_ppgtt_allocate_page_directories(ppgtt);
+ if (ret)
+ return ret;
+
+ ret = gen6_ppgtt_allocate_page_tables(ppgtt);
+ if (ret) {
+ drm_mm_remove_node(&ppgtt->node);
+ return ret;
+ }
+
+ ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!ppgtt->pt_dma_addr) {
+ drm_mm_remove_node(&ppgtt->node);
+ gen6_ppgtt_free(ppgtt);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ int i;
+
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+
+ pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
+ gen6_ppgtt_unmap_pages(ppgtt);
+ return -EIO;
+ }
+
+ ppgtt->pt_dma_addr[i] = pt_addr;
+ }
+
+ return 0;
+}
+
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
+ if (IS_GEN6(dev)) {
+ ppgtt->enable = gen6_ppgtt_enable;
+ ppgtt->switch_mm = gen6_mm_switch;
+ } else if (IS_HASWELL(dev)) {
+ ppgtt->enable = gen7_ppgtt_enable;
+ ppgtt->switch_mm = hsw_mm_switch;
+ } else if (IS_GEN7(dev)) {
+ ppgtt->enable = gen7_ppgtt_enable;
+ ppgtt->switch_mm = gen7_mm_switch;
+ } else
+ BUG();
+
+ ret = gen6_ppgtt_alloc(ppgtt);
+ if (ret)
+ return ret;
+
+ ret = gen6_ppgtt_setup_page_tables(ppgtt);
+ if (ret) {
+ gen6_ppgtt_free(ppgtt);
+ return ret;
+ }
+
+ ppgtt->base.clear_range = gen6_ppgtt_clear_range;
+ ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->base.cleanup = gen6_ppgtt_cleanup;
+ ppgtt->base.start = 0;
+ ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
+ ppgtt->debug_dump = gen6_dump_ppgtt;
+
+ ppgtt->pd_offset =
+ ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
+
+ ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+
+ DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
+ ppgtt->node.size >> 20,
+ ppgtt->node.start / PAGE_SIZE);
+
+ return 0;
+}
+
+int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ ppgtt->base.dev = dev;
+ ppgtt->base.scratch = dev_priv->gtt.base.scratch;
+
+ if (INTEL_INFO(dev)->gen < 8)
+ ret = gen6_ppgtt_init(ppgtt);
+ else if (IS_GEN8(dev))
+ ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
+ else
+ BUG();
+
+ if (!ret) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ kref_init(&ppgtt->ref);
+ drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
+ ppgtt->base.total);
+ i915_init_vm(dev_priv, &ppgtt->base);
+ if (INTEL_INFO(dev)->gen < 8) {
+ gen6_write_pdes(ppgtt);
+ DRM_DEBUG("Adding PPGTT at offset %x\n",
+ ppgtt->pd_offset << 10);
+ }
+ }
+
+ return ret;
+}
+
+static void
+ppgtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
+ cache_level);
+}
+
+static void ppgtt_unbind_vma(struct i915_vma *vma)
+{
+ vma->vm->clear_range(vma->vm,
+ vma->node.start,
+ vma->obj->base.size,
+ true);
+}
+
+extern int intel_iommu_gfx_mapped;
+/* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static inline bool needs_idle_maps(struct drm_device *dev)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ /* Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
+ return true;
+#endif
+ return false;
+}
+
+static bool do_idling(struct drm_i915_private *dev_priv)
+{
+ bool ret = dev_priv->mm.interruptible;
+
+ if (unlikely(dev_priv->gtt.do_idle_maps)) {
+ dev_priv->mm.interruptible = false;
+ if (i915_gpu_idle(dev_priv->dev)) {
+ DRM_ERROR("Couldn't idle GPU\n");
+ /* Wait a bit, in hopes it avoids the hang */
+ udelay(10);
+ }
+ }
+
+ return ret;
+}
+
+static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
+{
+ if (unlikely(dev_priv->gtt.do_idle_maps))
+ dev_priv->mm.interruptible = interruptible;
+}
+
+void i915_check_and_clear_faults(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int i;
+
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
+
+ for_each_ring(ring, dev_priv, i) {
+ u32 fault_reg;
+ fault_reg = I915_READ(RING_FAULT_REG(ring));
+ if (fault_reg & RING_FAULT_VALID) {
+ DRM_DEBUG_DRIVER("Unexpected fault\n"
+					 "\tAddr: 0x%08lx\n"
+ "\tAddress space: %s\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault_reg & PAGE_MASK,
+ fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault_reg),
+ RING_FAULT_FAULT_TYPE(fault_reg));
+ I915_WRITE(RING_FAULT_REG(ring),
+ fault_reg & ~RING_FAULT_VALID);
+ }
+ }
+ POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+}
+
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Don't bother messing with faults pre GEN6 as we have little
+ * documentation supporting that it's a good idea.
+ */
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
+
+ i915_check_and_clear_faults(dev);
+
+ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+ dev_priv->gtt.base.start,
+ dev_priv->gtt.base.total,
+ true);
+}
+
+void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+
+ i915_check_and_clear_faults(dev);
+
+ /* First fill our portion of the GTT with scratch pages */
+ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+ dev_priv->gtt.base.start,
+ dev_priv->gtt.base.total,
+ true);
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+ &dev_priv->gtt.base);
+ if (!vma)
+ continue;
+
+ i915_gem_clflush_object(obj, obj->pin_display);
+ /* The bind_vma code tries to be smart about tracking mappings.
+ * Unfortunately above, we've just wiped out the mappings
+ * without telling our object about it. So we need to fake it.
+ */
+ obj->has_global_gtt_mapping = 0;
+ vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+ }
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ if (IS_CHERRYVIEW(dev))
+ chv_setup_private_ppat(dev_priv);
+ else
+ bdw_setup_private_ppat(dev_priv);
+
+ return;
+ }
+
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ /* TODO: Perhaps it shouldn't be gen6 specific */
+ if (i915_is_ggtt(vm)) {
+ if (dev_priv->mm.aliasing_ppgtt)
+ gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
+ continue;
+ }
+
+ gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
+ }
+
+ i915_gem_chipset_flush(dev);
+}
+
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+{
+ if (obj->has_dma_mapping)
+ return 0;
+
+ if (!dma_map_sg(&obj->base.dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
+{
+#ifdef writeq
+ writeq(pte, addr);
+#else
+ iowrite32((u32)pte, addr);
+ iowrite32(pte >> 32, addr + 4);
+#endif
+}
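
Where the architecture lacks writeq, the 64-bit PTE above is emitted as
two 32-bit writes, low dword first. A plain-memory sketch of the split;
against real MMIO the two halves are not atomic, which is why writeq is
preferred when the architecture provides it:

	#include <stdint.h>

	/* Store a 64-bit value as two 32-bit halves, low half first. */
	static void set_pte64(volatile uint32_t *slot, uint64_t pte)
	{
		slot[0] = (uint32_t)pte;
		slot[1] = (uint32_t)(pte >> 32);
	}
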
+
+static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+ struct sg_table *st,
+ uint64_t start,
+ enum i915_cache_level level)
+{
+ struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ unsigned first_entry = start >> PAGE_SHIFT;
+ gen8_gtt_pte_t __iomem *gtt_entries =
+ (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ int i = 0;
+ struct sg_page_iter sg_iter;
+ dma_addr_t addr = 0;
+
+ for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
+ addr = sg_dma_address(sg_iter.sg) +
+ (sg_iter.sg_pgoffset << PAGE_SHIFT);
+ gen8_set_pte(&gtt_entries[i],
+ gen8_pte_encode(addr, level, true));
+ i++;
+ }
+
+ /*
+ * XXX: This serves as a posting read to make sure that the PTE has
+	 * actually been updated. There is some concern that, even though
+	 * registers and PTEs are within the same BAR, they may be subject to
+	 * different NUMA access patterns. Therefore, even with the way we
+	 * assume hardware should work, we must keep this posting read for
+	 * paranoia.
+ */
+ if (i != 0)
+ WARN_ON(readq(&gtt_entries[i-1])
+ != gen8_pte_encode(addr, level, true));
+
+ /* This next bit makes the above posting read even more important. We
+ * want to flush the TLBs only after we're certain all the PTE updates
+ * have finished.
+ */
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+/*
+ * Binds an object into the global gtt with the specified cache level. The object
+ * will be accessible to the GPU via commands whose operands reference offsets
+ * within the global GTT as well as accessible by the GPU through the GMADR
+ * mapped BAR (dev_priv->mm.gtt->gtt).
+ */
+static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
+ struct sg_table *st,
+ uint64_t start,
+ enum i915_cache_level level)
+{
+ struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ unsigned first_entry = start >> PAGE_SHIFT;
+ gen6_gtt_pte_t __iomem *gtt_entries =
+ (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ int i = 0;
+ struct sg_page_iter sg_iter;
+ dma_addr_t addr;
+
+ for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
+ addr = sg_page_iter_dma_address(&sg_iter);
+ iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
+ i++;
+ }
+
+ /* XXX: This serves as a posting read to make sure that the PTE has
+	 * actually been updated. There is some concern that, even though
+	 * registers and PTEs are within the same BAR, they may be subject to
+	 * different NUMA access patterns. Therefore, even with the way we
+	 * assume hardware should work, we must keep this posting read for
+	 * paranoia.
+ */
+ if (i != 0)
+ WARN_ON(readl(&gtt_entries[i-1]) !=
+ vm->pte_encode(addr, level, true));
+
+ /* This next bit makes the above posting read even more important. We
+ * want to flush the TLBs only after we're certain all the PTE updates
+ * have finished.
+ */
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length,
+ bool use_scratch)
+{
+ struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ unsigned first_entry = start >> PAGE_SHIFT;
+ unsigned num_entries = length >> PAGE_SHIFT;
+ gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
+ (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+ const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = gen8_pte_encode(vm->scratch.addr,
+ I915_CACHE_LLC,
+ use_scratch);
+ for (i = 0; i < num_entries; i++)
+ gen8_set_pte(&gtt_base[i], scratch_pte);
+ readl(gtt_base);
+}
+
+static void gen6_ggtt_clear_range(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length,
+ bool use_scratch)
+{
+ struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ unsigned first_entry = start >> PAGE_SHIFT;
+ unsigned num_entries = length >> PAGE_SHIFT;
+ gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
+ (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+ const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
+
+ for (i = 0; i < num_entries; i++)
+ iowrite32(scratch_pte, &gtt_base[i]);
+ readl(gtt_base);
+}
+
+static void i915_ggtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ const unsigned long entry = vma->node.start >> PAGE_SHIFT;
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ BUG_ON(!i915_is_ggtt(vma->vm));
+ intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
+ vma->obj->has_global_gtt_mapping = 1;
+}
+
+static void i915_ggtt_clear_range(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length,
+ bool unused)
+{
+ unsigned first_entry = start >> PAGE_SHIFT;
+ unsigned num_entries = length >> PAGE_SHIFT;
+ intel_gtt_clear_range(first_entry, num_entries);
+}
+
+static void i915_ggtt_unbind_vma(struct i915_vma *vma)
+{
+ const unsigned int first = vma->node.start >> PAGE_SHIFT;
+ const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
+
+ BUG_ON(!i915_is_ggtt(vma->vm));
+ vma->obj->has_global_gtt_mapping = 0;
+ intel_gtt_clear_range(first, size);
+}
+
+static void ggtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct drm_device *dev = vma->vm->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ /* If there is no aliasing PPGTT, or the caller needs a global mapping,
+ * or we have a global mapping already but the cacheability flags have
+ * changed, set the global PTEs.
+ *
+ * If there is an aliasing PPGTT it is anecdotally faster, so use that
+ * instead if none of the above hold true.
+ *
+ * NB: A global mapping should only be needed for special regions like
+ * "gtt mappable", SNB errata, or if specified via special execbuf
+ * flags. At all other times, the GPU will use the aliasing PPGTT.
+ */
+ if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
+ if (!obj->has_global_gtt_mapping ||
+ (cache_level != obj->cache_level)) {
+ vma->vm->insert_entries(vma->vm, obj->pages,
+ vma->node.start,
+ cache_level);
+ obj->has_global_gtt_mapping = 1;
+ }
+ }
+
+ if (dev_priv->mm.aliasing_ppgtt &&
+ (!obj->has_aliasing_ppgtt_mapping ||
+ (cache_level != obj->cache_level))) {
+ struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+ appgtt->base.insert_entries(&appgtt->base,
+ vma->obj->pages,
+ vma->node.start,
+ cache_level);
+ vma->obj->has_aliasing_ppgtt_mapping = 1;
+ }
+}
+
+static void ggtt_unbind_vma(struct i915_vma *vma)
+{
+ struct drm_device *dev = vma->vm->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (obj->has_global_gtt_mapping) {
+ vma->vm->clear_range(vma->vm,
+ vma->node.start,
+ obj->base.size,
+ true);
+ obj->has_global_gtt_mapping = 0;
+ }
+
+ if (obj->has_aliasing_ppgtt_mapping) {
+ struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+ appgtt->base.clear_range(&appgtt->base,
+ vma->node.start,
+ obj->base.size,
+ true);
+ obj->has_aliasing_ppgtt_mapping = 0;
+ }
+}
+
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool interruptible;
+
+ interruptible = do_idling(dev_priv);
+
+ if (!obj->has_dma_mapping)
+ dma_unmap_sg(&dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL);
+
+ undo_idling(dev_priv, interruptible);
+}
+
+static void i915_gtt_color_adjust(struct drm_mm_node *node,
+ unsigned long color,
+ unsigned long *start,
+ unsigned long *end)
+{
+ if (node->color != color)
+ *start += 4096;
+
+ if (!list_empty(&node->node_list)) {
+ node = list_entry(node->node_list.next,
+ struct drm_mm_node,
+ node_list);
+ if (node->allocated && node->color != color)
+ *end -= 4096;
+ }
+}
+
+void i915_gem_setup_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
+{
+ /* Let GEM Manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch page.
+ * There are a number of places where the hardware apparently prefetches
+ * past the end of the object, and we've seen multiple hangs with the
+ * GPU head pointer stuck in a batchbuffer bound at the last page of the
+ * aperture. One page should be enough to keep any prefetching inside
+ * of the aperture.
+ */
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+ struct drm_mm_node *entry;
+ struct drm_i915_gem_object *obj;
+ unsigned long hole_start, hole_end;
+
+ BUG_ON(mappable_end > end);
+
+ /* Subtract the guard page ... */
+ drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
+ if (!HAS_LLC(dev))
+ dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
+
+ /* Mark any preallocated objects as occupied */
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+ int ret;
+ DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
+ i915_gem_obj_ggtt_offset(obj), obj->base.size);
+
+ WARN_ON(i915_gem_obj_ggtt_bound(obj));
+ ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
+ if (ret)
+ DRM_DEBUG_KMS("Reservation failed\n");
+ obj->has_global_gtt_mapping = 1;
+ }
+
+ dev_priv->gtt.base.start = start;
+ dev_priv->gtt.base.total = end - start;
+
+ /* Clear any non-preallocated blocks */
+ drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
+ DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
+ ggtt_vm->clear_range(ggtt_vm, hole_start,
+ hole_end - hole_start, true);
+ }
+
+ /* And finally clear the reserved guard page */
+ ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
+}
+
+void i915_gem_init_global_gtt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long gtt_size, mappable_size;
+
+ gtt_size = dev_priv->gtt.base.total;
+ mappable_size = dev_priv->gtt.mappable_end;
+
+ i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+}
+
+static int setup_scratch_page(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct page *page;
+ dma_addr_t dma_addr;
+
+ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+ if (page == NULL)
+ return -ENOMEM;
+ get_page(page);
+ set_pages_uc(page, 1);
+
+#ifdef CONFIG_INTEL_IOMMU
+ dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, dma_addr))
+ return -EINVAL;
+#else
+ dma_addr = page_to_phys(page);
+#endif
+ dev_priv->gtt.base.scratch.page = page;
+ dev_priv->gtt.base.scratch.addr = dma_addr;
+
+ return 0;
+}
+
+static void teardown_scratch_page(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct page *page = dev_priv->gtt.base.scratch.page;
+
+ set_pages_wb(page, 1);
+ pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ put_page(page);
+ __free_page(page);
+}
+
+static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+ return snb_gmch_ctl << 20;
+}
+
+static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
+{
+ bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
+ bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
+ if (bdw_gmch_ctl)
+ bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+
+#ifdef CONFIG_X86_32
+ /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
+ if (bdw_gmch_ctl > 4)
+ bdw_gmch_ctl = 4;
+#endif
+
+ return bdw_gmch_ctl << 20;
+}
+
+static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
+{
+ gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
+ gmch_ctrl &= SNB_GMCH_GGMS_MASK;
+
+ if (gmch_ctrl)
+ return 1 << (20 + gmch_ctrl);
+
+ return 0;
+}
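
[Editor's note: the three decoders above all turn a GMCH control field into a GTT size in bytes. A minimal standalone sketch of the gen6 case follows; the shift/mask values are assumptions taken from the i915 headers of this era and the register value is hypothetical, none of it part of the patch.]

#include <stdio.h>
#include <stdint.h>

/* Assumed field position for the SNB GGMS field (verify against i915_reg.h). */
#define SNB_GMCH_GGMS_SHIFT 8
#define SNB_GMCH_GGMS_MASK  0x3

int main(void)
{
        uint16_t snb_gmch_ctl = 0x0201; /* hypothetical config-space value */
        unsigned ggms = (snb_gmch_ctl >> SNB_GMCH_GGMS_SHIFT) & SNB_GMCH_GGMS_MASK;
        uint64_t gtt_bytes = (uint64_t)ggms << 20; /* gen6: GGMS counts MiB of PTE space */
        uint64_t ptes = gtt_bytes / 4;             /* one 32-bit PTE per 4 KiB page */

        printf("GGMS %u -> %llu MiB of PTEs -> %llu MiB addressable\n",
               ggms, (unsigned long long)(gtt_bytes >> 20),
               (unsigned long long)(ptes * 4096 >> 20));
        return 0;
}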
+
+static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
+ return snb_gmch_ctl << 25; /* 32 MB units */
+}
+
+static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
+{
+ bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
+ bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
+ return bdw_gmch_ctl << 25; /* 32 MB units */
+}
+
+static size_t chv_get_stolen_size(u16 gmch_ctrl)
+{
+ gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
+ gmch_ctrl &= SNB_GMCH_GMS_MASK;
+
+ /*
+ * 0x0 to 0x10: 32MB increments starting at 0MB
+ * 0x11 to 0x16: 4MB increments starting at 8MB
+ * 0x17 to 0x1d: 4MB increments starting at 36MB
+ */
+ if (gmch_ctrl < 0x11)
+ return gmch_ctrl << 25;
+ else if (gmch_ctrl < 0x17)
+ return (gmch_ctrl - 0x11 + 2) << 22;
+ else
+ return (gmch_ctrl - 0x17 + 9) << 22;
+}
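
[Editor's note: a worked decode of the three CHV stolen-size ranges above, restated in MiB as a standalone sketch; the probe values are illustrative only.]

#include <stdio.h>

static unsigned chv_stolen_mb(unsigned gms)
{
        /* mirrors chv_get_stolen_size() above, expressed in MiB */
        if (gms < 0x11)
                return gms * 32;             /* 32 MiB steps from 0 MiB */
        else if (gms < 0x17)
                return (gms - 0x11 + 2) * 4; /* 4 MiB steps from 8 MiB */
        else
                return (gms - 0x17 + 9) * 4; /* 4 MiB steps from 36 MiB */
}

int main(void)
{
        unsigned probe[] = { 0x10, 0x11, 0x16, 0x17, 0x1d };

        for (unsigned i = 0; i < sizeof(probe) / sizeof(probe[0]); i++)
                printf("GMS 0x%02x -> %u MiB stolen\n",
                       probe[i], chv_stolen_mb(probe[i]));
        return 0;
}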
+
+static int ggtt_probe_common(struct drm_device *dev,
+ size_t gtt_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ phys_addr_t gtt_phys_addr;
+ int ret;
+
+ /* For modern GENs the PTEs and register space are split in the BAR */
+ gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
+ (pci_resource_len(dev->pdev, 0) / 2);
+
+ dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
+ if (!dev_priv->gtt.gsm) {
+ DRM_ERROR("Failed to map the gtt page table\n");
+ return -ENOMEM;
+ }
+
+ ret = setup_scratch_page(dev);
+ if (ret) {
+ DRM_ERROR("Scratch setup failed\n");
+ /* iounmap will also get called at remove, but meh */
+ iounmap(dev_priv->gtt.gsm);
+ }
+
+ return ret;
+}
+
+/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
+ * bits. When using advanced contexts, each context stores its own PAT, but
+ * writing this data shouldn't be harmful even in those cases. */
+static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
+{
+ uint64_t pat;
+
+ pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
+ GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
+ GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
+ GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
+ GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
+ GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
+ GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
+ GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+
+ /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
+ * write would work. */
+ I915_WRITE(GEN8_PRIVATE_PAT, pat);
+ I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
+}
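
[Editor's note: GEN8_PPAT(i, x) simply places an 8-bit PAT entry at byte i of a 64-bit word, which the function above then splits across two 32-bit register writes. A standalone sketch with illustrative entry values:]

#include <stdio.h>
#include <stdint.h>

#define PPAT(i, x) ((uint64_t)(x) << ((i) * 8)) /* same packing as GEN8_PPAT() */

int main(void)
{
        /* hypothetical table: entry 0 = 0x07, entry 3 = 0x00, entry 4 = 0x0f */
        uint64_t pat = PPAT(0, 0x07) | PPAT(3, 0x00) | PPAT(4, 0x0f);

        /* entries 0-3 land in the low dword, 4-7 in the high dword */
        printf("low dword 0x%08x, high dword 0x%08x\n",
               (uint32_t)pat, (uint32_t)(pat >> 32));
        return 0;
}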
+
+static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
+{
+ uint64_t pat;
+
+ /*
+ * Map WB on BDW to snooped on CHV.
+ *
+ * Only the snoop bit has meaning for CHV, the rest is
+ * ignored.
+ *
+ * Note that the hardware enforces snooping for all page
+ * table accesses. The snoop bit is actually ignored for
+ * PDEs.
+ */
+ pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(1, 0) |
+ GEN8_PPAT(2, 0) |
+ GEN8_PPAT(3, 0) |
+ GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+ I915_WRITE(GEN8_PRIVATE_PAT, pat);
+ I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
+}
+
+static int gen8_gmch_probe(struct drm_device *dev,
+ size_t *gtt_total,
+ size_t *stolen,
+ phys_addr_t *mappable_base,
+ unsigned long *mappable_end)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned int gtt_size;
+ u16 snb_gmch_ctl;
+ int ret;
+
+ /* TODO: We're not aware of mappable constraints on gen8 yet */
+ *mappable_base = pci_resource_start(dev->pdev, 2);
+ *mappable_end = pci_resource_len(dev->pdev, 2);
+
+ if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
+ pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
+
+ pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+
+ if (IS_CHERRYVIEW(dev)) {
+ *stolen = chv_get_stolen_size(snb_gmch_ctl);
+ gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
+ } else {
+ *stolen = gen8_get_stolen_size(snb_gmch_ctl);
+ gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+ }
+
+ *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
+
+ if (IS_CHERRYVIEW(dev))
+ chv_setup_private_ppat(dev_priv);
+ else
+ bdw_setup_private_ppat(dev_priv);
+
+ ret = ggtt_probe_common(dev, gtt_size);
+
+ dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
+ dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
+
+ return ret;
+}
+
+static int gen6_gmch_probe(struct drm_device *dev,
+ size_t *gtt_total,
+ size_t *stolen,
+ phys_addr_t *mappable_base,
+ unsigned long *mappable_end)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned int gtt_size;
+ u16 snb_gmch_ctl;
+ int ret;
+
+ *mappable_base = pci_resource_start(dev->pdev, 2);
+ *mappable_end = pci_resource_len(dev->pdev, 2);
+
+ /* 64/512MB is the current min/max we actually know of, but this is just
+ * a coarse sanity check.
+ */
+ if (*mappable_end < (64<<20) || *mappable_end > (512<<20)) {
+ DRM_ERROR("Unknown GMADR size (%lx)\n",
+ dev_priv->gtt.mappable_end);
+ return -ENXIO;
+ }
+
+ if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
+ pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+ pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+
+ *stolen = gen6_get_stolen_size(snb_gmch_ctl);
+
+ gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
+ *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
+
+ ret = ggtt_probe_common(dev, gtt_size);
+
+ dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
+ dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
+
+ return ret;
+}
+
+static void gen6_gmch_remove(struct i915_address_space *vm)
+{
+ struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
+
+ if (drm_mm_initialized(&vm->mm)) {
+ drm_mm_takedown(&vm->mm);
+ list_del(&vm->global_link);
+ }
+ iounmap(gtt->gsm);
+ teardown_scratch_page(vm->dev);
+}
+
+static int i915_gmch_probe(struct drm_device *dev,
+ size_t *gtt_total,
+ size_t *stolen,
+ phys_addr_t *mappable_base,
+ unsigned long *mappable_end)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
+ if (!ret) {
+ DRM_ERROR("failed to set up gmch\n");
+ return -EIO;
+ }
+
+ intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
+
+ dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
+ dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
+
+ if (unlikely(dev_priv->gtt.do_idle_maps))
+ DRM_INFO("applying Ironlake quirks for intel_iommu\n");
+
+ return 0;
+}
+
+static void i915_gmch_remove(struct i915_address_space *vm)
+{
+ if (drm_mm_initialized(&vm->mm)) {
+ drm_mm_takedown(&vm->mm);
+ list_del(&vm->global_link);
+ }
+ intel_gmch_remove();
+}
+
+int i915_gem_gtt_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_gtt *gtt = &dev_priv->gtt;
+ int ret;
+
+ if (INTEL_INFO(dev)->gen <= 5) {
+ gtt->gtt_probe = i915_gmch_probe;
+ gtt->base.cleanup = i915_gmch_remove;
+ } else if (INTEL_INFO(dev)->gen < 8) {
+ gtt->gtt_probe = gen6_gmch_probe;
+ gtt->base.cleanup = gen6_gmch_remove;
+ if (IS_HASWELL(dev) && dev_priv->ellc_size)
+ gtt->base.pte_encode = iris_pte_encode;
+ else if (IS_HASWELL(dev))
+ gtt->base.pte_encode = hsw_pte_encode;
+ else if (IS_VALLEYVIEW(dev))
+ gtt->base.pte_encode = byt_pte_encode;
+ else if (INTEL_INFO(dev)->gen >= 7)
+ gtt->base.pte_encode = ivb_pte_encode;
+ else
+ gtt->base.pte_encode = snb_pte_encode;
+ } else {
+ dev_priv->gtt.gtt_probe = gen8_gmch_probe;
+ dev_priv->gtt.base.cleanup = gen6_gmch_remove;
+ }
+
+ ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
+ &gtt->mappable_base, &gtt->mappable_end);
+ if (ret)
+ return ret;
+
+ gtt->base.dev = dev;
+
+ /* GMADR is the PCI mmio aperture into the global GTT. */
+ DRM_INFO("Memory usable by graphics device = %zdM\n",
+ gtt->base.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
+#ifdef CONFIG_INTEL_IOMMU
+ if (intel_iommu_gfx_mapped)
+ DRM_INFO("VT-d active for gfx access\n");
+#endif
+ /*
+ * i915.enable_ppgtt is read-only, so do an early pass to validate the
+ * user's requested state against the hardware/driver capabilities. We
+ * do this now so that we can print out any log messages once rather
+ * than every time we check intel_enable_ppgtt().
+ */
+ i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
+ DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
+
+ return 0;
+}
+
+static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (vma == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&vma->vma_link);
+ INIT_LIST_HEAD(&vma->mm_list);
+ INIT_LIST_HEAD(&vma->exec_list);
+ vma->vm = vm;
+ vma->obj = obj;
+
+ switch (INTEL_INFO(vm->dev)->gen) {
+ case 8:
+ case 7:
+ case 6:
+ if (i915_is_ggtt(vm)) {
+ vma->unbind_vma = ggtt_unbind_vma;
+ vma->bind_vma = ggtt_bind_vma;
+ } else {
+ vma->unbind_vma = ppgtt_unbind_vma;
+ vma->bind_vma = ppgtt_bind_vma;
+ }
+ break;
+ case 5:
+ case 4:
+ case 3:
+ case 2:
+ BUG_ON(!i915_is_ggtt(vm));
+ vma->unbind_vma = i915_ggtt_unbind_vma;
+ vma->bind_vma = i915_ggtt_bind_vma;
+ break;
+ default:
+ BUG();
+ }
+
+ /* Keep GGTT vmas first to make debug easier */
+ if (i915_is_ggtt(vm))
+ list_add(&vma->vma_link, &obj->vma_list);
+ else
+ list_add_tail(&vma->vma_link, &obj->vma_list);
+
+ return vma;
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (!vma)
+ vma = __i915_gem_vma_create(obj, vm);
+
+ return vma;
+}
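
[Editor's note: the lookup-or-create idiom above keeps at most one VMA per (object, address space) pair. A self-contained miniature of the same pattern, with hypothetical types that are not the driver's:]

#include <stdio.h>
#include <stdlib.h>

struct vma { struct vma *next; int vm_id; };
struct obj { struct vma *vmas; };

/* find an existing per-address-space mapping, else create one */
static struct vma *lookup_or_create(struct obj *o, int vm_id)
{
        struct vma *v;

        for (v = o->vmas; v; v = v->next)
                if (v->vm_id == vm_id)
                        return v;
        v = calloc(1, sizeof(*v));
        if (!v)
                return NULL;
        v->vm_id = vm_id;
        v->next = o->vmas;
        o->vmas = v;
        return v;
}

int main(void)
{
        struct obj o = { 0 };
        struct vma *a = lookup_or_create(&o, 1);
        struct vma *b = lookup_or_create(&o, 1); /* second call reuses */

        printf("same vma: %s\n", a == b ? "yes" : "no");
        return 0;
}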
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
new file mode 100644
index 00000000000..1b96a06be3c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Please try to maintain the following order within this file unless it makes
+ * sense to do otherwise. From top to bottom:
+ * 1. typedefs
+ * 2. #defines and macros
+ * 3. structure definitions
+ * 4. function prototypes
+ *
+ * Within each section, please try to order by generation in ascending order,
+ * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
+ */
+
+#ifndef __I915_GEM_GTT_H__
+#define __I915_GEM_GTT_H__
+
+typedef uint32_t gen6_gtt_pte_t;
+typedef uint64_t gen8_gtt_pte_t;
+typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
+
+#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
+
+#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+/* gen6-hsw has bits 11:4 for physical address bits 39:32 */
+#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PTE_CACHE_LLC (2 << 1)
+#define GEN6_PTE_UNCACHED (1 << 1)
+#define GEN6_PTE_VALID (1 << 0)
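
[Editor's note: a standalone sketch of the GEN6_GTT_ADDR_ENCODE() trick above, which folds physical address bits 39:32 into PTE bits 11:4; the address is illustrative.]

#include <stdio.h>
#include <stdint.h>

#define ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) /* as GEN6_GTT_ADDR_ENCODE */

int main(void)
{
        uint64_t addr = 0x123456000ull;        /* 4 KiB aligned, bits 39:32 = 0x01 */
        uint32_t pte = (uint32_t)ENCODE(addr); /* bit 32 reappears as PTE bit 4 */

        printf("addr 0x%llx -> pte 0x%08x\n", (unsigned long long)addr, pte);
        return 0;
}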
+
+#define GEN6_PPGTT_PD_ENTRIES 512
+#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
+#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
+#define GEN6_PDE_VALID (1 << 0)
+
+#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
+
+#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
+#define BYT_PTE_WRITEABLE (1 << 1)
+
+/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
+ * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
+ */
+#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
+ (((bits) & 0x8) << (11 - 3)))
+#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
+#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
+#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
+#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
+#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_PTE_UNCACHED (0)
+#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
+#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
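
[Editor's note: a quick standalone check of the HSW_CACHEABILITY_CONTROL() split above; the expected results match the HSW_WB_LLC_AGE3 and HSW_WB_ELLC_LLC_AGE0 definitions.]

#include <stdio.h>

/* same split as HSW_CACHEABILITY_CONTROL(): CC bits 2:0 -> PTE 3:1, CC bit 3 -> PTE 11 */
#define CC(bits) ((((bits) & 0x7) << 1) | (((bits) & 0x8) << (11 - 3)))

int main(void)
{
        printf("CC 0x2 -> 0x%03x\n", CC(0x2)); /* 0x004, WB LLC age 3 */
        printf("CC 0xb -> 0x%03x\n", CC(0xb)); /* 0x806, WB eLLC+LLC age 0 */
        return 0;
}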
+
+/* GEN8 legacy style address is defined as a 3 level page table:
+ * 31:30 | 29:21 | 20:12 | 11:0
+ * PDPE | PDE | PTE | offset
+ * The difference from a normal x86 3-level page table is that the PDPEs
+ * are programmed via register.
+ */
+#define GEN8_PDPE_SHIFT 30
+#define GEN8_PDPE_MASK 0x3
+#define GEN8_PDE_SHIFT 21
+#define GEN8_PDE_MASK 0x1ff
+#define GEN8_PTE_SHIFT 12
+#define GEN8_PTE_MASK 0x1ff
+#define GEN8_LEGACY_PDPS 4
+#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
+#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
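
[Editor's note: a standalone sketch decomposing a 32-bit GPU virtual address with the shifts and masks defined above; the address is illustrative.]

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t addr = 0x5abcd123;             /* illustrative GPU virtual address */
        unsigned pdpe   = (addr >> 30) & 0x3;   /* GEN8_PDPE_SHIFT/MASK */
        unsigned pde    = (addr >> 21) & 0x1ff; /* GEN8_PDE_SHIFT/MASK */
        unsigned pte    = (addr >> 12) & 0x1ff; /* GEN8_PTE_SHIFT/MASK */
        unsigned offset = addr & 0xfff;         /* byte within the 4 KiB page */

        printf("addr 0x%08x -> pdpe %u, pde %u, pte %u, offset 0x%03x\n",
               addr, pdpe, pde, pte, offset);
        return 0;
}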
+
+#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
+#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
+#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
+#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
+
+#define CHV_PPAT_SNOOP (1<<6)
+#define GEN8_PPAT_AGE(x) (x<<4)
+#define GEN8_PPAT_LLCeLLC (3<<2)
+#define GEN8_PPAT_LLCELLC (2<<2)
+#define GEN8_PPAT_LLC (1<<2)
+#define GEN8_PPAT_WB (3<<0)
+#define GEN8_PPAT_WT (2<<0)
+#define GEN8_PPAT_WC (1<<0)
+#define GEN8_PPAT_UC (0<<0)
+#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
+#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
+
+enum i915_cache_level;
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+ struct drm_mm_node node;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+
+ /** This object's place on the active/inactive lists */
+ struct list_head mm_list;
+
+ struct list_head vma_link; /* Link in the object's VMA list */
+
+ /** This vma's place in the batchbuffer or on the eviction list */
+ struct list_head exec_list;
+
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ struct hlist_node exec_node;
+ unsigned long exec_handle;
+ struct drm_i915_gem_exec_object2 *exec_entry;
+
+ /**
+ * How many users have pinned this object in GTT space. The following
+ * users can each hold at most one reference: pwrite/pread, pin_ioctl
+ * (via user_pin_count), execbuffer (objects are not allowed multiple
+ * times for the same batchbuffer), and the framebuffer code. When
+ * switching/pageflipping, the framebuffer code has at most two buffers
+ * pinned per crtc.
+ *
+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+ * bits with absolutely no headroom. So use 4 bits. */
+ unsigned int pin_count:4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+
+ /** Unmap an object from an address space. This usually consists of
+ * setting the valid PTE entries to a reserved scratch page. */
+ void (*unbind_vma)(struct i915_vma *vma);
+ /* Map an object into an address space with the given cache flags. */
+#define GLOBAL_BIND (1<<0)
+ void (*bind_vma)(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+};
+
+struct i915_address_space {
+ struct drm_mm mm;
+ struct drm_device *dev;
+ struct list_head global_link;
+ unsigned long start; /* Start offset always 0 for dri2 */
+ size_t total; /* size addr space maps (ex. 2GB for ggtt) */
+
+ struct {
+ dma_addr_t addr;
+ struct page *page;
+ } scratch;
+
+ /**
+ * List of objects currently involved in rendering.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /* FIXME: Need a more generic return type */
+ gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid); /* Create a valid PTE */
+ void (*clear_range)(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length,
+ bool use_scratch);
+ void (*insert_entries)(struct i915_address_space *vm,
+ struct sg_table *st,
+ uint64_t start,
+ enum i915_cache_level cache_level);
+ void (*cleanup)(struct i915_address_space *vm);
+};
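
[Editor's note: clear_range/insert_entries/cleanup above form a small per-generation vtable; i915_gem_gtt_init() in the .c file picks the implementations at probe time. A self-contained miniature of that dispatch, with hypothetical names:]

#include <stdio.h>

struct addr_space {
        const char *name;
        void (*clear_range)(struct addr_space *vm, unsigned long start,
                            unsigned long length);
};

static void gen6_clear(struct addr_space *vm, unsigned long start,
                       unsigned long length)
{
        printf("%s: clearing [%lu, %lu)\n", vm->name, start, start + length);
}

int main(void)
{
        /* probe-time wiring, as i915_gem_gtt_init() does for gtt->base.clear_range */
        struct addr_space ggtt = { .name = "ggtt", .clear_range = gen6_clear };

        ggtt.clear_range(&ggtt, 0, 4096);
        return 0;
}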
+
+/* The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations, GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_gtt {
+ struct i915_address_space base;
+ size_t stolen_size; /* Total size of stolen memory */
+
+ unsigned long mappable_end; /* End offset that we can CPU map */
+ struct io_mapping *mappable; /* Mapping to our CPU mappable region */
+ phys_addr_t mappable_base; /* PA of our GMADR */
+
+ /** "Graphics Stolen Memory" holds the global PTEs */
+ void __iomem *gsm;
+
+ bool do_idle_maps;
+
+ int mtrr;
+
+ /* global gtt ops */
+ int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+ size_t *stolen, phys_addr_t *mappable_base,
+ unsigned long *mappable_end);
+};
+
+struct i915_hw_ppgtt {
+ struct i915_address_space base;
+ struct kref ref;
+ struct drm_mm_node node;
+ unsigned num_pd_entries;
+ unsigned num_pd_pages; /* gen8+ */
+ union {
+ struct page **pt_pages;
+ struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
+ };
+ struct page *pd_pages;
+ union {
+ uint32_t pd_offset;
+ dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
+ };
+ union {
+ dma_addr_t *pt_dma_addr;
+ dma_addr_t *gen8_pt_dma_addr[4];
+ };
+
+ struct intel_context *ctx;
+
+ int (*enable)(struct i915_hw_ppgtt *ppgtt);
+ int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
+ struct intel_engine_cs *ring,
+ bool synchronous);
+ void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+};
+
+int i915_gem_gtt_init(struct drm_device *dev);
+void i915_gem_init_global_gtt(struct drm_device *dev);
+void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+ unsigned long mappable_end, unsigned long end);
+
+bool intel_enable_ppgtt(struct drm_device *dev, bool full);
+int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
+
+void i915_check_and_clear_faults(struct drm_device *dev);
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+
+int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
new file mode 100644
index 00000000000..34894b57306
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Mika Kuoppala <mika.kuoppala@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_renderstate.h"
+
+struct i915_render_state {
+ struct drm_i915_gem_object *obj;
+ unsigned long ggtt_offset;
+ u32 *batch;
+ u32 size;
+ u32 len;
+};
+
+static struct i915_render_state *render_state_alloc(struct drm_device *dev)
+{
+ struct i915_render_state *so;
+ struct page *page;
+ int ret;
+
+ so = kzalloc(sizeof(*so), GFP_KERNEL);
+ if (!so)
+ return ERR_PTR(-ENOMEM);
+
+ so->obj = i915_gem_alloc_object(dev, 4096);
+ if (so->obj == NULL) {
+ ret = -ENOMEM;
+ goto free;
+ }
+ so->size = 4096;
+
+ ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
+ if (ret)
+ goto free_gem;
+
+ BUG_ON(so->obj->pages->nents != 1);
+ page = sg_page(so->obj->pages->sgl);
+
+ so->batch = kmap(page);
+ if (!so->batch) {
+ ret = -ENOMEM;
+ goto unpin;
+ }
+
+ so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
+
+ return so;
+unpin:
+ i915_gem_object_ggtt_unpin(so->obj);
+free_gem:
+ drm_gem_object_unreference(&so->obj->base);
+free:
+ kfree(so);
+ return ERR_PTR(ret);
+}
+
+static void render_state_free(struct i915_render_state *so)
+{
+ kunmap(kmap_to_page(so->batch));
+ i915_gem_object_ggtt_unpin(so->obj);
+ drm_gem_object_unreference(&so->obj->base);
+ kfree(so);
+}
+
+static const struct intel_renderstate_rodata *
+render_state_get_rodata(struct drm_device *dev, const int gen)
+{
+ switch (gen) {
+ case 6:
+ return &gen6_null_state;
+ case 7:
+ return &gen7_null_state;
+ case 8:
+ return &gen8_null_state;
+ }
+
+ return NULL;
+}
+
+static int render_state_setup(const int gen,
+ const struct intel_renderstate_rodata *rodata,
+ struct i915_render_state *so)
+{
+ const u64 goffset = i915_gem_obj_ggtt_offset(so->obj);
+ u32 reloc_index = 0;
+ u32 * const d = so->batch;
+ unsigned int i = 0;
+ int ret;
+
+ if (!rodata || rodata->batch_items * 4 > so->size)
+ return -EINVAL;
+
+ ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
+ if (ret)
+ return ret;
+
+ while (i < rodata->batch_items) {
+ u32 s = rodata->batch[i];
+
+ if (reloc_index < rodata->reloc_items &&
+ i * 4 == rodata->reloc[reloc_index]) {
+
+ s += goffset & 0xffffffff;
+
+ /* We keep batch offsets to a max of 32 bits */
+ if (gen >= 8) {
+ if (i + 1 >= rodata->batch_items ||
+ rodata->batch[i + 1] != 0)
+ return -EINVAL;
+
+ d[i] = s;
+ i++;
+ s = (goffset & 0xffffffff00000000ull) >> 32;
+ }
+
+ reloc_index++;
+ }
+
+ d[i] = s;
+ i++;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
+ if (ret)
+ return ret;
+
+ if (rodata->reloc_items != reloc_index) {
+ DRM_ERROR("not all relocs resolved, %d out of %d\n",
+ reloc_index, rodata->reloc_items);
+ return -EINVAL;
+ }
+
+ so->len = rodata->batch_items * 4;
+
+ return 0;
+}
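
[Editor's note: on gen8+ the loop above emits a relocation as two consecutive dwords, low half then high half, so batch offsets themselves stay 32-bit. A standalone sketch with a hypothetical GGTT offset:]

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t goffset = 0x1ffff1000ull;  /* hypothetical GGTT offset above 4 GiB */
        uint32_t batch[2] = { 0x100, 0 };   /* reloc site: low dword holds a delta,
                                             * the following dword must be zero */

        /* as in render_state_setup(): add the low half, then store the high half */
        batch[0] += (uint32_t)(goffset & 0xffffffff);
        batch[1]  = (uint32_t)(goffset >> 32);

        printf("reloc -> lo 0x%08x hi 0x%08x\n", batch[0], batch[1]);
        return 0;
}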
+
+int i915_gem_render_state_init(struct intel_engine_cs *ring)
+{
+ const int gen = INTEL_INFO(ring->dev)->gen;
+ struct i915_render_state *so;
+ const struct intel_renderstate_rodata *rodata;
+ int ret;
+
+ if (WARN_ON(ring->id != RCS))
+ return -ENOENT;
+
+ rodata = render_state_get_rodata(ring->dev, gen);
+ if (rodata == NULL)
+ return 0;
+
+ so = render_state_alloc(ring->dev);
+ if (IS_ERR(so))
+ return PTR_ERR(so);
+
+ ret = render_state_setup(gen, rodata, so);
+ if (ret)
+ goto out;
+
+ ret = ring->dispatch_execbuffer(ring,
+ i915_gem_obj_ggtt_offset(so->obj),
+ so->len,
+ I915_DISPATCH_SECURE);
+ if (ret)
+ goto out;
+
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(so->obj), ring);
+
+ ret = __i915_add_request(ring, NULL, so->obj, NULL);
+ /* __i915_add_request moves object to inactive if it fails */
+out:
+ render_state_free(so);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
new file mode 100644
index 00000000000..7465ab0fd39
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright © 2008-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+/*
+ * The BIOS typically reserves some of the system's memory for the exclusive
+ * use of the integrated graphics. This memory is no longer available for
+ * use by the OS and so the user finds that his system has less memory
+ * available than he put in. We refer to this memory as stolen.
+ *
+ * The BIOS will allocate its framebuffer from the stolen memory. Our
+ * goal is to try to reuse that object for our own fbcon, which must always
+ * be available for panics. Anything else we can reuse the stolen memory
+ * for is a boon.
+ */
+
+static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct resource *r;
+ u32 base;
+
+ /* Almost universally we can find the Graphics Base of Stolen Memory
+ * at offset 0x5c in the igfx configuration space. On a few (desktop)
+ * machines this is also mirrored in the bridge device at different
+ * locations, or in the MCHBAR. On gen2, the layout is again slightly
+ * different with the Graphics Segment immediately following Top of
+ * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
+ * reported by 865g, so we just use the top of memory as determined
+ * by the e820 probe.
+ *
+ * XXX However, gen2 requires an unavailable symbol.
+ */
+ base = 0;
+ if (INTEL_INFO(dev)->gen >= 3) {
+ /* Read Graphics Base of Stolen Memory directly */
+ pci_read_config_dword(dev->pdev, 0x5c, &base);
+ base &= ~((1<<20) - 1);
+ } else { /* GEN2 */
+#if 0
+ /* Stolen is immediately above Top of Memory */
+ base = max_low_pfn_mapped << PAGE_SHIFT;
+#endif
+ }
+
+ if (base == 0)
+ return 0;
+
+ /* make sure we don't clobber the GTT if it's within stolen memory */
+ if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
+ struct {
+ u32 start, end;
+ } stolen[2] = {
+ { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+ { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+ };
+ u64 gtt_start, gtt_end;
+
+ gtt_start = I915_READ(PGTBL_CTL);
+ if (IS_GEN4(dev))
+ gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
+ (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
+ else
+ gtt_start &= PGTBL_ADDRESS_LO_MASK;
+ gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
+
+ if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
+ stolen[0].end = gtt_start;
+ if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
+ stolen[1].start = gtt_end;
+
+ /* pick the larger of the two chunks */
+ if (stolen[0].end - stolen[0].start >
+ stolen[1].end - stolen[1].start) {
+ base = stolen[0].start;
+ dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
+ } else {
+ base = stolen[1].start;
+ dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
+ }
+
+ if (stolen[0].start != stolen[1].start ||
+ stolen[0].end != stolen[1].end) {
+ DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
+ (unsigned long long) gtt_start,
+ (unsigned long long) gtt_end - 1);
+ DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
+ base, base + (u32) dev_priv->gtt.stolen_size - 1);
+ }
+ }
+
+
+ /* Verify that nothing else uses this physical address. Stolen
+ * memory should be reserved by the BIOS and hidden from the
+ * kernel. So if the region is already marked as busy, something
+ * is seriously wrong.
+ */
+ r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+ "Graphics Stolen Memory");
+ if (r == NULL) {
+ /*
+ * One more attempt but this time requesting region from
+ * base + 1, as we have seen that this resolves the region
+ * conflict with the PCI Bus.
+ * This is a BIOS w/a: Some BIOS wrap stolen in the root
+ * PCI bus, but have an off-by-one error. Hence retry the
+ * reservation starting from 1 instead of 0.
+ */
+ r = devm_request_mem_region(dev->dev, base + 1,
+ dev_priv->gtt.stolen_size - 1,
+ "Graphics Stolen Memory");
+ if (r == NULL) {
+ DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+ base, base + (uint32_t)dev_priv->gtt.stolen_size);
+ base = 0;
+ }
+ }
+
+ return base;
+}
+
+static int i915_setup_compression(struct drm_device *dev, int size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+ int ret;
+
+ compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
+ if (!compressed_fb)
+ goto err_llb;
+
+ /* Try to over-allocate to reduce reallocations and fragmentation */
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+ size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+ size >>= 1, 4096,
+ DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ goto err_llb;
+
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+ else if (IS_GM45(dev)) {
+ I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+ } else {
+ compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
+ if (!compressed_llb)
+ goto err_fb;
+
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
+ 4096, 4096, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ goto err_fb;
+
+ dev_priv->fbc.compressed_llb = compressed_llb;
+
+ I915_WRITE(FBC_CFB_BASE,
+ dev_priv->mm.stolen_base + compressed_fb->start);
+ I915_WRITE(FBC_LL_BASE,
+ dev_priv->mm.stolen_base + compressed_llb->start);
+ }
+
+ dev_priv->fbc.compressed_fb = compressed_fb;
+ dev_priv->fbc.size = size;
+
+ DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
+ size);
+
+ return 0;
+
+err_fb:
+ kfree(compressed_llb);
+ drm_mm_remove_node(compressed_fb);
+err_llb:
+ kfree(compressed_fb);
+ pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+ return -ENOSPC;
+}
+
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ return -ENODEV;
+
+ if (size < dev_priv->fbc.size)
+ return 0;
+
+ /* Release any current block */
+ i915_gem_stolen_cleanup_compression(dev);
+
+ return i915_setup_compression(dev, size);
+}
+
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->fbc.size == 0)
+ return;
+
+ if (dev_priv->fbc.compressed_fb) {
+ drm_mm_remove_node(dev_priv->fbc.compressed_fb);
+ kfree(dev_priv->fbc.compressed_fb);
+ }
+
+ if (dev_priv->fbc.compressed_llb) {
+ drm_mm_remove_node(dev_priv->fbc.compressed_llb);
+ kfree(dev_priv->fbc.compressed_llb);
+ }
+
+ dev_priv->fbc.size = 0;
+}
+
+void i915_gem_cleanup_stolen(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ return;
+
+ i915_gem_stolen_cleanup_compression(dev);
+ drm_mm_takedown(&dev_priv->mm.stolen);
+}
+
+int i915_gem_init_stolen(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int bios_reserved = 0;
+
+#ifdef CONFIG_INTEL_IOMMU
+ if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
+ DRM_INFO("DMAR active, disabling use of stolen memory\n");
+ return 0;
+ }
+#endif
+
+ if (dev_priv->gtt.stolen_size == 0)
+ return 0;
+
+ dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+ if (dev_priv->mm.stolen_base == 0)
+ return 0;
+
+ DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
+ dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
+
+ if (IS_VALLEYVIEW(dev))
+ bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
+
+ if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
+ return 0;
+
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
+ bios_reserved);
+
+ return 0;
+}
+
+static struct sg_table *
+i915_pages_create_for_stolen(struct drm_device *dev,
+ u32 offset, u32 size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct sg_table *st;
+ struct scatterlist *sg;
+
+ DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
+ BUG_ON(offset > dev_priv->gtt.stolen_size - size);
+
+ /* We hide that we have no struct page backing our stolen object
+ * by wrapping the contiguous physical allocation with a fake
+ * dma mapping in a single scatterlist.
+ */
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return NULL;
+
+ if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+ kfree(st);
+ return NULL;
+ }
+
+ sg = st->sgl;
+ sg->offset = 0;
+ sg->length = size;
+
+ sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
+ sg_dma_len(sg) = size;
+
+ return st;
+}
+
+static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+{
+ BUG();
+ return -EINVAL;
+}
+
+static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
+{
+ /* Should only be called during free */
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
+ .get_pages = i915_gem_object_get_pages_stolen,
+ .put_pages = i915_gem_object_put_pages_stolen,
+};
+
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct drm_device *dev,
+ struct drm_mm_node *stolen)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_alloc(dev);
+ if (obj == NULL)
+ return NULL;
+
+ drm_gem_private_object_init(dev, &obj->base, stolen->size);
+ i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
+
+ obj->pages = i915_pages_create_for_stolen(dev,
+ stolen->start, stolen->size);
+ if (obj->pages == NULL)
+ goto cleanup;
+
+ obj->has_dma_mapping = true;
+ i915_gem_object_pin_pages(obj);
+ obj->stolen = stolen;
+
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+ obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
+
+ return obj;
+
+cleanup:
+ i915_gem_object_free(obj);
+ return NULL;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct drm_mm_node *stolen;
+ int ret;
+
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ return NULL;
+
+ DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
+ if (size == 0)
+ return NULL;
+
+ stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+ if (!stolen)
+ return NULL;
+
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
+ 4096, DRM_MM_SEARCH_DEFAULT);
+ if (ret) {
+ kfree(stolen);
+ return NULL;
+ }
+
+ obj = _i915_gem_object_create_stolen(dev, stolen);
+ if (obj)
+ return obj;
+
+ drm_mm_remove_node(stolen);
+ kfree(stolen);
+ return NULL;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+ u32 stolen_offset,
+ u32 gtt_offset,
+ u32 size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *ggtt = &dev_priv->gtt.base;
+ struct drm_i915_gem_object *obj;
+ struct drm_mm_node *stolen;
+ struct i915_vma *vma;
+ int ret;
+
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ return NULL;
+
+ DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
+ stolen_offset, gtt_offset, size);
+
+ /* KISS and expect everything to be page-aligned */
+ BUG_ON(stolen_offset & 4095);
+ BUG_ON(size & 4095);
+
+ if (WARN_ON(size == 0))
+ return NULL;
+
+ stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+ if (!stolen)
+ return NULL;
+
+ stolen->start = stolen_offset;
+ stolen->size = size;
+ ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to allocate stolen space\n");
+ kfree(stolen);
+ return NULL;
+ }
+
+ obj = _i915_gem_object_create_stolen(dev, stolen);
+ if (obj == NULL) {
+ DRM_DEBUG_KMS("failed to allocate stolen object\n");
+ drm_mm_remove_node(stolen);
+ kfree(stolen);
+ return NULL;
+ }
+
+ /* Some objects just need physical mem from stolen space */
+ if (gtt_offset == I915_GTT_OFFSET_NONE)
+ return obj;
+
+ vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_out;
+ }
+
+ /* To simplify the initialisation sequence between KMS and GTT,
+ * we allow construction of the stolen object prior to
+ * setting up the GTT space. The actual reservation will occur
+ * later.
+ */
+ vma->node.start = gtt_offset;
+ vma->node.size = size;
+ if (drm_mm_initialized(&ggtt->mm)) {
+ ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+ goto err_vma;
+ }
+ }
+
+ obj->has_global_gtt_mapping = 1;
+
+ list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
+ list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+ i915_gem_object_pin_pages(obj);
+
+ return obj;
+
+err_vma:
+ i915_gem_vma_destroy(vma);
+err_out:
+ drm_mm_remove_node(stolen);
+ kfree(stolen);
+ drm_gem_object_unreference(&obj->base);
+ return NULL;
+}
+
+void
+i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
+{
+ if (obj->stolen) {
+ drm_mm_remove_node(obj->stolen);
+ kfree(obj->stolen);
+ obj->stolen = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index df278b2685b..cb150e8b433 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,13 +25,10 @@
*
*/
-#include <linux/acpi.h>
-#include <linux/pnp.h>
-#include "linux/string.h"
-#include "linux/bitops.h"
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
#include "i915_drv.h"
/** @file i915_gem_tiling.c
@@ -83,120 +80,6 @@
* to match what the GPU expects.
*/
-#define MCHBAR_I915 0x44
-#define MCHBAR_I965 0x48
-#define MCHBAR_SIZE (4*4096)
-
-#define DEVEN_REG 0x54
-#define DEVEN_MCHBAR_EN (1 << 28)
-
-/* Allocate space for the MCH regs if needed, return nonzero on error */
-static int
-intel_alloc_mchbar_resource(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp_lo, temp_hi = 0;
- u64 mchbar_addr;
- int ret = 0;
-
- if (IS_I965G(dev))
- pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
- pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
- mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
-
- /* If ACPI doesn't have it, assume we need to allocate it ourselves */
-#ifdef CONFIG_PNP
- if (mchbar_addr &&
- pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
- ret = 0;
- goto out;
- }
-#endif
-
- /* Get some space for it */
- ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
- MCHBAR_SIZE, MCHBAR_SIZE,
- PCIBIOS_MIN_MEM,
- 0, pcibios_align_resource,
- dev_priv->bridge_dev);
- if (ret) {
- DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
- dev_priv->mch_res.start = 0;
- goto out;
- }
-
- if (IS_I965G(dev))
- pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
- upper_32_bits(dev_priv->mch_res.start));
-
- pci_write_config_dword(dev_priv->bridge_dev, reg,
- lower_32_bits(dev_priv->mch_res.start));
-out:
- return ret;
-}
-
-/* Setup MCHBAR if possible, return true if we should disable it again */
-static bool
-intel_setup_mchbar(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp;
- bool need_disable = false, enabled;
-
- if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
- enabled = !!(temp & DEVEN_MCHBAR_EN);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- enabled = temp & 1;
- }
-
- /* If it's already enabled, don't have to do anything */
- if (enabled)
- goto out;
-
- if (intel_alloc_mchbar_resource(dev))
- goto out;
-
- need_disable = true;
-
- /* Space is allocated or reserved, so enable it. */
- if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
- temp | DEVEN_MCHBAR_EN);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
- }
-out:
- return need_disable;
-}
-
-static void
-intel_teardown_mchbar(struct drm_device *dev, bool disable)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp;
-
- if (disable) {
- if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
- temp &= ~DEVEN_MCHBAR_EN;
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- temp &= ~1;
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
- }
- }
-
- if (dev_priv->mch_res.start)
- release_resource(&dev_priv->mch_res);
-}
-
/**
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
@@ -204,30 +87,47 @@ intel_teardown_mchbar(struct drm_device *dev, bool disable)
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- bool need_disable;
- if (IS_IRONLAKE(dev)) {
+ if (IS_VALLEYVIEW(dev)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ uint32_t dimm_c0, dimm_c1;
+ dimm_c0 = I915_READ(MAD_DIMM_C0);
+ dimm_c1 = I915_READ(MAD_DIMM_C1);
+ dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ /* Enable swizzling when the channels are populated with
+ * identically sized dimms. We don't need to check the 3rd
+ * channel because no cpu with gpu attached ships in that
+ * configuration. Also, swizzling only makes sense for 2
+ * channels anyway. */
+ if (dimm_c0 == dimm_c1) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+ } else if (IS_GEN5(dev)) {
/* On Ironlake, whatever the DRAM config, the GPU always does
* the same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (!IS_I9XX(dev)) {
+ } else if (IS_GEN2(dev)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev)) {
+ } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
uint32_t dcc;
- /* Try to make sure MCHBAR is enabled before poking at it */
- need_disable = intel_setup_mchbar(dev);
-
- /* On mobile 9xx chipsets, channel interleave by the CPU is
+ /* On 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
* the GPU's interleave is bit 9 and 10 for X tiled, and bit
@@ -266,8 +166,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
-
- intel_teardown_mchbar(dev, need_disable);
} else {
/* The 965, G33, and newer, have a very flexible memory
* configuration. It will enable dual-channel mode
@@ -302,41 +200,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
-
-/**
- * Returns whether an object is currently fenceable. If not, it may need
- * to be unbound and have its pitch adjusted.
- */
-bool
-i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- if (IS_I965G(dev)) {
- /* The 965 can have fences at any page boundary. */
- if (obj->size & 4095)
- return false;
- return true;
- } else if (IS_I9XX(dev)) {
- if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
- return false;
- } else {
- if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
- return false;
- }
-
- /* Power of two sized... */
- if (obj->size & (obj->size - 1))
- return false;
-
- /* Objects must be size aligned as well */
- if (obj_priv->gtt_offset & (obj->size - 1))
- return false;
- return true;
-}
-
/* Check pitch constraints for all chips & tiling formats */
-bool
+static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
int tile_width;
@@ -345,76 +210,78 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (tiling_mode == I915_TILING_NONE)
return true;
- if (!IS_I9XX(dev) ||
+ if (IS_GEN2(dev) ||
(tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
tile_width = 128;
else
tile_width = 512;
/* check maximum stride & object size */
- if (IS_I965G(dev)) {
- /* i965 stores the end address of the gtt mapping in the fence
- * reg, so dont bother to check the size */
- if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
+ /* i965+ stores the end address of the gtt mapping in the fence
+ * reg, so don't bother to check the size */
+ if (INTEL_INFO(dev)->gen >= 7) {
+ if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
return false;
- } else if (IS_I9XX(dev)) {
- uint32_t pitch_val = ffs(stride / tile_width) - 1;
-
- /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
- * instead of 4 (2KB) on 945s.
- */
- if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
- size > (I830_FENCE_MAX_SIZE_VAL << 20))
+ } else if (INTEL_INFO(dev)->gen >= 4) {
+ if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else {
- uint32_t pitch_val = ffs(stride / tile_width) - 1;
-
- if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
- size > (I830_FENCE_MAX_SIZE_VAL << 19))
+ if (stride > 8192)
return false;
+
+ if (IS_GEN3(dev)) {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+ return false;
+ } else {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+ return false;
+ }
}
+ if (stride < tile_width)
+ return false;
+
/* 965+ just needs multiples of tile width */
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
if (stride & (tile_width - 1))
return false;
return true;
}
/* Pre-965 needs power of two tile widths */
- if (stride < tile_width)
- return false;
-
if (stride & (stride - 1))
return false;
return true;
}
+/* Is the current GTT allocation valid for the change in tiling? */
static bool
-i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
+i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ u32 size;
- if (obj_priv->gtt_space == NULL)
+ if (tiling_mode == I915_TILING_NONE)
return true;
- if (tiling_mode == I915_TILING_NONE)
+ if (INTEL_INFO(obj->base.dev)->gen >= 4)
return true;
- if (!IS_I965G(dev)) {
- if (obj_priv->gtt_offset & (obj->size - 1))
+ if (INTEL_INFO(obj->base.dev)->gen == 3) {
+ if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
+ return false;
+ } else {
+ if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
return false;
- if (IS_I9XX(dev)) {
- if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
- return false;
- } else {
- if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
- return false;
- }
}
+ size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
+ if (i915_gem_obj_ggtt_size(obj) != size)
+ return false;
+
+ if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
+ return false;
+
return true;
}
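/*
 * A minimal sketch, not from this patch, of the pre-gen4 fence
 * placement rule the function above enforces: the fenced GGTT region
 * must be power-of-two sized and naturally (size-)aligned. Values are
 * illustrative only.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool fence_placement_ok(uint64_t offset, uint64_t fence_size)
{
	if (fence_size & (fence_size - 1))
		return false;				/* power-of-two sized */
	return (offset & (fence_size - 1)) == 0;	/* naturally aligned */
}

int main(void)
{
	assert(fence_placement_ok(0x200000, 0x100000));
	assert(!fence_placement_ok(0x180000, 0x100000));	/* misaligned */
	return 0;
}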
@@ -424,26 +291,28 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
int ret = 0;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
- obj_priv = obj->driver_private;
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL)
+ return -ENOENT;
- if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ if (!i915_tiling_ok(dev,
+ args->stride, obj->base.size, args->tiling_mode)) {
+ drm_gem_object_unreference_unlocked(&obj->base);
return -EINVAL;
}
+ if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return -EBUSY;
+ }
+
if (args->tiling_mode == I915_TILING_NONE) {
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
args->stride = 0;
@@ -474,36 +343,65 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
- if (args->tiling_mode != obj_priv->tiling_mode ||
- args->stride != obj_priv->stride) {
+ if (args->tiling_mode != obj->tiling_mode ||
+ args->stride != obj->stride) {
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
- * need to ensure that any fence register is cleared.
+ * need to ensure that any fence register is updated before
+ * the next fenced access (either through the GTT or by the
+ * BLT unit on older GPUs).
+ *
+ * After updating the tiling parameters, we then flag whether
+ * we need to update an associated fence register. Note this
+ * has to also include the unfenced register the GPU uses
+ * whilst executing a fenced command for an untiled object.
*/
- if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
- ret = i915_gem_object_unbind(obj);
- else
- ret = i915_gem_object_put_fence_reg(obj);
- if (ret != 0) {
- WARN(ret != -ERESTARTSYS,
- "failed to reset object for tiling switch");
- args->tiling_mode = obj_priv->tiling_mode;
- args->stride = obj_priv->stride;
- goto err;
+
+ obj->map_and_fenceable =
+ !i915_gem_obj_ggtt_bound(obj) ||
+ (i915_gem_obj_ggtt_offset(obj) +
+ obj->base.size <= dev_priv->gtt.mappable_end &&
+ i915_gem_object_fence_ok(obj, args->tiling_mode));
+
+ /* Rebind if we need a change of alignment */
+ if (!obj->map_and_fenceable) {
+ u32 unfenced_align =
+ i915_gem_get_gtt_alignment(dev, obj->base.size,
+ args->tiling_mode,
+ false);
+ if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
+ ret = i915_gem_object_ggtt_unbind(obj);
}
- /* If we've changed tiling, GTT-mappings of the object
- * need to re-fault to ensure that the correct fence register
- * setup is in place.
- */
- i915_gem_release_mmap(obj);
+ if (ret == 0) {
+ obj->fence_dirty =
+ obj->fenced_gpu_access ||
+ obj->fence_reg != I915_FENCE_REG_NONE;
- obj_priv->tiling_mode = args->tiling_mode;
- obj_priv->stride = args->stride;
+ obj->tiling_mode = args->tiling_mode;
+ obj->stride = args->stride;
+
+ /* Force the fence to be reacquired for GTT access */
+ i915_gem_release_mmap(obj);
+ }
+ }
+ /* we have to maintain this existing ABI... */
+ args->stride = obj->stride;
+ args->tiling_mode = obj->tiling_mode;
+
+ /* Try to preallocate memory required to save swizzling on put-pages */
+ if (i915_gem_object_needs_bit17_swizzle(obj)) {
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
+ sizeof(long), GFP_KERNEL);
+ }
+ } else {
+ kfree(obj->bit_17);
+ obj->bit_17 = NULL;
}
-err:
- drm_gem_object_unreference(obj);
+
+ drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
return ret;
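/*
 * A minimal userspace sketch, assuming a libdrm environment, of how
 * this ioctl is driven; fd and handle stand in for an open DRM device
 * and an existing GEM object. The kernel may adjust swizzle_mode and
 * stride, so callers re-read the args afterwards (the "existing ABI"
 * maintained above).
 */
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int set_x_tiling(int fd, __u32 handle, __u32 stride)
{
	struct drm_i915_gem_set_tiling arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.tiling_mode = I915_TILING_X;
	arg.stride = stride;

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg);
}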
@@ -514,22 +412,20 @@ err:
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
- obj_priv = obj->driver_private;
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL)
+ return -ENOENT;
mutex_lock(&dev->struct_mutex);
- args->tiling_mode = obj_priv->tiling_mode;
- switch (obj_priv->tiling_mode) {
+ args->tiling_mode = obj->tiling_mode;
+ switch (obj->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
break;
@@ -549,7 +445,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -560,16 +456,14 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
* bit 17 of its physical address and therefore being interpreted differently
* by the GPU.
*/
-static int
+static void
i915_gem_swizzle_page(struct page *page)
{
+ char temp[64];
char *vaddr;
int i;
- char temp[64];
vaddr = kmap(page);
- if (vaddr == NULL)
- return -ENOMEM;
for (i = 0; i < PAGE_SIZE; i += 128) {
memcpy(temp, &vaddr[i], 64);
@@ -578,65 +472,53 @@ i915_gem_swizzle_page(struct page *page)
}
kunmap(page);
-
- return 0;
}
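/*
 * A standalone sketch, not from this patch, of the transform above:
 * within every 128-byte chunk the two 64-byte halves are exchanged,
 * which undoes the effect of bit 17 having flipped in the page's
 * physical address. Applying it twice restores the original data.
 */
#include <assert.h>
#include <string.h>

#define CHUNK	128
#define HALF	64

static void swizzle_buffer(char *buf, unsigned long size)
{
	char temp[HALF];
	unsigned long i;

	for (i = 0; i < size; i += CHUNK) {
		memcpy(temp, &buf[i], HALF);
		memcpy(&buf[i], &buf[i + HALF], HALF);
		memcpy(&buf[i + HALF], temp, HALF);
	}
}

int main(void)
{
	char page[4096];

	memset(page, 0xaa, sizeof(page));
	memset(page, 0x55, HALF);		/* mark the first half */
	swizzle_buffer(page, sizeof(page));
	assert(page[HALF] == 0x55 && page[0] == (char)0xaa);
	swizzle_buffer(page, sizeof(page));	/* involution */
	assert(page[0] == 0x55);
	return 0;
}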
void
-i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int page_count = obj->size >> PAGE_SHIFT;
+ struct sg_page_iter sg_iter;
int i;
- if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
- return;
-
- if (obj_priv->bit_17 == NULL)
+ if (obj->bit_17 == NULL)
return;
- for (i = 0; i < page_count; i++) {
- char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+ char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
- (test_bit(i, obj_priv->bit_17) != 0)) {
- int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
- if (ret != 0) {
- DRM_ERROR("Failed to swizzle page\n");
- return;
- }
- set_page_dirty(obj_priv->pages[i]);
+ (test_bit(i, obj->bit_17) != 0)) {
+ i915_gem_swizzle_page(page);
+ set_page_dirty(page);
}
+ i++;
}
}
void
-i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int page_count = obj->size >> PAGE_SHIFT;
+ struct sg_page_iter sg_iter;
+ int page_count = obj->base.size >> PAGE_SHIFT;
int i;
- if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
- return;
-
- if (obj_priv->bit_17 == NULL) {
- obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
- sizeof(long), GFP_KERNEL);
- if (obj_priv->bit_17 == NULL) {
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+ sizeof(long), GFP_KERNEL);
+ if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
return;
}
}
- for (i = 0; i < page_count; i++) {
- if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
- __set_bit(i, obj_priv->bit_17);
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+ __set_bit(i, obj->bit_17);
else
- __clear_bit(i, obj_priv->bit_17);
+ __clear_bit(i, obj->bit_17);
+ i++;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
new file mode 100644
index 00000000000..21ea92886a5
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -0,0 +1,711 @@
+/*
+ * Copyright © 2012-2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+#include <linux/mmu_context.h>
+#include <linux/mmu_notifier.h>
+#include <linux/mempolicy.h>
+#include <linux/swap.h>
+
+#if defined(CONFIG_MMU_NOTIFIER)
+#include <linux/interval_tree.h>
+
+struct i915_mmu_notifier {
+ spinlock_t lock;
+ struct hlist_node node;
+ struct mmu_notifier mn;
+ struct rb_root objects;
+ struct drm_device *dev;
+ struct mm_struct *mm;
+ struct work_struct work;
+ unsigned long count;
+ unsigned long serial;
+};
+
+struct i915_mmu_object {
+ struct i915_mmu_notifier *mmu;
+ struct interval_tree_node it;
+ struct drm_i915_gem_object *obj;
+};
+
+static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
+ struct interval_tree_node *it = NULL;
+ unsigned long serial = 0;
+
+ end--; /* interval ranges are inclusive, but invalidate range is exclusive */
+ while (start < end) {
+ struct drm_i915_gem_object *obj;
+
+ obj = NULL;
+ spin_lock(&mn->lock);
+ if (serial == mn->serial)
+ it = interval_tree_iter_next(it, start, end);
+ else
+ it = interval_tree_iter_first(&mn->objects, start, end);
+ if (it != NULL) {
+ obj = container_of(it, struct i915_mmu_object, it)->obj;
+ drm_gem_object_reference(&obj->base);
+ serial = mn->serial;
+ }
+ spin_unlock(&mn->lock);
+ if (obj == NULL)
+ return;
+
+ mutex_lock(&mn->dev->struct_mutex);
+ /* Cancel any active worker and force us to re-evaluate gup */
+ obj->userptr.work = NULL;
+
+ if (obj->pages != NULL) {
+ struct drm_i915_private *dev_priv = to_i915(mn->dev);
+ struct i915_vma *vma, *tmp;
+ bool was_interruptible;
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+ int ret = i915_vma_unbind(vma);
+ WARN_ON(ret && ret != -EIO);
+ }
+ WARN_ON(i915_gem_object_put_pages(obj));
+
+ dev_priv->mm.interruptible = was_interruptible;
+ }
+
+ start = obj->userptr.ptr + obj->base.size;
+
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&mn->dev->struct_mutex);
+ }
+}
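/*
 * A minimal sketch, not from this patch, of the range convention the
 * notifier above corrects for at entry: invalidation ranges arrive as
 * half-open [start, end), while interval_tree nodes store inclusive
 * [start, last] bounds, hence the end--.
 */
#include <assert.h>
#include <stdbool.h>

struct node { unsigned long start, last; };	/* inclusive bounds */

static bool overlaps(const struct node *n, unsigned long start,
		     unsigned long end)
{
	unsigned long last = end - 1;	/* exclusive -> inclusive */

	return n->start <= last && n->last >= start;
}

int main(void)
{
	struct node obj = { .start = 0x1000, .last = 0x1fff };

	assert(overlaps(&obj, 0x1800, 0x2000));
	assert(!overlaps(&obj, 0x2000, 0x3000));	/* adjacent, no overlap */
	return 0;
}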
+
+static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
+ .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
+};
+
+static struct i915_mmu_notifier *
+__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_mmu_notifier *mmu;
+
+ /* Protected by dev->struct_mutex */
+ hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
+ if (mmu->mm == mm)
+ return mmu;
+
+ return NULL;
+}
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_mmu_notifier *mmu;
+ int ret;
+
+ lockdep_assert_held(&dev->struct_mutex);
+
+ mmu = __i915_mmu_notifier_lookup(dev, mm);
+ if (mmu)
+ return mmu;
+
+ mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
+ if (mmu == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&mmu->lock);
+ mmu->dev = dev;
+ mmu->mn.ops = &i915_gem_userptr_notifier;
+ mmu->mm = mm;
+ mmu->objects = RB_ROOT;
+ mmu->count = 0;
+ mmu->serial = 0;
+
+ /* Protected by mmap_sem (write-lock) */
+ ret = __mmu_notifier_register(&mmu->mn, mm);
+ if (ret) {
+ kfree(mmu);
+ return ERR_PTR(ret);
+ }
+
+ /* Protected by dev->struct_mutex */
+ hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
+ return mmu;
+}
+
+static void
+__i915_mmu_notifier_destroy_worker(struct work_struct *work)
+{
+ struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
+ mmu_notifier_unregister(&mmu->mn, mmu->mm);
+ kfree(mmu);
+}
+
+static void
+__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
+{
+ lockdep_assert_held(&mmu->dev->struct_mutex);
+
+ /* Protected by dev->struct_mutex */
+ hash_del(&mmu->node);
+
+ /* Our lock ordering is: mmap_sem, mmu_notifier_srcu, struct_mutex.
+ * We enter the function holding struct_mutex, therefore we need
+ * to drop our mutex prior to calling mmu_notifier_unregister in
+ * order to prevent lock inversion (and system-wide deadlock)
+ * between the mmap_sem and struct_mutex. Hence we defer the
+ * unregistration to a workqueue where we hold no locks.
+ */
+ INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
+ schedule_work(&mmu->work);
+}
+
+static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
+{
+ if (++mmu->serial == 0)
+ mmu->serial = 1;
+}
+
+static void
+i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
+ struct i915_mmu_object *mn)
+{
+ lockdep_assert_held(&mmu->dev->struct_mutex);
+
+ spin_lock(&mmu->lock);
+ interval_tree_remove(&mn->it, &mmu->objects);
+ __i915_mmu_notifier_update_serial(mmu);
+ spin_unlock(&mmu->lock);
+
+ /* Protected against _add() by dev->struct_mutex */
+ if (--mmu->count == 0)
+ __i915_mmu_notifier_destroy(mmu);
+}
+
+static int
+i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
+ struct i915_mmu_object *mn)
+{
+ struct interval_tree_node *it;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(mmu->dev);
+ if (ret)
+ return ret;
+
+ /* Make sure we drop the final active reference (and thereby
+ * remove the objects from the interval tree) before we do
+ * the check for overlapping objects.
+ */
+ i915_gem_retire_requests(mmu->dev);
+
+ /* Disallow overlapping userptr objects */
+ spin_lock(&mmu->lock);
+ it = interval_tree_iter_first(&mmu->objects,
+ mn->it.start, mn->it.last);
+ if (it) {
+ struct drm_i915_gem_object *obj;
+
+ /* We only need to check the first object in the range as it
+ * either has cancelled gup work queued and we need to return
+ * to the user to give time for the gup-workers to flush their
+ * object references, upon which the object will be removed from
+ * the interval tree, or the range is still in use by another
+ * client and the overlap is invalid.
+ */
+
+ obj = container_of(it, struct i915_mmu_object, it)->obj;
+ ret = obj->userptr.workers ? -EAGAIN : -EINVAL;
+ } else {
+ interval_tree_insert(&mn->it, &mmu->objects);
+ __i915_mmu_notifier_update_serial(mmu);
+ ret = 0;
+ }
+ spin_unlock(&mmu->lock);
+ mutex_unlock(&mmu->dev->struct_mutex);
+
+ return ret;
+}
+
+static void
+i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
+{
+ struct i915_mmu_object *mn;
+
+ mn = obj->userptr.mn;
+ if (mn == NULL)
+ return;
+
+ i915_mmu_notifier_del(mn->mmu, mn);
+ obj->userptr.mn = NULL;
+}
+
+static int
+i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
+ unsigned flags)
+{
+ struct i915_mmu_notifier *mmu;
+ struct i915_mmu_object *mn;
+ int ret;
+
+ if (flags & I915_USERPTR_UNSYNCHRONIZED)
+ return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
+
+ down_write(&obj->userptr.mm->mmap_sem);
+ ret = i915_mutex_lock_interruptible(obj->base.dev);
+ if (ret == 0) {
+ mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
+ if (!IS_ERR(mmu))
+ mmu->count++; /* preemptive add to act as a refcount */
+ else
+ ret = PTR_ERR(mmu);
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ }
+ up_write(&obj->userptr.mm->mmap_sem);
+ if (ret)
+ return ret;
+
+ mn = kzalloc(sizeof(*mn), GFP_KERNEL);
+ if (mn == NULL) {
+ ret = -ENOMEM;
+ goto destroy_mmu;
+ }
+
+ mn->mmu = mmu;
+ mn->it.start = obj->userptr.ptr;
+ mn->it.last = mn->it.start + obj->base.size - 1;
+ mn->obj = obj;
+
+ ret = i915_mmu_notifier_add(mmu, mn);
+ if (ret)
+ goto free_mn;
+
+ obj->userptr.mn = mn;
+ return 0;
+
+free_mn:
+ kfree(mn);
+destroy_mmu:
+ mutex_lock(&obj->base.dev->struct_mutex);
+ if (--mmu->count == 0)
+ __i915_mmu_notifier_destroy(mmu);
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ return ret;
+}
+
+#else
+
+static void
+i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
+{
+}
+
+static int
+i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
+ unsigned flags)
+{
+ if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
+ return -ENODEV;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return 0;
+}
+#endif
+
+struct get_pages_work {
+ struct work_struct work;
+ struct drm_i915_gem_object *obj;
+ struct task_struct *task;
+};
+
+#if IS_ENABLED(CONFIG_SWIOTLB)
+#define swiotlb_active() swiotlb_nr_tbl()
+#else
+#define swiotlb_active() 0
+#endif
+
+static int
+st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
+{
+ struct scatterlist *sg;
+ int ret, n;
+
+ *st = kmalloc(sizeof(**st), GFP_KERNEL);
+ if (*st == NULL)
+ return -ENOMEM;
+
+ if (swiotlb_active()) {
+ ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
+ if (ret)
+ goto err;
+
+ for_each_sg((*st)->sgl, sg, num_pages, n)
+ sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
+ } else {
+ ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
+ 0, num_pages << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ kfree(*st);
+ *st = NULL;
+ return ret;
+}
+
+static void
+__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
+{
+ struct get_pages_work *work = container_of(_work, typeof(*work), work);
+ struct drm_i915_gem_object *obj = work->obj;
+ struct drm_device *dev = obj->base.dev;
+ const int num_pages = obj->base.size >> PAGE_SHIFT;
+ struct page **pvec;
+ int pinned, ret;
+
+ ret = -ENOMEM;
+ pinned = 0;
+
+ pvec = kmalloc(num_pages*sizeof(struct page *),
+ GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ if (pvec == NULL)
+ pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
+ if (pvec != NULL) {
+ struct mm_struct *mm = obj->userptr.mm;
+
+ down_read(&mm->mmap_sem);
+ while (pinned < num_pages) {
+ ret = get_user_pages(work->task, mm,
+ obj->userptr.ptr + pinned * PAGE_SIZE,
+ num_pages - pinned,
+ !obj->userptr.read_only, 0,
+ pvec + pinned, NULL);
+ if (ret < 0)
+ break;
+
+ pinned += ret;
+ }
+ up_read(&mm->mmap_sem);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ if (obj->userptr.work != &work->work) {
+ ret = 0;
+ } else if (pinned == num_pages) {
+ ret = st_set_pages(&obj->pages, pvec, num_pages);
+ if (ret == 0) {
+ list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
+ pinned = 0;
+ }
+ }
+
+ obj->userptr.work = ERR_PTR(ret);
+ obj->userptr.workers--;
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+
+ release_pages(pvec, pinned, 0);
+ drm_free_large(pvec);
+
+ put_task_struct(work->task);
+ kfree(work);
+}
+
+static int
+i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
+{
+ const int num_pages = obj->base.size >> PAGE_SHIFT;
+ struct page **pvec;
+ int pinned, ret;
+
+ /* If userspace should engineer that these pages are replaced in
+ * the vma between us binding these pages into the GTT and completion
+ * of rendering... Their loss. If they change the mapping of their
+ * pages they need to create a new bo to point to the new vma.
+ *
+ * However, that still leaves open the possibility of the vma
+ * being copied upon fork. Which falls under the same userspace
+ * synchronisation issue as a regular bo, except that this time
+ * the process may not be expecting that a particular piece of
+ * memory is tied to the GPU.
+ *
+ * Fortunately, we can hook into the mmu_notifier in order to
+ * discard the page references prior to anything nasty happening
+ * to the vma (discard or cloning) which should prevent the more
+ * egregious cases from causing harm.
+ */
+
+ pvec = NULL;
+ pinned = 0;
+ if (obj->userptr.mm == current->mm) {
+ pvec = kmalloc(num_pages*sizeof(struct page *),
+ GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ if (pvec == NULL) {
+ pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
+ if (pvec == NULL)
+ return -ENOMEM;
+ }
+
+ pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
+ !obj->userptr.read_only, pvec);
+ }
+ if (pinned < num_pages) {
+ if (pinned < 0) {
+ ret = pinned;
+ pinned = 0;
+ } else {
+ /* Spawn a worker so that we can acquire the
+ * user pages without holding our mutex. Access
+ * to the user pages requires mmap_sem, and we have
+ * a strict lock ordering of mmap_sem, struct_mutex -
+ * we already hold struct_mutex here and so cannot
+ * call gup without encountering a lock inversion.
+ *
+ * Userspace will keep on repeating the operation
+ * (thanks to EAGAIN) until either we hit the fast
+ * path or the worker completes. If the worker is
+ * cancelled or superseded, the task is still run
+ * but the results are ignored. (This leads to the
+ * complication that we may have a stray object
+ * refcount that we need to be wary of when
+ * checking for existing objects during creation.)
+ * If the worker encounters an error, it reports
+ * that error back to this function through
+ * obj->userptr.work = ERR_PTR.
+ */
+ ret = -EAGAIN;
+ if (obj->userptr.work == NULL &&
+ obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
+ struct get_pages_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (work != NULL) {
+ obj->userptr.work = &work->work;
+ obj->userptr.workers++;
+
+ work->obj = obj;
+ drm_gem_object_reference(&obj->base);
+
+ work->task = current;
+ get_task_struct(work->task);
+
+ INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
+ schedule_work(&work->work);
+ } else
+ ret = -ENOMEM;
+ } else {
+ if (IS_ERR(obj->userptr.work)) {
+ ret = PTR_ERR(obj->userptr.work);
+ obj->userptr.work = NULL;
+ }
+ }
+ }
+ } else {
+ ret = st_set_pages(&obj->pages, pvec, num_pages);
+ if (ret == 0) {
+ obj->userptr.work = NULL;
+ pinned = 0;
+ }
+ }
+
+ release_pages(pvec, pinned, 0);
+ drm_free_large(pvec);
+ return ret;
+}
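/*
 * A sketch, not from this patch, of the userspace side of the EAGAIN
 * contract described above. touch_object() is a hypothetical stand-in
 * for any operation (e.g. execbuffer) that forces the userptr pages to
 * be acquired; the kernel keeps returning EAGAIN until the gup worker
 * has finished (or failed).
 */
#include <assert.h>
#include <errno.h>

static int attempts;

static int touch_object(int fd, unsigned int handle)
{
	(void)fd; (void)handle;
	if (attempts++ < 2) {		/* pretend the worker is still running */
		errno = EAGAIN;
		return -1;
	}
	return 0;
}

int main(void)
{
	int ret;

	do {
		ret = touch_object(3, 1);	/* hypothetical fd/handle */
	} while (ret == -1 && errno == EAGAIN);

	assert(ret == 0 && attempts == 3);
	return 0;
}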
+
+static void
+i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(obj->userptr.work != NULL);
+
+ if (obj->madv != I915_MADV_WILLNEED)
+ obj->dirty = 0;
+
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
+ struct page *page = sg_page(sg);
+
+ if (obj->dirty)
+ set_page_dirty(page);
+
+ mark_page_accessed(page);
+ page_cache_release(page);
+ }
+ obj->dirty = 0;
+
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+}
+
+static void
+i915_gem_userptr_release(struct drm_i915_gem_object *obj)
+{
+ i915_gem_userptr_release__mmu_notifier(obj);
+
+ if (obj->userptr.mm) {
+ mmput(obj->userptr.mm);
+ obj->userptr.mm = NULL;
+ }
+}
+
+static int
+i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
+{
+ if (obj->userptr.mn)
+ return 0;
+
+ return i915_gem_userptr_init__mmu_notifier(obj, 0);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
+ .dmabuf_export = i915_gem_userptr_dmabuf_export,
+ .get_pages = i915_gem_userptr_get_pages,
+ .put_pages = i915_gem_userptr_put_pages,
+ .release = i915_gem_userptr_release,
+};
+
+/**
+ * Creates a new mm object that wraps some normal memory from the process
+ * context - user memory.
+ *
+ * We impose several restrictions upon the memory being mapped
+ * into the GPU.
+ * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
+ * 2. It cannot overlap any other userptr object in the same address space.
+ * 3. It must be normal system memory, not a pointer into another map of IO
+ * space (e.g. it must not be a GTT mmapping of another object).
+ * 4. We only allow a bo as large as we could in theory map into the GTT,
+ * that is we limit the size to the total size of the GTT.
+ * 5. The bo is marked as being snoopable. The backing pages are left
+ * accessible directly by the CPU, but reads and writes by the GPU may
+ * incur the cost of a snoop (unless you have an LLC architecture).
+ *
+ * Synchronisation between multiple users and the GPU is left to userspace
+ * through the normal set-domain-ioctl. The kernel will enforce that the
+ * GPU relinquishes the VMA before it is returned back to the system
+ * i.e. upon free(), munmap() or process termination. However, the userspace
+ * malloc() library may not immediately relinquish the VMA after free() and
+ * instead reuse it whilst the GPU is still reading and writing to the VMA.
+ * Caveat emptor.
+ *
+ * Also note that the object created here is not currently a "first class"
+ * object, in that several ioctls are banned. These are the CPU access
+ * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
+ * direct access via your pointer rather than use those ioctls.
+ *
+ * If you think this is a good interface to use to pass GPU memory between
+ * drivers, please use dma-buf instead. In fact, wherever possible use
+ * dma-buf instead.
+ */
+int
+i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_userptr *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
+ u32 handle;
+
+ if (args->flags & ~(I915_USERPTR_READ_ONLY |
+ I915_USERPTR_UNSYNCHRONIZED))
+ return -EINVAL;
+
+ if (offset_in_page(args->user_ptr | args->user_size))
+ return -EINVAL;
+
+ if (args->user_size > dev_priv->gtt.base.total)
+ return -E2BIG;
+
+ if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
+ (char __user *)(unsigned long)args->user_ptr, args->user_size))
+ return -EFAULT;
+
+ if (args->flags & I915_USERPTR_READ_ONLY) {
+ /* On almost all of the current hw, we cannot tell the GPU that a
+ * page is readonly, so this is just a placeholder in the uAPI.
+ */
+ return -ENODEV;
+ }
+
+ /* Allocate the new object */
+ obj = i915_gem_object_alloc(dev);
+ if (obj == NULL)
+ return -ENOMEM;
+
+ drm_gem_private_object_init(dev, &obj->base, args->user_size);
+ i915_gem_object_init(obj, &i915_gem_userptr_ops);
+ obj->cache_level = I915_CACHE_LLC;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+
+ obj->userptr.ptr = args->user_ptr;
+ obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
+
+ /* And keep a pointer to the current->mm for resolving the user pages
+ * at binding. This means that we need to hook into the mmu_notifier
+ * in order to detect if the mmu is destroyed.
+ */
+ ret = -ENOMEM;
+ if ((obj->userptr.mm = get_task_mm(current)))
+ ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
+ if (ret == 0)
+ ret = drm_gem_handle_create(file, &obj->base, &handle);
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(&obj->base);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
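/*
 * A minimal userspace sketch, assuming libdrm headers, of creating a
 * userptr object via the ioctl above; it observes restriction 1 by
 * allocating page-aligned memory. fd stands in for an open DRM device.
 */
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int create_userptr_bo(int fd, size_t size, __u32 *handle)
{
	struct drm_i915_gem_userptr arg;
	void *ptr;

	/* Restriction 1: both ptr and size must be page aligned. */
	if (posix_memalign(&ptr, (size_t)sysconf(_SC_PAGESIZE), size))
		return -1;

	memset(&arg, 0, sizeof(arg));
	arg.user_ptr = (__u64)(unsigned long)ptr;
	arg.user_size = size;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
		return -1;

	*handle = arg.handle;
	return 0;
}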
+
+int
+i915_gem_init_userptr(struct drm_device *dev)
+{
+#if defined(CONFIG_MMU_NOTIFIER)
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ hash_init(dev_priv->mmu_notifiers);
+#endif
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
new file mode 100644
index 00000000000..66cf41765bf
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -0,0 +1,1264 @@
+/*
+ * Copyright (c) 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Keith Packard <keithp@keithp.com>
+ * Mika Kuoppala <mika.kuoppala@intel.com>
+ *
+ */
+
+#include <generated/utsrelease.h>
+#include "i915_drv.h"
+
+static const char *yesno(int v)
+{
+ return v ? "yes" : "no";
+}
+
+static const char *ring_str(int ring)
+{
+ switch (ring) {
+ case RCS: return "render";
+ case VCS: return "bsd";
+ case BCS: return "blt";
+ case VECS: return "vebox";
+ case VCS2: return "bsd2";
+ default: return "";
+ }
+}
+
+static const char *pin_flag(int pinned)
+{
+ if (pinned > 0)
+ return " P";
+ else if (pinned < 0)
+ return " p";
+ else
+ return "";
+}
+
+static const char *tiling_flag(int tiling)
+{
+ switch (tiling) {
+ default:
+ case I915_TILING_NONE: return "";
+ case I915_TILING_X: return " X";
+ case I915_TILING_Y: return " Y";
+ }
+}
+
+static const char *dirty_flag(int dirty)
+{
+ return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+ return purgeable ? " purgeable" : "";
+}
+
+static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
+{
+ if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
+ e->err = -ENOSPC;
+ return false;
+ }
+
+ if (e->bytes == e->size - 1 || e->err)
+ return false;
+
+ return true;
+}
+
+static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
+ unsigned len)
+{
+ if (e->pos + len <= e->start) {
+ e->pos += len;
+ return false;
+ }
+
+ /* First vsnprintf needs to fit in its entirety for memmove */
+ if (len >= e->size) {
+ e->err = -EIO;
+ return false;
+ }
+
+ return true;
+}
+
+static void __i915_error_advance(struct drm_i915_error_state_buf *e,
+ unsigned len)
+{
+ /* If this is the first printf in this window, adjust it so that
+ * its start position matches the start of the buffer
+ */
+
+ if (e->pos < e->start) {
+ const size_t off = e->start - e->pos;
+
+ /* Should not happen but be paranoid */
+ if (off > len || e->bytes) {
+ e->err = -EIO;
+ return;
+ }
+
+ memmove(e->buf, e->buf + off, len - off);
+ e->bytes = len - off;
+ e->pos = e->start;
+ return;
+ }
+
+ e->bytes += len;
+ e->pos += len;
+}
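/*
 * A condensed sketch, not from this patch, of the windowing scheme in
 * __i915_error_seek/__i915_error_advance above: output that ends
 * before `start` is skipped entirely, and the first write straddling
 * `start` is shifted down so the buffer begins exactly at the
 * requested file position.
 */
#include <assert.h>
#include <string.h>

struct ebuf { char buf[64]; size_t bytes, start, pos; };

static void ebuf_puts(struct ebuf *e, const char *s)
{
	size_t len = strlen(s);

	if (e->pos + len <= e->start) {		/* entirely before window */
		e->pos += len;
		return;
	}
	memcpy(e->buf + e->bytes, s, len);
	if (e->pos < e->start) {		/* straddles window start */
		size_t off = e->start - e->pos;

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}
	e->bytes += len;
	e->pos += len;
}

int main(void)
{
	struct ebuf e = { .start = 8 };

	ebuf_puts(&e, "0123456789");	/* first 8 chars precede the window */
	ebuf_puts(&e, "abc");
	assert(e.bytes == 5 && strncmp(e.buf, "89abc", 5) == 0);
	return 0;
}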
+
+static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
+ const char *f, va_list args)
+{
+ unsigned len;
+
+ if (!__i915_error_ok(e))
+ return;
+
+ /* Seek to the first printf which hits the start position */
+ if (e->pos < e->start) {
+ va_list tmp;
+
+ va_copy(tmp, args);
+ len = vsnprintf(NULL, 0, f, tmp);
+ va_end(tmp);
+
+ if (!__i915_error_seek(e, len))
+ return;
+ }
+
+ len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
+ if (len >= e->size - e->bytes)
+ len = e->size - e->bytes - 1;
+
+ __i915_error_advance(e, len);
+}
+
+static void i915_error_puts(struct drm_i915_error_state_buf *e,
+ const char *str)
+{
+ unsigned len;
+
+ if (!__i915_error_ok(e))
+ return;
+
+ len = strlen(str);
+
+ /* Seek to the first printf which hits the start position */
+ if (e->pos < e->start) {
+ if (!__i915_error_seek(e, len))
+ return;
+ }
+
+ if (len >= e->size - e->bytes)
+ len = e->size - e->bytes - 1;
+ memcpy(e->buf + e->bytes, str, len);
+
+ __i915_error_advance(e, len);
+}
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+#define err_puts(e, s) i915_error_puts(e, s)
+
+static void print_error_buffers(struct drm_i915_error_state_buf *m,
+ const char *name,
+ struct drm_i915_error_buffer *err,
+ int count)
+{
+ err_printf(m, "%s [%d]:\n", name, count);
+
+ while (count--) {
+ err_printf(m, " %08x %8u %02x %02x %x %x",
+ err->gtt_offset,
+ err->size,
+ err->read_domains,
+ err->write_domain,
+ err->rseqno, err->wseqno);
+ err_puts(m, pin_flag(err->pinned));
+ err_puts(m, tiling_flag(err->tiling));
+ err_puts(m, dirty_flag(err->dirty));
+ err_puts(m, purgeable_flag(err->purgeable));
+ err_puts(m, err->userptr ? " userptr" : "");
+ err_puts(m, err->ring != -1 ? " " : "");
+ err_puts(m, ring_str(err->ring));
+ err_puts(m, i915_cache_level_str(err->cache_level));
+
+ if (err->name)
+ err_printf(m, " (name: %d)", err->name);
+ if (err->fence_reg != I915_FENCE_REG_NONE)
+ err_printf(m, " (fence: %d)", err->fence_reg);
+
+ err_puts(m, "\n");
+ err++;
+ }
+}
+
+static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+{
+ switch (a) {
+ case HANGCHECK_IDLE:
+ return "idle";
+ case HANGCHECK_WAIT:
+ return "wait";
+ case HANGCHECK_ACTIVE:
+ return "active";
+ case HANGCHECK_KICK:
+ return "kick";
+ case HANGCHECK_HUNG:
+ return "hung";
+ }
+
+ return "unknown";
+}
+
+static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
+ struct drm_device *dev,
+ struct drm_i915_error_ring *ring)
+{
+ if (!ring->valid)
+ return;
+
+ err_printf(m, " HEAD: 0x%08x\n", ring->head);
+ err_printf(m, " TAIL: 0x%08x\n", ring->tail);
+ err_printf(m, " CTL: 0x%08x\n", ring->ctl);
+ err_printf(m, " HWS: 0x%08x\n", ring->hws);
+ err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
+ err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
+ err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
+ err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
+ err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate);
+ err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
+ }
+ err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
+ err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
+ lower_32_bits(ring->faddr));
+ if (INTEL_INFO(dev)->gen >= 6) {
+ err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
+ err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);
+ err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
+ ring->semaphore_mboxes[0],
+ ring->semaphore_seqno[0]);
+ err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
+ ring->semaphore_mboxes[1],
+ ring->semaphore_seqno[1]);
+ if (HAS_VEBOX(dev)) {
+ err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
+ ring->semaphore_mboxes[2],
+ ring->semaphore_seqno[2]);
+ }
+ }
+ if (USES_PPGTT(dev)) {
+ err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ int i;
+ for (i = 0; i < 4; i++)
+ err_printf(m, " PDP%d: 0x%016llx\n",
+ i, ring->vm_info.pdp[i]);
+ } else {
+ err_printf(m, " PP_DIR_BASE: 0x%08x\n",
+ ring->vm_info.pp_dir_base);
+ }
+ }
+ err_printf(m, " seqno: 0x%08x\n", ring->seqno);
+ err_printf(m, " waiting: %s\n", yesno(ring->waiting));
+ err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
+ err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
+ err_printf(m, " hangcheck: %s [%d]\n",
+ hangcheck_action_to_str(ring->hangcheck_action),
+ ring->hangcheck_score);
+}
+
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
+{
+ va_list args;
+
+ va_start(args, f);
+ i915_error_vprintf(e, f, args);
+ va_end(args);
+}
+
+static void print_error_obj(struct drm_i915_error_state_buf *m,
+ struct drm_i915_error_object *obj)
+{
+ int page, offset, elt;
+
+ for (page = offset = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ err_printf(m, "%08x : %08x\n", offset,
+ obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+}
+
+int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
+ const struct i915_error_state_file_priv *error_priv)
+{
+ struct drm_device *dev = error_priv->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error = error_priv->error;
+ int i, j, offset, elt;
+ int max_hangcheck_score;
+
+ if (!error) {
+ err_printf(m, "no error state collected\n");
+ goto out;
+ }
+
+ err_printf(m, "%s\n", error->error_msg);
+ err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
+ error->time.tv_usec);
+ err_printf(m, "Kernel: " UTS_RELEASE "\n");
+ max_hangcheck_score = 0;
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ if (error->ring[i].hangcheck_score > max_hangcheck_score)
+ max_hangcheck_score = error->ring[i].hangcheck_score;
+ }
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ if (error->ring[i].hangcheck_score == max_hangcheck_score &&
+ error->ring[i].pid != -1) {
+ err_printf(m, "Active process (on ring %s): %s [%d]\n",
+ ring_str(i),
+ error->ring[i].comm,
+ error->ring[i].pid);
+ }
+ }
+ err_printf(m, "Reset count: %u\n", error->reset_count);
+ err_printf(m, "Suspend count: %u\n", error->suspend_count);
+ err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
+ err_printf(m, "EIR: 0x%08x\n", error->eir);
+ err_printf(m, "IER: 0x%08x\n", error->ier);
+ err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+ err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
+ err_printf(m, "CCID: 0x%08x\n", error->ccid);
+ err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+
+ for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
+ err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
+ error->extra_instdone[i]);
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ err_printf(m, "ERROR: 0x%08x\n", error->error);
+ err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
+ }
+
+ if (INTEL_INFO(dev)->gen == 7)
+ err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ err_printf(m, "%s command stream:\n", ring_str(i));
+ i915_ring_error_state(m, dev, &error->ring[i]);
+ }
+
+ if (error->active_bo)
+ print_error_buffers(m, "Active",
+ error->active_bo[0],
+ error->active_bo_count[0]);
+
+ if (error->pinned_bo)
+ print_error_buffers(m, "Pinned",
+ error->pinned_bo[0],
+ error->pinned_bo_count[0]);
+
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ struct drm_i915_error_object *obj;
+
+ obj = error->ring[i].batchbuffer;
+ if (obj) {
+ err_puts(m, dev_priv->ring[i].name);
+ if (error->ring[i].pid != -1)
+ err_printf(m, " (submitted by %s [%d])",
+ error->ring[i].comm,
+ error->ring[i].pid);
+ err_printf(m, " --- gtt_offset = 0x%08x\n",
+ obj->gtt_offset);
+ print_error_obj(m, obj);
+ }
+
+ obj = error->ring[i].wa_batchbuffer;
+ if (obj) {
+ err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
+ dev_priv->ring[i].name, obj->gtt_offset);
+ print_error_obj(m, obj);
+ }
+
+ if (error->ring[i].num_requests) {
+ err_printf(m, "%s --- %d requests\n",
+ dev_priv->ring[i].name,
+ error->ring[i].num_requests);
+ for (j = 0; j < error->ring[i].num_requests; j++) {
+ err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
+ error->ring[i].requests[j].seqno,
+ error->ring[i].requests[j].jiffies,
+ error->ring[i].requests[j].tail);
+ }
+ }
+
+ if ((obj = error->ring[i].ringbuffer)) {
+ err_printf(m, "%s --- ringbuffer = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ print_error_obj(m, obj);
+ }
+
+ if ((obj = error->ring[i].hws_page)) {
+ err_printf(m, "%s --- HW Status = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+ err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+ offset,
+ obj->pages[0][elt],
+ obj->pages[0][elt+1],
+ obj->pages[0][elt+2],
+ obj->pages[0][elt+3]);
+ offset += 16;
+ }
+ }
+
+ if ((obj = error->ring[i].ctx)) {
+ err_printf(m, "%s --- HW Context = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ print_error_obj(m, obj);
+ }
+ }
+
+ if (error->overlay)
+ intel_overlay_print_error_state(m, error->overlay);
+
+ if (error->display)
+ intel_display_print_error_state(m, dev, error->display);
+
+out:
+ if (m->bytes == 0 && m->err)
+ return m->err;
+
+ return 0;
+}
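/*
 * A hedged sketch of consuming the text produced above from userspace;
 * the debugfs path shown is the conventional i915 location on typical
 * systems and may differ.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/dri/0/i915_error_state", "r");
	char line[256];

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}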
+
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
+ size_t count, loff_t pos)
+{
+ memset(ebuf, 0, sizeof(*ebuf));
+
+ /* We need to have enough room to store any i915_error_state printf
+ * so that we can move it to the start position.
+ */
+ ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
+ ebuf->buf = kmalloc(ebuf->size,
+ GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
+
+ if (ebuf->buf == NULL) {
+ ebuf->size = PAGE_SIZE;
+ ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
+ }
+
+ if (ebuf->buf == NULL) {
+ ebuf->size = 128;
+ ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
+ }
+
+ if (ebuf->buf == NULL)
+ return -ENOMEM;
+
+ ebuf->start = pos;
+
+ return 0;
+}
+
+static void i915_error_object_free(struct drm_i915_error_object *obj)
+{
+ int page;
+
+ if (obj == NULL)
+ return;
+
+ for (page = 0; page < obj->page_count; page++)
+ kfree(obj->pages[page]);
+
+ kfree(obj);
+}
+
+static void i915_error_state_free(struct kref *error_ref)
+{
+ struct drm_i915_error_state *error = container_of(error_ref,
+ typeof(*error), ref);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ i915_error_object_free(error->ring[i].batchbuffer);
+ i915_error_object_free(error->ring[i].ringbuffer);
+ i915_error_object_free(error->ring[i].hws_page);
+ i915_error_object_free(error->ring[i].ctx);
+ kfree(error->ring[i].requests);
+ }
+
+ kfree(error->active_bo);
+ kfree(error->overlay);
+ kfree(error->display);
+ kfree(error);
+}
+
+static struct drm_i915_error_object *
+i915_error_object_create_sized(struct drm_i915_private *dev_priv,
+ struct drm_i915_gem_object *src,
+ struct i915_address_space *vm,
+ const int num_pages)
+{
+ struct drm_i915_error_object *dst;
+ int i;
+ u32 reloc_offset;
+
+ if (src == NULL || src->pages == NULL)
+ return NULL;
+
+ dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
+ if (dst == NULL)
+ return NULL;
+
+ reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm);
+ for (i = 0; i < num_pages; i++) {
+ unsigned long flags;
+ void *d;
+
+ d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (d == NULL)
+ goto unwind;
+
+ local_irq_save(flags);
+ if (src->cache_level == I915_CACHE_NONE &&
+ reloc_offset < dev_priv->gtt.mappable_end &&
+ src->has_global_gtt_mapping &&
+ i915_is_ggtt(vm)) {
+ void __iomem *s;
+
+ /* Simply ignore tiling or any overlapping fence.
+ * It's part of the error state, and this hopefully
+ * captures what the GPU read.
+ */
+
+ s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ reloc_offset);
+ memcpy_fromio(d, s, PAGE_SIZE);
+ io_mapping_unmap_atomic(s);
+ } else if (src->stolen) {
+ unsigned long offset;
+
+ offset = dev_priv->mm.stolen_base;
+ offset += src->stolen->start;
+ offset += i << PAGE_SHIFT;
+
+ memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
+ } else {
+ struct page *page;
+ void *s;
+
+ page = i915_gem_object_get_page(src, i);
+
+ drm_clflush_pages(&page, 1);
+
+ s = kmap_atomic(page);
+ memcpy(d, s, PAGE_SIZE);
+ kunmap_atomic(s);
+
+ drm_clflush_pages(&page, 1);
+ }
+ local_irq_restore(flags);
+
+ dst->pages[i] = d;
+
+ reloc_offset += PAGE_SIZE;
+ }
+ dst->page_count = num_pages;
+
+ return dst;
+
+unwind:
+ while (i--)
+ kfree(dst->pages[i]);
+ kfree(dst);
+ return NULL;
+}
+#define i915_error_object_create(dev_priv, src, vm) \
+ i915_error_object_create_sized((dev_priv), (src), (vm), \
+ (src)->base.size>>PAGE_SHIFT)
+
+#define i915_error_ggtt_object_create(dev_priv, src) \
+ i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \
+ (src)->base.size>>PAGE_SHIFT)
+
+static void capture_bo(struct drm_i915_error_buffer *err,
+ struct drm_i915_gem_object *obj)
+{
+ err->size = obj->base.size;
+ err->name = obj->base.name;
+ err->rseqno = obj->last_read_seqno;
+ err->wseqno = obj->last_write_seqno;
+ err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
+ err->read_domains = obj->base.read_domains;
+ err->write_domain = obj->base.write_domain;
+ err->fence_reg = obj->fence_reg;
+ err->pinned = 0;
+ if (i915_gem_obj_is_pinned(obj))
+ err->pinned = 1;
+ if (obj->user_pin_count > 0)
+ err->pinned = -1;
+ err->tiling = obj->tiling_mode;
+ err->dirty = obj->dirty;
+ err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->userptr = obj->userptr.mm != NULL;
+ err->ring = obj->ring ? obj->ring->id : -1;
+ err->cache_level = obj->cache_level;
+}
+
+static u32 capture_active_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
+{
+ struct i915_vma *vma;
+ int i = 0;
+
+ list_for_each_entry(vma, head, mm_list) {
+ capture_bo(err++, vma->obj);
+ if (++i == count)
+ break;
+ }
+
+ return i;
+}
+
+static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, global_list) {
+ if (!i915_gem_obj_is_pinned(obj))
+ continue;
+
+ capture_bo(err++, obj);
+ if (++i == count)
+ break;
+ }
+
+ return i;
+}
+
+/* Generate a semi-unique error code. The code is not meant to have meaning; its
+ * only purpose is to try to prevent falsely duplicated bug reports by
+ * grossly estimating a GPU error state.
+ *
+ * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
+ * the hang if we could strip the GTT offset information from it.
+ *
+ * It's only a small step better than a random number in its current form.
+ */
+static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error,
+ int *ring_id)
+{
+ uint32_t error_code = 0;
+ int i;
+
+ /* IPEHR would be an ideal way to detect errors, as it's the gross
+ * measure of "the command that hung." However, it can contain some very
+ * common synchronization commands which almost always appear in cases
+ * that are strictly a client bug. Use instdone to differentiate those
+ * somewhat.
+ */
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
+ if (ring_id)
+ *ring_id = i;
+
+ return error->ring[i].ipehr ^ error->ring[i].instdone;
+ }
+ }
+
+ return error_code;
+}
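/*
 * A toy illustration, with made-up register values, of the semi-unique
 * code above: the hung ring's IPEHR snapshot XORed with its INSTDONE
 * snapshot.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ipehr = 0x0b160001;	/* hypothetical */
	uint32_t instdone = 0xffd7ffc7;	/* hypothetical */

	printf("GPU HANG: ecode 0:0x%08x\n", ipehr ^ instdone);
	return 0;
}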
+
+static void i915_gem_record_fences(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 8:
+ case 7:
+ case 6:
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ case 2:
+ for (i = 0; i < 8; i++)
+ error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+static void i915_record_ring_state(struct drm_device *dev,
+ struct intel_engine_cs *ring,
+ struct drm_i915_error_ring *ering)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
+ ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
+ ering->semaphore_mboxes[0]
+ = I915_READ(RING_SYNC_0(ring->mmio_base));
+ ering->semaphore_mboxes[1]
+ = I915_READ(RING_SYNC_1(ring->mmio_base));
+ ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
+ ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
+ }
+
+ if (HAS_VEBOX(dev)) {
+ ering->semaphore_mboxes[2] =
+ I915_READ(RING_SYNC_2(ring->mmio_base));
+ ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
+ ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
+ ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
+ ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
+ ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
+ ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
+ if (INTEL_INFO(dev)->gen >= 8) {
+ ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
+ ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+ }
+ ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
+ } else {
+ ering->faddr = I915_READ(DMA_FADD_I8XX);
+ ering->ipeir = I915_READ(IPEIR);
+ ering->ipehr = I915_READ(IPEHR);
+ ering->instdone = I915_READ(INSTDONE);
+ }
+
+ ering->waiting = waitqueue_active(&ring->irq_queue);
+ ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
+ ering->seqno = ring->get_seqno(ring, false);
+ ering->acthd = intel_ring_get_active_head(ring);
+ ering->head = I915_READ_HEAD(ring);
+ ering->tail = I915_READ_TAIL(ring);
+ ering->ctl = I915_READ_CTL(ring);
+
+ if (I915_NEED_GFX_HWS(dev)) {
+ int mmio;
+
+ if (IS_GEN7(dev)) {
+ switch (ring->id) {
+ default:
+ case RCS:
+ mmio = RENDER_HWS_PGA_GEN7;
+ break;
+ case BCS:
+ mmio = BLT_HWS_PGA_GEN7;
+ break;
+ case VCS:
+ mmio = BSD_HWS_PGA_GEN7;
+ break;
+ case VECS:
+ mmio = VEBOX_HWS_PGA_GEN7;
+ break;
+ }
+ } else if (IS_GEN6(ring->dev)) {
+ mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+ } else {
+ /* XXX: gen8 returns to sanity */
+ mmio = RING_HWS_PGA(ring->mmio_base);
+ }
+
+ ering->hws = I915_READ(mmio);
+ }
+
+ ering->cpu_ring_head = ring->buffer->head;
+ ering->cpu_ring_tail = ring->buffer->tail;
+
+ ering->hangcheck_score = ring->hangcheck.score;
+ ering->hangcheck_action = ring->hangcheck.action;
+
+ if (USES_PPGTT(dev)) {
+ int i;
+
+ ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 8:
+ for (i = 0; i < 4; i++) {
+ ering->vm_info.pdp[i] =
+ I915_READ(GEN8_RING_PDP_UDW(ring, i));
+ ering->vm_info.pdp[i] <<= 32;
+ ering->vm_info.pdp[i] |=
+ I915_READ(GEN8_RING_PDP_LDW(ring, i));
+ }
+ break;
+ case 7:
+ ering->vm_info.pp_dir_base =
+ I915_READ(RING_PP_DIR_BASE(ring));
+ break;
+ case 6:
+ ering->vm_info.pp_dir_base =
+ I915_READ(RING_PP_DIR_BASE_READ(ring));
+ break;
+ }
+ }
+}
+
+static void i915_gem_record_active_context(struct intel_engine_cs *ring,
+ struct drm_i915_error_state *error,
+ struct drm_i915_error_ring *ering)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
+ /* Currently render ring is the only HW context user */
+ if (ring->id != RCS || !error->ccid)
+ return;
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
+ ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
+ break;
+ }
+ }
+}
+
+static void i915_gem_record_rings(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *request;
+ int i, count;
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct intel_engine_cs *ring = &dev_priv->ring[i];
+
+ error->ring[i].pid = -1;
+
+ if (ring->dev == NULL)
+ continue;
+
+ error->ring[i].valid = true;
+
+ i915_record_ring_state(dev, ring, &error->ring[i]);
+
+ request = i915_gem_find_active_request(ring);
+ if (request) {
+ /* We need to copy these to an anonymous buffer
+ * as the simplest method to avoid being overwritten
+ * by userspace.
+ */
+ error->ring[i].batchbuffer =
+ i915_error_object_create(dev_priv,
+ request->batch_obj,
+ request->ctx ?
+ request->ctx->vm :
+ &dev_priv->gtt.base);
+
+ if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
+ ring->scratch.obj)
+ error->ring[i].wa_batchbuffer =
+ i915_error_ggtt_object_create(dev_priv,
+ ring->scratch.obj);
+
+ if (request->file_priv) {
+ struct task_struct *task;
+
+ rcu_read_lock();
+ task = pid_task(request->file_priv->file->pid,
+ PIDTYPE_PID);
+ if (task) {
+ strcpy(error->ring[i].comm, task->comm);
+ error->ring[i].pid = task->pid;
+ }
+ rcu_read_unlock();
+ }
+ }
+
+ error->ring[i].ringbuffer =
+ i915_error_ggtt_object_create(dev_priv, ring->buffer->obj);
+
+ if (ring->status_page.obj)
+ error->ring[i].hws_page =
+ i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
+
+ i915_gem_record_active_context(ring, error, &error->ring[i]);
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list)
+ count++;
+
+ error->ring[i].num_requests = count;
+ error->ring[i].requests =
+ kcalloc(count, sizeof(*error->ring[i].requests),
+ GFP_ATOMIC);
+ if (error->ring[i].requests == NULL) {
+ error->ring[i].num_requests = 0;
+ continue;
+ }
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list) {
+ struct drm_i915_error_request *erq;
+
+ erq = &error->ring[i].requests[count++];
+ erq->seqno = request->seqno;
+ erq->jiffies = request->emitted_jiffies;
+ erq->tail = request->tail;
+ }
+ }
+}
+
+/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
+ * VM.
+ */
+static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error,
+ struct i915_address_space *vm,
+ const int ndx)
+{
+ struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int i;
+
+ i = 0;
+ list_for_each_entry(vma, &vm->active_list, mm_list)
+ i++;
+ error->active_bo_count[ndx] = i;
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+ if (i915_gem_obj_is_pinned(obj))
+ i++;
+ error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
+
+ if (i) {
+ active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
+ if (active_bo)
+ pinned_bo = active_bo + error->active_bo_count[ndx];
+ }
+
+ if (active_bo)
+ error->active_bo_count[ndx] =
+ capture_active_bo(active_bo,
+ error->active_bo_count[ndx],
+ &vm->active_list);
+
+ if (pinned_bo)
+ error->pinned_bo_count[ndx] =
+ capture_pinned_bo(pinned_bo,
+ error->pinned_bo_count[ndx],
+ &dev_priv->mm.bound_list);
+ error->active_bo[ndx] = active_bo;
+ error->pinned_bo[ndx] = pinned_bo;
+}
+
+static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ struct i915_address_space *vm;
+ int cnt = 0, i = 0;
+
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ cnt++;
+
+ error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
+ error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
+ error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
+ GFP_ATOMIC);
+ error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
+ GFP_ATOMIC);
+
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ i915_gem_capture_vm(dev_priv, error, vm, i++);
+}
+
+/* Capture all registers which don't fit into another category. */
+static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ /* General organization
+ * 1. Registers specific to a single generation
+ * 2. Registers which belong to multiple generations
+	 * 3. Feature specific registers
+ * 4. Everything else
+ * Please try to follow the order.
+ */
+
+ /* 1: Registers specific to a single generation */
+ if (IS_VALLEYVIEW(dev)) {
+ error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+ error->forcewake = I915_READ(FORCEWAKE_VLV);
+ }
+
+ if (IS_GEN7(dev))
+ error->err_int = I915_READ(GEN7_ERR_INT);
+
+ if (IS_GEN6(dev)) {
+ error->forcewake = I915_READ(FORCEWAKE);
+ error->gab_ctl = I915_READ(GAB_CTL);
+ error->gfx_mode = I915_READ(GFX_MODE);
+ }
+
+ /* 2: Registers which belong to multiple generations */
+ if (INTEL_INFO(dev)->gen >= 7)
+ error->forcewake = I915_READ(FORCEWAKE_MT);
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->derrmr = I915_READ(DERRMR);
+ error->error = I915_READ(ERROR_GEN6);
+ error->done_reg = I915_READ(DONE_REG);
+ }
+
+ /* 3: Feature specific registers */
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ error->gam_ecochk = I915_READ(GAM_ECOCHK);
+ error->gac_eco = I915_READ(GAC_ECO_BITS);
+ }
+
+ /* 4: Everything else */
+ if (HAS_HW_CONTEXTS(dev))
+ error->ccid = I915_READ(CCID);
+
+ if (HAS_PCH_SPLIT(dev))
+ error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else {
+ if (IS_GEN2(dev))
+ error->ier = I915_READ16(IER);
+ else
+ error->ier = I915_READ(IER);
+ }
+
+ error->eir = I915_READ(EIR);
+ error->pgtbl_er = I915_READ(PGTBL_ER);
+
+ i915_get_extra_instdone(dev, error->extra_instdone);
+}
+
+static void i915_error_capture_msg(struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ bool wedged,
+ const char *error_msg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 ecode;
+ int ring_id = -1, len;
+
+ ecode = i915_error_generate_code(dev_priv, error, &ring_id);
+
+ len = scnprintf(error->error_msg, sizeof(error->error_msg),
+ "GPU HANG: ecode %d:0x%08x", ring_id, ecode);
+
+ if (ring_id != -1 && error->ring[ring_id].pid != -1)
+ len += scnprintf(error->error_msg + len,
+ sizeof(error->error_msg) - len,
+ ", in %s [%d]",
+ error->ring[ring_id].comm,
+ error->ring[ring_id].pid);
+
+ scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
+ ", reason: %s, action: %s",
+ error_msg,
+ wedged ? "reset" : "continue");
+}
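
/*
 * [Editor's sketch] i915_error_capture_msg() above chains scnprintf()
 * calls via "len +=". Unlike snprintf(), the kernel's scnprintf() returns
 * the number of characters actually stored, so the running offset can
 * never step past the buffer. A userspace emulation (hypothetical helper
 * name) that shows the clamping behaviour:
 */
#include <stdarg.h>
#include <stdio.h>

static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int ret;

	if (size == 0)
		return 0;

	va_start(args, fmt);
	ret = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (ret < 0)
		return 0;
	/* clamp the would-have-written length to what actually fit */
	return ret >= (int)size ? (int)size - 1 : ret;
}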
+
+static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ error->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ error->suspend_count = dev_priv->suspend_count;
+}
+
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @dev: drm device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error. Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
+void i915_capture_error_state(struct drm_device *dev, bool wedged,
+ const char *error_msg)
+{
+ static bool warned;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+ unsigned long flags;
+
+ /* Account for pipe specific data like PIPE*STAT */
+ error = kzalloc(sizeof(*error), GFP_ATOMIC);
+ if (!error) {
+ DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+ return;
+ }
+
+ kref_init(&error->ref);
+
+ i915_capture_gen_state(dev_priv, error);
+ i915_capture_reg_state(dev_priv, error);
+ i915_gem_capture_buffers(dev_priv, error);
+ i915_gem_record_fences(dev, error);
+ i915_gem_record_rings(dev, error);
+
+ do_gettimeofday(&error->time);
+
+ error->overlay = intel_overlay_capture_error_state(dev);
+ error->display = intel_display_capture_error_state(dev);
+
+ i915_error_capture_msg(dev, error, wedged, error_msg);
+ DRM_INFO("%s\n", error->error_msg);
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ if (dev_priv->gpu_error.first_error == NULL) {
+ dev_priv->gpu_error.first_error = error;
+ error = NULL;
+ }
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+ if (error) {
+ i915_error_state_free(&error->ref);
+ return;
+ }
+
+ if (!warned) {
+ DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+ DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+ DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+ DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
+ DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
+ warned = true;
+ }
+}
+
+void i915_error_state_get(struct drm_device *dev,
+ struct i915_error_state_file_priv *error_priv)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error_priv->error = dev_priv->gpu_error.first_error;
+ if (error_priv->error)
+ kref_get(&error_priv->error->ref);
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+}
+
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
+{
+ if (error_priv->error)
+ kref_put(&error_priv->error->ref, i915_error_state_free);
+}
+
+void i915_destroy_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error = dev_priv->gpu_error.first_error;
+ dev_priv->gpu_error.first_error = NULL;
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+ if (error)
+ kref_put(&error->ref, i915_error_state_free);
+}
+
+const char *i915_cache_level_str(int type)
+{
+ switch (type) {
+ case I915_CACHE_NONE: return " uncached";
+ case I915_CACHE_LLC: return " snooped or LLC";
+ case I915_CACHE_L3_LLC: return " L3+LLC";
+ case I915_CACHE_WT: return " WT";
+ default: return "";
+ }
+}
+
+/* NB: the array is zeroed first, so unused slots read back as 0 */
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ case 3:
+ instdone[0] = I915_READ(INSTDONE);
+ break;
+ case 4:
+ case 5:
+ case 6:
+ instdone[0] = I915_READ(INSTDONE_I965);
+ instdone[1] = I915_READ(INSTDONE1);
+ break;
+	default:
+		WARN_ONCE(1, "Unsupported platform\n");
+		/* fall through: report the gen7/8 registers as a best effort */
+	case 7:
+ case 8:
+ instdone[0] = I915_READ(GEN7_INSTDONE_1);
+ instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+ instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+ instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+ break;
+ }
+}
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 13b028994b2..2e0613e2625 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -31,9 +31,9 @@
*/
#include <linux/compat.h>
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
typedef struct _drm_i915_batchbuffer32 {
int start; /* agp offset */
@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
(unsigned long)request);
}
-drm_ioctl_compat_t *i915_compat_ioctls[] = {
+static drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
[DRM_I915_GETPARAM] = compat_i915_getparam,
@@ -189,6 +189,7 @@ drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_ALLOC] = compat_i915_alloc
};
+#ifdef CONFIG_COMPAT
/**
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
@@ -207,7 +208,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
if (fn != NULL)
@@ -217,3 +218,4 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return ret;
}
+#endif
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7cd8110051b..c05c84f3f09 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -26,151 +26,687 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sysrq.h>
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
+#include <linux/slab.h>
+#include <linux/circ_buf.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
-#define MAX_NOPID ((u32)~0)
+static const u32 hpd_ibx[] = {
+ [HPD_CRT] = SDE_CRT_HOTPLUG,
+ [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
+ [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
+ [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
+ [HPD_PORT_D] = SDE_PORTD_HOTPLUG
+};
+
+static const u32 hpd_cpt[] = {
+ [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
+ [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
+ [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
+ [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
+ [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
+};
+
+static const u32 hpd_mask_i915[] = {
+ [HPD_CRT] = CRT_HOTPLUG_INT_EN,
+ [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
+ [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
+ [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
+ [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
+};
+
+static const u32 hpd_status_g4x[] = {
+ [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
+ [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
+ [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
+ [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
+ [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+};
+
+static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
+ [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
+ [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
+ [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
+ [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
+ [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+};
+
+/* IIR can theoretically queue up two events. Be paranoid. */
+#define GEN8_IRQ_RESET_NDX(type, which) do { \
+ I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+ POSTING_READ(GEN8_##type##_IMR(which)); \
+ I915_WRITE(GEN8_##type##_IER(which), 0); \
+ I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+ POSTING_READ(GEN8_##type##_IIR(which)); \
+ I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+ POSTING_READ(GEN8_##type##_IIR(which)); \
+} while (0)
+
+#define GEN5_IRQ_RESET(type) do { \
+ I915_WRITE(type##IMR, 0xffffffff); \
+ POSTING_READ(type##IMR); \
+ I915_WRITE(type##IER, 0); \
+ I915_WRITE(type##IIR, 0xffffffff); \
+ POSTING_READ(type##IIR); \
+ I915_WRITE(type##IIR, 0xffffffff); \
+ POSTING_READ(type##IIR); \
+} while (0)
+
+/*
+ * We should clear IMR at preinstall/uninstall, and just check at postinstall.
+ */
+#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
+ u32 val = I915_READ(reg); \
+ if (val) { \
+ WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
+ (reg), val); \
+ I915_WRITE((reg), 0xffffffff); \
+ POSTING_READ(reg); \
+ I915_WRITE((reg), 0xffffffff); \
+ POSTING_READ(reg); \
+ } \
+} while (0)
+
+#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
+ GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
+ I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
+ I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
+ POSTING_READ(GEN8_##type##_IER(which)); \
+} while (0)
+
+#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
+ GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
+ I915_WRITE(type##IMR, (imr_val)); \
+ I915_WRITE(type##IER, (ier_val)); \
+ POSTING_READ(type##IER); \
+} while (0)
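
/*
 * [Editor's note] The double IIR clear above matters because IIR can hold
 * one latched event behind the one currently signalled. As a worked
 * example, GEN5_IRQ_RESET(GT) expands (via token pasting) to:
 *
 *	I915_WRITE(GTIMR, 0xffffffff);
 *	POSTING_READ(GTIMR);
 *	I915_WRITE(GTIER, 0);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *
 * i.e. mask everything, disable, then clear IIR twice with posting reads
 * in between so a second queued event is also flushed.
 */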
+
+/* For display hotplug interrupt */
+static void
+ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (WARN_ON(dev_priv->pm.irqs_disabled))
+ return;
+
+ if ((dev_priv->irq_mask & mask) != 0) {
+ dev_priv->irq_mask &= ~mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ POSTING_READ(DEIMR);
+ }
+}
+
+static void
+ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (WARN_ON(dev_priv->pm.irqs_disabled))
+ return;
+
+ if ((dev_priv->irq_mask & mask) != mask) {
+ dev_priv->irq_mask |= mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ POSTING_READ(DEIMR);
+ }
+}
/**
- * Interrupts that are always left unmasked.
- *
- * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
- * we leave them always unmasked in IMR and then control enabling them through
- * PIPESTAT alone.
+ * ilk_update_gt_irq - update GTIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
*/
-#define I915_INTERRUPT_ENABLE_FIX \
- (I915_ASLE_INTERRUPT | \
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
-/** Interrupts that we mask and unmask at runtime. */
-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
+ if (WARN_ON(dev_priv->pm.irqs_disabled))
+ return;
-#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
- PIPE_VBLANK_INTERRUPT_STATUS)
+ dev_priv->gt_irq_mask &= ~interrupt_mask;
+ dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+}
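
/*
 * [Editor's illustration] The IMR update above first clears every bit in
 * interrupt_mask (unmasking it), then re-sets the subset not present in
 * enabled_irq_mask (masking it again). A runnable userspace check with
 * made-up masks:
 */
#include <assert.h>
#include <stdint.h>

static uint32_t update_imr(uint32_t imr, uint32_t interrupt_mask,
			   uint32_t enabled_irq_mask)
{
	imr &= ~interrupt_mask;
	imr |= ~enabled_irq_mask & interrupt_mask;
	return imr;
}

static void imr_example(void)
{
	/* touch bits 0x1 and 0x4; enable (unmask) only 0x4 */
	assert(update_imr(0xff, 0x5, 0x4) == 0xfb);
}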
-#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
- PIPE_VBLANK_INTERRUPT_ENABLE)
+void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+ ilk_update_gt_irq(dev_priv, mask, mask);
+}
-#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
- DRM_I915_VBLANK_PIPE_B)
+void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+ ilk_update_gt_irq(dev_priv, mask, 0);
+}
-void
-ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+/**
+ * snb_update_pm_irq - update GEN6_PMIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
{
- if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
- dev_priv->gt_irq_mask_reg &= ~mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- (void) I915_READ(GTIMR);
+ uint32_t new_val;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (WARN_ON(dev_priv->pm.irqs_disabled))
+ return;
+
+ new_val = dev_priv->pm_irq_mask;
+ new_val &= ~interrupt_mask;
+ new_val |= (~enabled_irq_mask & interrupt_mask);
+
+ if (new_val != dev_priv->pm_irq_mask) {
+ dev_priv->pm_irq_mask = new_val;
+ I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
+ POSTING_READ(GEN6_PMIMR);
}
}
-static inline void
-ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
- if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
- dev_priv->gt_irq_mask_reg |= mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- (void) I915_READ(GTIMR);
+ snb_update_pm_irq(dev_priv, mask, mask);
+}
+
+void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+ snb_update_pm_irq(dev_priv, mask, 0);
+}
+
+static bool ivb_can_enable_err_int(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ enum pipe pipe;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ if (crtc->cpu_fifo_underrun_disabled)
+ return false;
}
+
+ return true;
}
-/* For display hotplug interrupt */
-void
-ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+/**
+ * bdw_update_pm_irq - update GT interrupt 2
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ *
+ * Copied from the snb function, updated with relevant register offsets
+ */
+static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
{
- if ((dev_priv->irq_mask_reg & mask) != 0) {
- dev_priv->irq_mask_reg &= ~mask;
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- (void) I915_READ(DEIMR);
+ uint32_t new_val;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (WARN_ON(dev_priv->pm.irqs_disabled))
+ return;
+
+ new_val = dev_priv->pm_irq_mask;
+ new_val &= ~interrupt_mask;
+ new_val |= (~enabled_irq_mask & interrupt_mask);
+
+ if (new_val != dev_priv->pm_irq_mask) {
+ dev_priv->pm_irq_mask = new_val;
+ I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
+ POSTING_READ(GEN8_GT_IMR(2));
}
}
-static inline void
-ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+ bdw_update_pm_irq(dev_priv, mask, mask);
+}
+
+void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
- if ((dev_priv->irq_mask_reg & mask) != mask) {
- dev_priv->irq_mask_reg |= mask;
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- (void) I915_READ(DEIMR);
+ bdw_update_pm_irq(dev_priv, mask, 0);
+}
+
+static bool cpt_can_enable_serr_int(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ if (crtc->pch_fifo_underrun_disabled)
+ return false;
}
+
+ return true;
}
-void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+void i9xx_check_fifo_underruns(struct drm_device *dev)
{
- if ((dev_priv->irq_mask_reg & mask) != 0) {
- dev_priv->irq_mask_reg &= ~mask;
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void) I915_READ(IMR);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+ for_each_intel_crtc(dev, crtc) {
+ u32 reg = PIPESTAT(crtc->pipe);
+ u32 pipestat;
+
+ if (crtc->cpu_fifo_underrun_disabled)
+ continue;
+
+ pipestat = I915_READ(reg) & 0xffff0000;
+ if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+ continue;
+
+ I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ POSTING_READ(reg);
+
+ DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
}
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
-static inline void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe,
+ bool enable, bool old)
{
- if ((dev_priv->irq_mask_reg & mask) != mask) {
- dev_priv->irq_mask_reg |= mask;
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void) I915_READ(IMR);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg = PIPESTAT(pipe);
+ u32 pipestat = I915_READ(reg) & 0xffff0000;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (enable) {
+ I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ POSTING_READ(reg);
+ } else {
+ if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
}
}
-static inline u32
-i915_pipestat(int pipe)
+static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
{
- if (pipe == 0)
- return PIPEASTAT;
- if (pipe == 1)
- return PIPEBSTAT;
- BUG();
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
+ DE_PIPEB_FIFO_UNDERRUN;
+
+ if (enable)
+ ironlake_enable_display_irq(dev_priv, bit);
+ else
+ ironlake_disable_display_irq(dev_priv, bit);
}
-void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe,
+ bool enable, bool old)
{
- if ((dev_priv->pipestat[pipe] & mask) != mask) {
- u32 reg = i915_pipestat(pipe);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (enable) {
+ I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+
+ if (!ivb_can_enable_err_int(dev))
+ return;
- dev_priv->pipestat[pipe] |= mask;
- /* Enable the interrupt, clear any pending status */
- I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
- (void) I915_READ(reg);
+ ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ } else {
+ ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
+ if (old &&
+ I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+ DRM_ERROR("uncleared fifo underrun on pipe %c\n",
+ pipe_name(pipe));
+ }
}
}
-void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
{
- if ((dev_priv->pipestat[pipe] & mask) != 0) {
- u32 reg = i915_pipestat(pipe);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ assert_spin_locked(&dev_priv->irq_lock);
- dev_priv->pipestat[pipe] &= ~mask;
- I915_WRITE(reg, dev_priv->pipestat[pipe]);
- (void) I915_READ(reg);
+ if (enable)
+ dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
+ else
+ dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
+ I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+}
+
+/**
+ * ibx_display_interrupt_update - update SDEIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
+{
+ uint32_t sdeimr = I915_READ(SDEIMR);
+ sdeimr &= ~interrupt_mask;
+ sdeimr |= (~enabled_irq_mask & interrupt_mask);
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (WARN_ON(dev_priv->pm.irqs_disabled))
+ return;
+
+ I915_WRITE(SDEIMR, sdeimr);
+ POSTING_READ(SDEIMR);
+}
+#define ibx_enable_display_interrupt(dev_priv, bits) \
+ ibx_display_interrupt_update((dev_priv), (bits), (bits))
+#define ibx_disable_display_interrupt(dev_priv, bits) \
+ ibx_display_interrupt_update((dev_priv), (bits), 0)
+
+static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
+ SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
+
+ if (enable)
+ ibx_enable_display_interrupt(dev_priv, bit);
+ else
+ ibx_disable_display_interrupt(dev_priv, bit);
+}
+
+static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
+ bool enable, bool old)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (enable) {
+ I915_WRITE(SERR_INT,
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+
+ if (!cpt_can_enable_serr_int(dev))
+ return;
+
+ ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+ } else {
+ ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+
+ if (old && I915_READ(SERR_INT) &
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
+ DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
+ transcoder_name(pch_transcoder));
+ }
}
}
/**
- * intel_enable_asle - enable ASLE interrupt for OpRegion
+ * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
+ * @dev: drm device
+ * @pipe: pipe
+ * @enable: true if we want to report FIFO underrun errors, false otherwise
+ *
+ * This function lets us disable or enable CPU fifo underruns for a specific
+ * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
+ * reporting for one pipe may also disable all the other CPU error interrupts
+ * for the other pipes, because there's just one interrupt mask/enable bit
+ * for all the pipes.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ bool old;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ old = !intel_crtc->cpu_fifo_underrun_disabled;
+ intel_crtc->cpu_fifo_underrun_disabled = !enable;
+
+ if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
+ i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
+ else if (IS_GEN5(dev) || IS_GEN6(dev))
+ ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
+ else if (IS_GEN7(dev))
+ ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
+ else if (IS_GEN8(dev))
+ broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
+
+ return old;
+}
+
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return ret;
+}
+
+static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
+ enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ return !intel_crtc->cpu_fifo_underrun_disabled;
+}
+
+/**
+ * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
+ * @dev: drm device
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ * @enable: true if we want to report FIFO underrun errors, false otherwise
+ *
+ * This function lets us disable or enable PCH fifo underruns for a specific
+ * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
+ * underrun reporting for one transcoder may also disable all the other PCH
+ * error interrupts for the other transcoders, because there's just one
+ * interrupt mask/enable bit for all the transcoders.
+ *
+ * Returns the previous state of underrun reporting.
*/
-void intel_enable_asle (struct drm_device *dev)
+bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
+ bool enable)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long flags;
+ bool old;
- if (IS_IRONLAKE(dev))
- ironlake_enable_display_irq(dev_priv, DE_GSE);
+ /*
+ * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
+ * has only one pch transcoder A that all pipes can use. To avoid racy
+	 * pch transcoder -> pipe lookups from interrupt code, simply store the
+	 * underrun statistics in crtc A. Since we never expose this anywhere
+	 * nor use it outside of the fifo underrun code here, using the "wrong"
+	 * crtc on LPT won't cause issues.
+ */
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+ old = !intel_crtc->pch_fifo_underrun_disabled;
+ intel_crtc->pch_fifo_underrun_disabled = !enable;
+
+ if (HAS_PCH_IBX(dev))
+ ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
else
- i915_enable_pipestat(dev_priv, 1,
- I915_LEGACY_BLC_EVENT_ENABLE);
+ cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ return old;
+}
+
+static void
+__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+ u32 enable_mask, u32 status_mask)
+{
+ u32 reg = PIPESTAT(pipe);
+ u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+ status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
+ pipe_name(pipe), enable_mask, status_mask))
+ return;
+
+ if ((pipestat & enable_mask) == enable_mask)
+ return;
+
+ dev_priv->pipestat_irq_mask[pipe] |= status_mask;
+
+ /* Enable the interrupt, clear any pending status */
+ pipestat |= enable_mask | status_mask;
+ I915_WRITE(reg, pipestat);
+ POSTING_READ(reg);
+}
+
+static void
+__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+ u32 enable_mask, u32 status_mask)
+{
+ u32 reg = PIPESTAT(pipe);
+ u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+ status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
+ pipe_name(pipe), enable_mask, status_mask))
+ return;
+
+ if ((pipestat & enable_mask) == 0)
+ return;
+
+ dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
+
+ pipestat &= ~enable_mask;
+ I915_WRITE(reg, pipestat);
+ POSTING_READ(reg);
+}
+
+static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
+{
+ u32 enable_mask = status_mask << 16;
+
+ /*
+ * On pipe A we don't support the PSR interrupt yet,
+ * on pipe B and C the same bit MBZ.
+ */
+ if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
+ return 0;
+ /*
+ * On pipe B and C we don't support the PSR interrupt yet, on pipe
+ * A the same bit is for perf counters which we don't use either.
+ */
+ if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
+ return 0;
+
+ enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
+ SPRITE0_FLIP_DONE_INT_EN_VLV |
+ SPRITE1_FLIP_DONE_INT_EN_VLV);
+ if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
+ enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
+ if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
+ enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
+
+ return enable_mask;
+}
+
+void
+i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+ u32 status_mask)
+{
+ u32 enable_mask;
+
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+ status_mask);
+ else
+ enable_mask = status_mask << 16;
+ __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
+}
+
+void
+i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+ u32 status_mask)
+{
+ u32 enable_mask;
+
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+ status_mask);
+ else
+ enable_mask = status_mask << 16;
+ __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
+}
+
+/**
+ * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
+ */
+static void i915_enable_asle_pipestat(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
+ return;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
+ if (INTEL_INFO(dev)->gen >= 4)
+ i915_enable_pipestat(dev_priv, PIPE_A,
+ PIPE_LEGACY_BLC_EVENT_STATUS);
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
@@ -185,158 +721,1620 @@ void intel_enable_asle (struct drm_device *dev)
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
- return 1;
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Locking is horribly broken here, but whatever. */
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ return intel_crtc->active;
+ } else {
+ return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
+ }
+}
+
+/*
+ * This timing diagram depicts the video signal in and
+ * around the vertical blanking period.
+ *
+ * Assumptions about the fictitious mode used in this example:
+ * vblank_start >= 3
+ * vsync_start = vblank_start + 1
+ * vsync_end = vblank_start + 2
+ * vtotal = vblank_start + 3
+ *
+ * start of vblank:
+ * latch double buffered registers
+ * increment frame counter (ctg+)
+ * generate start of vblank interrupt (gen4+)
+ * |
+ * | frame start:
+ * | generate frame start interrupt (aka. vblank interrupt) (gmch)
+ * | may be shifted forward 1-3 extra lines via PIPECONF
+ * | |
+ * | | start of vsync:
+ * | | generate vsync interrupt
+ * | | |
+ * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
+ * . \hs/ . \hs/ \hs/ \hs/ . \hs/
+ * ----va---> <-----------------vb--------------------> <--------va-------------
+ * | | <----vs-----> |
+ * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
+ * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
+ * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
+ * | | |
+ * last visible pixel first visible pixel
+ * | increment frame counter (gen3/4)
+ * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
+ *
+ * x = horizontal active
+ * _ = horizontal blanking
+ * hs = horizontal sync
+ * va = vertical active
+ * vb = vertical blanking
+ * vs = vertical sync
+ * vbs = vblank_start (number)
+ *
+ * Summary:
+ * - most events happen at the start of horizontal sync
+ * - frame start happens at the start of horizontal blank, 1-4 lines
+ * (depending on PIPECONF settings) after the start of vblank
+ * - gen3/4 pixel and frame counter are synchronized with the start
+ * of horizontal active on the first line of vertical active
+ */
+
+static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+ /* Gen2 doesn't have a hardware frame counter */
return 0;
}
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
-u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
- u32 high1, high2, low, count;
-
- high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
- low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+ u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
- "pipe %d\n", pipe);
+ "pipe %c\n", pipe_name(pipe));
return 0;
}
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ const struct drm_display_mode *mode =
+ &intel_crtc->config.adjusted_mode;
+
+ htotal = mode->crtc_htotal;
+ hsync_start = mode->crtc_hsync_start;
+ vbl_start = mode->crtc_vblank_start;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
+ } else {
+ enum transcoder cpu_transcoder = (enum transcoder) pipe;
+
+ htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
+ hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
+ vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
+ if ((I915_READ(PIPECONF(cpu_transcoder)) &
+ PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
+ }
+
+ /* Convert to pixel count */
+ vbl_start *= htotal;
+
+ /* Start of vblank event occurs at start of hsync */
+ vbl_start -= htotal - hsync_start;
+
+ high_frame = PIPEFRAME(pipe);
+ low_frame = PIPEFRAMEPIXEL(pipe);
+
/*
* High & low register fields aren't synchronized, so make sure
* we get a low value that's stable across two reads of the high
* register.
*/
do {
- high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
- PIPE_FRAME_HIGH_SHIFT);
- low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
- PIPE_FRAME_LOW_SHIFT);
- high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
- PIPE_FRAME_HIGH_SHIFT);
+ high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+ low = I915_READ(low_frame);
+ high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
} while (high1 != high2);
- count = (high1 << 8) | low;
+ high1 >>= PIPE_FRAME_HIGH_SHIFT;
+ pixel = low & PIPE_PIXEL_MASK;
+ low >>= PIPE_FRAME_LOW_SHIFT;
- return count;
+ /*
+ * The frame counter increments at beginning of active.
+ * Cook up a vblank counter by also checking the pixel
+ * counter against vblank start.
+ */
+ return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
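
/*
 * [Editor's sketch] The tail end of i915_get_vblank_counter() can be read
 * as a pure function of the raw register fields. Hypothetical standalone
 * version of that last step:
 */
#include <stdint.h>

static uint32_t cook_vblank_counter(uint32_t high1, uint32_t low,
				    uint32_t pixel, uint32_t vbl_start)
{
	/*
	 * The hardware frame counter increments at the start of active,
	 * so bump the value by one once the pixel counter has already
	 * crossed the (pixel-unit) start of vblank.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}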
-u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int reg = PIPE_FRMCOUNT_GM45(pipe);
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
- "pipe %d\n", pipe);
+ "pipe %c\n", pipe_name(pipe));
return 0;
}
return I915_READ(reg);
}
+/* raw reads, only for fast reads of display block, no need for forcewake etc. */
+#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
+
+static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ enum pipe pipe = crtc->pipe;
+ int position, vtotal;
+
+ vtotal = mode->crtc_vtotal;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vtotal /= 2;
+
+ if (IS_GEN2(dev))
+ position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+ else
+ position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
+
+ /*
+ * See update_scanline_offset() for the details on the
+ * scanline_offset adjustment.
+ */
+ return (position + crtc->scanline_offset) % vtotal;
+}
+
+static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+ unsigned int flags, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+ int position;
+ int vbl_start, vbl_end, hsync_start, htotal, vtotal;
+ bool in_vbl = true;
+ int ret = 0;
+ unsigned long irqflags;
+
+ if (!intel_crtc->active) {
+ DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
+ "pipe %c\n", pipe_name(pipe));
+ return 0;
+ }
+
+ htotal = mode->crtc_htotal;
+ hsync_start = mode->crtc_hsync_start;
+ vtotal = mode->crtc_vtotal;
+ vbl_start = mode->crtc_vblank_start;
+ vbl_end = mode->crtc_vblank_end;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
+ vbl_end /= 2;
+ vtotal /= 2;
+ }
+
+ ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+ /*
+ * Lock uncore.lock, as we will do multiple timing critical raw
+ * register reads, potentially with preemption disabled, so the
+ * following code must not block on uncore.lock.
+ */
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+ /* Get optional system timestamp before query. */
+ if (stime)
+ *stime = ktime_get();
+
+ if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ /* No obvious pixelcount register. Only query vertical
+ * scanout position from Display scan line register.
+ */
+ position = __intel_get_crtc_scanline(intel_crtc);
+ } else {
+ /* Have access to pixelcount since start of frame.
+ * We can split this into vertical and horizontal
+ * scanout position.
+ */
+ position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+ /* convert to pixel counts */
+ vbl_start *= htotal;
+ vbl_end *= htotal;
+ vtotal *= htotal;
+
+ /*
+ * In interlaced modes, the pixel counter counts all pixels,
+ * so one field will have htotal more pixels. In order to avoid
+ * the reported position from jumping backwards when the pixel
+ * counter is beyond the length of the shorter field, just
+		 * clamp the position to the length of the shorter field. This
+ * matches how the scanline counter based position works since
+ * the scanline counter doesn't count the two half lines.
+ */
+ if (position >= vtotal)
+ position = vtotal - 1;
+
+ /*
+ * Start of vblank interrupt is triggered at start of hsync,
+ * just prior to the first active line of vblank. However we
+ * consider lines to start at the leading edge of horizontal
+ * active. So, should we get here before we've crossed into
+ * the horizontal active of the first line in vblank, we would
+ * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
+ * always add htotal-hsync_start to the current pixel position.
+ */
+ position = (position + htotal - hsync_start) % vtotal;
+ }
+
+ /* Get optional system timestamp after query. */
+ if (etime)
+ *etime = ktime_get();
+
+ /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ in_vbl = position >= vbl_start && position < vbl_end;
+
+ /*
+	 * While in vblank, position will be negative,
+	 * counting up towards 0 at vbl_end. And outside
+	 * vblank, position will be positive, counting
+	 * up from vbl_end.
+ */
+ if (position >= vbl_start)
+ position -= vbl_end;
+ else
+ position += vtotal - vbl_end;
+
+ if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ *vpos = position;
+ *hpos = 0;
+ } else {
+ *vpos = position / htotal;
+ *hpos = position - (*vpos * htotal);
+ }
+
+ /* In vblank? */
+ if (in_vbl)
+ ret |= DRM_SCANOUTPOS_INVBL;
+
+ return ret;
+}
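
/*
 * [Editor's illustration] The sign convention produced above: inside
 * vblank the returned position is negative, counting up to 0 at vbl_end;
 * in the active area it is non-negative, counting up from vbl_end.
 * Standalone sketch of that adjustment (assumes 0 <= position < vtotal):
 */
static int normalize_scanout_pos(int position, int vbl_start, int vbl_end,
				 int vtotal)
{
	if (position >= vbl_start)
		return position - vbl_end;	/* in vblank: negative */
	return position + vtotal - vbl_end;	/* active: >= 0 */
}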
+
+int intel_get_crtc_scanline(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ unsigned long irqflags;
+ int position;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ position = __intel_get_crtc_scanline(crtc);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ return position;
+}
+
+static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags)
+{
+ struct drm_crtc *crtc;
+
+ if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
+ return -EINVAL;
+ }
+
+ /* Get drm_crtc to timestamp: */
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
+ if (crtc == NULL) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
+ return -EINVAL;
+ }
+
+ if (!crtc->enabled) {
+ DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+ return -EBUSY;
+ }
+
+ /* Helper routine in DRM core does all the work: */
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+ vblank_time, flags,
+ crtc,
+ &to_intel_crtc(crtc)->config.adjusted_mode);
+}
+
+static bool intel_hpd_irq_event(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ enum drm_connector_status old_status;
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ old_status = connector->status;
+
+ connector->status = connector->funcs->detect(connector, false);
+ if (old_status == connector->status)
+ return false;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
+ connector->base.id,
+ connector->name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->status));
+
+ return true;
+}
+
/*
* Handle hotplug events outside the interrupt handler proper.
*/
+#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
+
static void i915_hotplug_work_func(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- hotplug_work);
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_connector *intel_connector;
+ struct intel_encoder *intel_encoder;
struct drm_connector *connector;
+ unsigned long irqflags;
+ bool hpd_disabled = false;
+ bool changed = false;
+ u32 hpd_event_bits;
- if (mode_config->num_connector) {
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_output *intel_output = to_intel_output(connector);
-
- if (intel_output->hot_plug)
- (*intel_output->hot_plug) (intel_output);
+ /* HPD irq before everything is fully set up. */
+ if (!dev_priv->enable_hotplug_processing)
+ return;
+
+ mutex_lock(&mode_config->mutex);
+ DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
+ hpd_event_bits = dev_priv->hpd_event_bits;
+ dev_priv->hpd_event_bits = 0;
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ intel_connector = to_intel_connector(connector);
+ intel_encoder = intel_connector->encoder;
+ if (intel_encoder->hpd_pin > HPD_NONE &&
+ dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
+ connector->polled == DRM_CONNECTOR_POLL_HPD) {
+ DRM_INFO("HPD interrupt storm detected on connector %s: "
+ "switching from hotplug detection to polling\n",
+ connector->name);
+ dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT
+ | DRM_CONNECTOR_POLL_DISCONNECT;
+ hpd_disabled = true;
+ }
+ if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+ DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
+ connector->name, intel_encoder->hpd_pin);
}
}
- /* Just fire off a uevent and let userspace tell us what to do */
- drm_sysfs_hotplug_event(dev);
+	/* If there were no outputs to poll, polling was disabled, so
+	 * make sure it is re-enabled now that HPD has been turned off
+	 * for some connectors. */
+ if (hpd_disabled) {
+ drm_kms_helper_poll_enable(dev);
+ mod_timer(&dev_priv->hotplug_reenable_timer,
+ jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
+ }
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ intel_connector = to_intel_connector(connector);
+ intel_encoder = intel_connector->encoder;
+ if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+ if (intel_encoder->hot_plug)
+ intel_encoder->hot_plug(intel_encoder);
+ if (intel_hpd_irq_event(dev, connector))
+ changed = true;
+ }
+ }
+ mutex_unlock(&mode_config->mutex);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
}
-irqreturn_t ironlake_irq_handler(struct drm_device *dev)
+static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pch_iir;
- u32 new_de_iir, new_gt_iir, new_pch_iir;
- struct drm_i915_master_private *master_priv;
+ del_timer_sync(&dev_priv->hotplug_reenable_timer);
+}
- /* disable master interrupt before clearing iir */
- de_ier = I915_READ(DEIER);
- I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- (void)I915_READ(DEIER);
+static void ironlake_rps_change_irq_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 busy_up, busy_down, max_avg, min_avg;
+ u8 new_delay;
+
+ spin_lock(&mchdev_lock);
+
+ I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+
+ new_delay = dev_priv->ips.cur_delay;
+
+ I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
+ busy_up = I915_READ(RCPREVBSYTUPAVG);
+ busy_down = I915_READ(RCPREVBSYTDNAVG);
+ max_avg = I915_READ(RCBMAXAVG);
+ min_avg = I915_READ(RCBMINAVG);
+
+ /* Handle RCS change request from hw */
+ if (busy_up > max_avg) {
+ if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
+ new_delay = dev_priv->ips.cur_delay - 1;
+ if (new_delay < dev_priv->ips.max_delay)
+ new_delay = dev_priv->ips.max_delay;
+ } else if (busy_down < min_avg) {
+ if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
+ new_delay = dev_priv->ips.cur_delay + 1;
+ if (new_delay > dev_priv->ips.min_delay)
+ new_delay = dev_priv->ips.min_delay;
+ }
- de_iir = I915_READ(DEIIR);
- gt_iir = I915_READ(GTIIR);
- pch_iir = I915_READ(SDEIIR);
+ if (ironlake_set_drps(dev, new_delay))
+ dev_priv->ips.cur_delay = new_delay;
+
+ spin_unlock(&mchdev_lock);
+}
+
+static void notify_ring(struct drm_device *dev,
+ struct intel_engine_cs *ring)
+{
+ if (!intel_ring_initialized(ring))
+ return;
+
+ trace_i915_gem_request_complete(ring);
+
+ wake_up_all(&ring->irq_queue);
+ i915_queue_hangcheck(dev);
+}
+
+static void gen6_pm_rps_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, rps.work);
+ u32 pm_iir;
+ int new_delay, adj;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ pm_iir = dev_priv->rps.pm_iir;
+ dev_priv->rps.pm_iir = 0;
+ if (IS_BROADWELL(dev_priv->dev))
+ bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ else {
+ /* Make sure not to corrupt PMIMR state used by ringbuffer */
+ snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ }
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Make sure we didn't queue anything we're not going to process. */
+ WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
+
+ if ((pm_iir & dev_priv->pm_rps_events) == 0)
+ return;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ adj = dev_priv->rps.last_adj;
+ if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+ if (adj > 0)
+ adj *= 2;
+ else
+ adj = 1;
+ new_delay = dev_priv->rps.cur_freq + adj;
+
+ /*
+ * For better performance, jump directly
+ * to RPe if we're below it.
+ */
+ if (new_delay < dev_priv->rps.efficient_freq)
+ new_delay = dev_priv->rps.efficient_freq;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+ if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
+ new_delay = dev_priv->rps.efficient_freq;
+ else
+ new_delay = dev_priv->rps.min_freq_softlimit;
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+ if (adj < 0)
+ adj *= 2;
+ else
+ adj = -1;
+ new_delay = dev_priv->rps.cur_freq + adj;
+ } else { /* unknown event */
+ new_delay = dev_priv->rps.cur_freq;
+ }
+
+ /* sysfs frequency interfaces may have snuck in while servicing the
+ * interrupt
+ */
+ new_delay = clamp_t(int, new_delay,
+ dev_priv->rps.min_freq_softlimit,
+ dev_priv->rps.max_freq_softlimit);
+
+ dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
+
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ valleyview_set_rps(dev_priv->dev, new_delay);
+ else
+ gen6_set_rps(dev_priv->dev, new_delay);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
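
/*
 * [Editor's sketch, simplified] The adjustment logic above accelerates:
 * each repeated up (or down) event doubles the previous step before the
 * result is clamped to the sysfs softlimits. Hypothetical standalone
 * model that omits the jump-to-RPe special case:
 */
static int rps_next_freq(int cur, int *adj, int up, int min, int max)
{
	int new_freq;

	if (up)
		*adj = *adj > 0 ? *adj * 2 : 1;
	else
		*adj = *adj < 0 ? *adj * 2 : -1;

	new_freq = cur + *adj;
	if (new_freq < min)
		new_freq = min;
	if (new_freq > max)
		new_freq = max;

	/* remember the effective step, like rps.last_adj above */
	*adj = new_freq - cur;
	return new_freq;
}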
+
+/**
+ * ivybridge_parity_work - Workqueue called when a parity error interrupt
+ * occurred.
+ * @work: workqueue struct
+ *
+ * Doesn't actually do anything except notify userspace. As a consequence of
+ * this event, userspace should try to remap the bad rows, since statistically
+ * the same row is likely to go bad again.
+ */
+static void ivybridge_parity_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, l3_parity.error_work);
+ u32 error_status, row, bank, subbank;
+ char *parity_event[6];
+ uint32_t misccpctl;
+ unsigned long flags;
+ uint8_t slice = 0;
+
+ /* We must turn off DOP level clock gating to access the L3 registers.
+ * In order to prevent a get/put style interface, acquire struct mutex
+ * any time we access those registers.
+ */
+ mutex_lock(&dev_priv->dev->struct_mutex);
+
+ /* If we've screwed up tracking, just let the interrupt fire again */
+ if (WARN_ON(!dev_priv->l3_parity.which_slice))
+ goto out;
+
+ misccpctl = I915_READ(GEN7_MISCCPCTL);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ POSTING_READ(GEN7_MISCCPCTL);
+
+ while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
+ u32 reg;
+
+ slice--;
+ if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
+ break;
+
+ dev_priv->l3_parity.which_slice &= ~(1<<slice);
+
+ reg = GEN7_L3CDERRST1 + (slice * 0x200);
+
+ error_status = I915_READ(reg);
+ row = GEN7_PARITY_ERROR_ROW(error_status);
+ bank = GEN7_PARITY_ERROR_BANK(error_status);
+ subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+
+ I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+ POSTING_READ(reg);
+
+ parity_event[0] = I915_L3_PARITY_UEVENT "=1";
+ parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
+ parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
+ parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
+ parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
+ parity_event[5] = NULL;
+
+ kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
+ KOBJ_CHANGE, parity_event);
+
+ DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
+ slice, row, bank, subbank);
+
+ kfree(parity_event[4]);
+ kfree(parity_event[3]);
+ kfree(parity_event[2]);
+ kfree(parity_event[1]);
+ }
+
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+out:
+ WARN_ON(dev_priv->l3_parity.which_slice);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+}
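
/*
 * [Editor's illustration] The slice loop above walks the set bits of
 * l3_parity.which_slice with ffs(), clearing each as it is handled. A
 * minimal userspace analogue:
 */
#include <strings.h>	/* ffs() */

static void walk_slices(unsigned int which_slice)
{
	int slice;

	while ((slice = ffs(which_slice)) != 0) {
		slice--;			/* ffs() is 1-based */
		which_slice &= ~(1u << slice);
		/* per-slice work goes here, e.g. reading the error
		 * status register at GEN7_L3CDERRST1 + slice * 0x200 */
	}
}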
+
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_L3_DPF(dev))
+ return;
+
+ spin_lock(&dev_priv->irq_lock);
+ ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
+ spin_unlock(&dev_priv->irq_lock);
+
+ iir &= GT_PARITY_ERROR(dev);
+ if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
+ dev_priv->l3_parity.which_slice |= 1 << 1;
+
+ if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+ dev_priv->l3_parity.which_slice |= 1 << 0;
+
+ queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
+}
+
+static void ilk_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 gt_iir)
+{
+ if (gt_iir &
+ (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (gt_iir & ILK_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+}
+
+static void snb_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 gt_iir)
+{
+ if (gt_iir &
+ (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (gt_iir & GT_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+ if (gt_iir & GT_BLT_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[BCS]);
+
+ if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
+ GT_BSD_CS_ERROR_INTERRUPT |
+ GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
+ i915_handle_error(dev, false, "GT error interrupt 0x%08x",
+ gt_iir);
+ }
+
+ if (gt_iir & GT_PARITY_ERROR(dev))
+ ivybridge_parity_error_irq_handler(dev, gt_iir);
+}
+
+static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
+{
+ if ((pm_iir & dev_priv->pm_rps_events) == 0)
+ return;
+
+ spin_lock(&dev_priv->irq_lock);
+ dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
+ bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ spin_unlock(&dev_priv->irq_lock);
+
+ queue_work(dev_priv->wq, &dev_priv->rps.work);
+}
+
+static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 master_ctl)
+{
+ u32 rcs, bcs, vcs;
+ uint32_t tmp = 0;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+ tmp = I915_READ(GEN8_GT_IIR(0));
+ if (tmp) {
+ ret = IRQ_HANDLED;
+ rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
+ bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+ if (rcs & GT_RENDER_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (bcs & GT_RENDER_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[BCS]);
+ I915_WRITE(GEN8_GT_IIR(0), tmp);
+ } else
+ DRM_ERROR("The master control interrupt lied (GT0)!\n");
+ }
+
+ if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
+ tmp = I915_READ(GEN8_GT_IIR(1));
+ if (tmp) {
+ ret = IRQ_HANDLED;
+ vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
+ if (vcs & GT_RENDER_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+ vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
+ if (vcs & GT_RENDER_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS2]);
+ I915_WRITE(GEN8_GT_IIR(1), tmp);
+ } else
+ DRM_ERROR("The master control interrupt lied (GT1)!\n");
+ }
+
+ if (master_ctl & GEN8_GT_PM_IRQ) {
+ tmp = I915_READ(GEN8_GT_IIR(2));
+ if (tmp & dev_priv->pm_rps_events) {
+ ret = IRQ_HANDLED;
+ gen8_rps_irq_handler(dev_priv, tmp);
+ I915_WRITE(GEN8_GT_IIR(2),
+ tmp & dev_priv->pm_rps_events);
+ } else
+ DRM_ERROR("The master control interrupt lied (PM)!\n");
+ }
+
+ if (master_ctl & GEN8_GT_VECS_IRQ) {
+ tmp = I915_READ(GEN8_GT_IIR(3));
+ if (tmp) {
+ ret = IRQ_HANDLED;
+ vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
+ if (vcs & GT_RENDER_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VECS]);
+ I915_WRITE(GEN8_GT_IIR(3), tmp);
+ } else
+ DRM_ERROR("The master control interrupt lied (GT3)!\n");
+ }
+
+ return ret;
+}
+
+#define HPD_STORM_DETECT_PERIOD 1000
+#define HPD_STORM_THRESHOLD 5
+
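+/*
+ * Count hotplug interrupts per pin: a pin that fires more than
+ * HPD_STORM_THRESHOLD times within HPD_STORM_DETECT_PERIOD ms is treated
+ * as an interrupt storm and gets its interrupt disabled.
+ */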
+static inline void intel_hpd_irq_handler(struct drm_device *dev,
+ u32 hotplug_trigger,
+ const u32 *hpd)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+ bool storm_detected = false;
+
+ if (!hotplug_trigger)
+ return;
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_trigger);
+
+ spin_lock(&dev_priv->irq_lock);
+ for (i = 1; i < HPD_NUM_PINS; i++) {
+ if (hpd[i] & hotplug_trigger &&
+ dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
+ /*
+ * On GMCH platforms the interrupt mask bits only
+ * prevent irq generation, not the setting of the
+ * hotplug bits itself. So only WARN about unexpected
+ * interrupts on saner platforms.
+ */
+ WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+ "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
+ hotplug_trigger, i, hpd[i]);
+
+ continue;
+ }
+
+ if (!(hpd[i] & hotplug_trigger) ||
+ dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
+ continue;
+
+ dev_priv->hpd_event_bits |= (1 << i);
+ if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
+ dev_priv->hpd_stats[i].hpd_last_jiffies
+ + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
+ dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
+ dev_priv->hpd_stats[i].hpd_cnt = 0;
+ DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
+ } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
+ dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
+ dev_priv->hpd_event_bits &= ~(1 << i);
+ DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
+ storm_detected = true;
+ } else {
+ dev_priv->hpd_stats[i].hpd_cnt++;
+ DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
+ dev_priv->hpd_stats[i].hpd_cnt);
+ }
+ }
+
+ if (storm_detected)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock(&dev_priv->irq_lock);
+
+ /*
+ * Our hotplug handler can grab modeset locks (by calling down into the
+	 * fb helpers). Hence it must not be run on our own dev_priv->wq work
+ * queue for otherwise the flush_work in the pageflip code will
+ * deadlock.
+ */
+ schedule_work(&dev_priv->hotplug_work);
+}
+
+static void gmbus_irq_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ wake_up_all(&dev_priv->gmbus_wait_queue);
+}
+
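+/* DP AUX completions share the GMBUS wait queue; both only need a wake-up. */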
+static void dp_aux_irq_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ wake_up_all(&dev_priv->gmbus_wait_queue);
+}
+
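+/*
+ * Pipe CRC results are pushed into a per-pipe circular buffer for the
+ * debugfs interface; without CONFIG_DEBUG_FS this collapses to a no-op.
+ */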
+#if defined(CONFIG_DEBUG_FS)
+static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+ uint32_t crc0, uint32_t crc1,
+ uint32_t crc2, uint32_t crc3,
+ uint32_t crc4)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+ struct intel_pipe_crc_entry *entry;
+ int head, tail;
+
+ spin_lock(&pipe_crc->lock);
+
+ if (!pipe_crc->entries) {
+ spin_unlock(&pipe_crc->lock);
+ DRM_ERROR("spurious interrupt\n");
+ return;
+ }
+
+ head = pipe_crc->head;
+ tail = pipe_crc->tail;
+
+ if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
+ spin_unlock(&pipe_crc->lock);
+ DRM_ERROR("CRC buffer overflowing\n");
+ return;
+ }
+
+ entry = &pipe_crc->entries[head];
+
+ entry->frame = dev->driver->get_vblank_counter(dev, pipe);
+ entry->crc[0] = crc0;
+ entry->crc[1] = crc1;
+ entry->crc[2] = crc2;
+ entry->crc[3] = crc3;
+ entry->crc[4] = crc4;
+
+ head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+ pipe_crc->head = head;
+
+ spin_unlock(&pipe_crc->lock);
+
+ wake_up_interruptible(&pipe_crc->wq);
+}
+#else
+static inline void
+display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+ uint32_t crc0, uint32_t crc1,
+ uint32_t crc2, uint32_t crc3,
+ uint32_t crc4) {}
+#endif
+
+static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ display_pipe_crc_irq_handler(dev, pipe,
+ I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+ 0, 0, 0, 0);
+}
+
+static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ display_pipe_crc_irq_handler(dev, pipe,
+ I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+ I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
+ I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
+ I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
+ I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
+}
+
+static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t res1, res2;
+
+ if (INTEL_INFO(dev)->gen >= 3)
+ res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
+ else
+ res1 = 0;
+
+ if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
+ else
+ res2 = 0;
+
+ display_pipe_crc_irq_handler(dev, pipe,
+ I915_READ(PIPE_CRC_RES_RED(pipe)),
+ I915_READ(PIPE_CRC_RES_GREEN(pipe)),
+ I915_READ(PIPE_CRC_RES_BLUE(pipe)),
+ res1, res2);
+}
+
+/* The RPS events need forcewake, so we add them to a work queue and mask their
+ * IMR bits until the work is done. Other interrupts can be processed without
+ * the work queue. */
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
+{
+ if (pm_iir & dev_priv->pm_rps_events) {
+ spin_lock(&dev_priv->irq_lock);
+ dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
+ snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ spin_unlock(&dev_priv->irq_lock);
+
+ queue_work(dev_priv->wq, &dev_priv->rps.work);
+ }
+
+ if (HAS_VEBOX(dev_priv->dev)) {
+ if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+ notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
+
+ if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
+ i915_handle_error(dev_priv->dev, false,
+ "VEBOX CS error interrupt 0x%08x",
+ pm_iir);
+ }
+ }
+}
+
+static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
+{
+ struct intel_crtc *crtc;
+
+ if (!drm_handle_vblank(dev, pipe))
+ return false;
+
+ crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ wake_up(&crtc->vbl_wait);
+
+ return true;
+}
+
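+/*
+ * First latch and clear the PIPESTAT registers under the irq lock (and only
+ * the bits we actually want to handle), then act on the latched bits with
+ * the lock dropped.
+ */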
+static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pipe_stats[I915_MAX_PIPES] = { };
+ int pipe;
+
+ spin_lock(&dev_priv->irq_lock);
+ for_each_pipe(pipe) {
+ int reg;
+ u32 mask, iir_bit = 0;
+
+ /*
+ * PIPESTAT bits get signalled even when the interrupt is
+ * disabled with the mask bits, and some of the status bits do
+ * not generate interrupts at all (like the underrun bit). Hence
+ * we need to be careful that we only handle what we want to
+ * handle.
+ */
+ mask = 0;
+ if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
+ mask |= PIPE_FIFO_UNDERRUN_STATUS;
+
+ switch (pipe) {
+ case PIPE_A:
+ iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+ break;
+ case PIPE_B:
+ iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ break;
+ case PIPE_C:
+ iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+ break;
+ }
+ if (iir & iir_bit)
+ mask |= dev_priv->pipestat_irq_mask[pipe];
+
+ if (!mask)
+ continue;
+
+ reg = PIPESTAT(pipe);
+ mask |= PIPESTAT_INT_ENABLE_MASK;
+ pipe_stats[pipe] = I915_READ(reg) & mask;
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
+ PIPESTAT_INT_STATUS_MASK))
+ I915_WRITE(reg, pipe_stats[pipe]);
+ }
+ spin_unlock(&dev_priv->irq_lock);
+
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
+ intel_pipe_handle_vblank(dev, pipe);
+
+ if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev, pipe);
+
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ }
+
+ if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+ gmbus_irq_handler(dev);
+}
+
+static void i9xx_hpd_irq_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ if (IS_G4X(dev)) {
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+
+ intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
+ } else {
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+
+ intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
+ }
+
+ if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
+ hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+ dp_aux_irq_handler(dev);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ /*
+ * Make sure hotplug status is cleared before we clear IIR, or else we
+ * may miss hotplug events.
+ */
+ POSTING_READ(PORT_HOTPLUG_STAT);
+}
+
+static irqreturn_t valleyview_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 iir, gt_iir, pm_iir;
+ irqreturn_t ret = IRQ_NONE;
+
+ while (true) {
+ iir = I915_READ(VLV_IIR);
+ gt_iir = I915_READ(GTIIR);
+ pm_iir = I915_READ(GEN6_PMIIR);
+
+ if (gt_iir == 0 && pm_iir == 0 && iir == 0)
+ goto out;
+
+ ret = IRQ_HANDLED;
+
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+
+ valleyview_pipestat_irq_handler(dev, iir);
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ i9xx_hpd_irq_handler(dev);
+
+ if (pm_iir)
+ gen6_rps_irq_handler(dev_priv, pm_iir);
+
+ I915_WRITE(GTIIR, gt_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ I915_WRITE(VLV_IIR, iir);
+ }
+
+out:
+ return ret;
+}
+
+static irqreturn_t cherryview_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 master_ctl, iir;
+ irqreturn_t ret = IRQ_NONE;
for (;;) {
- if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
+ master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
+ iir = I915_READ(VLV_IIR);
+
+ if (master_ctl == 0 && iir == 0)
break;
+ I915_WRITE(GEN8_MASTER_IRQ, 0);
+
+ gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+
+ valleyview_pipestat_irq_handler(dev, iir);
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ i9xx_hpd_irq_handler(dev);
+
+ I915_WRITE(VLV_IIR, iir);
+
+ I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+ POSTING_READ(GEN8_MASTER_IRQ);
+
ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
+
+ intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
+
+ if (pch_iir & SDE_AUDIO_POWER_MASK) {
+ int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
+ SDE_AUDIO_POWER_SHIFT);
+ DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+ port_name(port));
+ }
+
+ if (pch_iir & SDE_AUX_MASK)
+ dp_aux_irq_handler(dev);
+
+ if (pch_iir & SDE_GMBUS)
+ gmbus_irq_handler(dev);
+
+ if (pch_iir & SDE_AUDIO_HDCP_MASK)
+ DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_TRANS_MASK)
+ DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
+
+ if (pch_iir & SDE_POISON)
+ DRM_ERROR("PCH poison interrupt\n");
+
+ if (pch_iir & SDE_FDI_MASK)
+ for_each_pipe(pipe)
+ DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+
+ if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
+ DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
+
+ if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
+ DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
+
+ if (pch_iir & SDE_TRANSA_FIFO_UNDER)
+ if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
+ false))
+ DRM_ERROR("PCH transcoder A FIFO underrun\n");
+
+ if (pch_iir & SDE_TRANSB_FIFO_UNDER)
+ if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
+ false))
+ DRM_ERROR("PCH transcoder B FIFO underrun\n");
+}
+
+static void ivb_err_int_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 err_int = I915_READ(GEN7_ERR_INT);
+ enum pipe pipe;
+
+ if (err_int & ERR_INT_POISON)
+ DRM_ERROR("Poison interrupt\n");
+
+ for_each_pipe(pipe) {
+ if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
+ if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+ false))
+ DRM_ERROR("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
+ }
+
+ if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
+ if (IS_IVYBRIDGE(dev))
+ ivb_pipe_crc_irq_handler(dev, pipe);
+ else
+ hsw_pipe_crc_irq_handler(dev, pipe);
+ }
+ }
+
+ I915_WRITE(GEN7_ERR_INT, err_int);
+}
+
+static void cpt_serr_int_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 serr_int = I915_READ(SERR_INT);
+
+ if (serr_int & SERR_INT_POISON)
+ DRM_ERROR("PCH poison interrupt\n");
+
+ if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
+ if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
+ false))
+ DRM_ERROR("PCH transcoder A FIFO underrun\n");
+
+ if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
+ if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
+ false))
+ DRM_ERROR("PCH transcoder B FIFO underrun\n");
+
+ if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
+ if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
+ false))
+ DRM_ERROR("PCH transcoder C FIFO underrun\n");
+
+ I915_WRITE(SERR_INT, serr_int);
+}
+
+static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
+
+ intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
+
+ if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
+ int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
+ SDE_AUDIO_POWER_SHIFT_CPT);
+ DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
+ port_name(port));
+ }
+
+ if (pch_iir & SDE_AUX_MASK_CPT)
+ dp_aux_irq_handler(dev);
+
+ if (pch_iir & SDE_GMBUS_CPT)
+ gmbus_irq_handler(dev);
+
+ if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
+ DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
+ DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
+
+ if (pch_iir & SDE_FDI_MASK_CPT)
+ for_each_pipe(pipe)
+ DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+
+ if (pch_iir & SDE_ERROR_CPT)
+ cpt_serr_int_handler(dev);
+}
+
+static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+
+ if (de_iir & DE_AUX_CHANNEL_A)
+ dp_aux_irq_handler(dev);
+
+ if (de_iir & DE_GSE)
+ intel_opregion_asle_intr(dev);
+
+ if (de_iir & DE_POISON)
+ DRM_ERROR("Poison interrupt\n");
+
+ for_each_pipe(pipe) {
+ if (de_iir & DE_PIPE_VBLANK(pipe))
+ intel_pipe_handle_vblank(dev, pipe);
+
+ if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
+ if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+ DRM_ERROR("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
+
+ if (de_iir & DE_PIPE_CRC_DONE(pipe))
+ i9xx_pipe_crc_irq_handler(dev, pipe);
+
+ /* plane/pipes map 1:1 on ilk+ */
+ if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip_plane(dev, pipe);
+ }
+ }
+
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT) {
+ u32 pch_iir = I915_READ(SDEIIR);
+
+ if (HAS_PCH_CPT(dev))
+ cpt_irq_handler(dev, pch_iir);
+ else
+ ibx_irq_handler(dev, pch_iir);
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
- new_pch_iir = I915_READ(SDEIIR);
+ }
- I915_WRITE(DEIIR, de_iir);
- new_de_iir = I915_READ(DEIIR);
+ if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
+ ironlake_rps_change_irq_handler(dev);
+}
+
+static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+
+ if (de_iir & DE_ERR_INT_IVB)
+ ivb_err_int_handler(dev);
+
+ if (de_iir & DE_AUX_CHANNEL_A_IVB)
+ dp_aux_irq_handler(dev);
+
+ if (de_iir & DE_GSE_IVB)
+ intel_opregion_asle_intr(dev);
+
+ for_each_pipe(pipe) {
+ if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
+ intel_pipe_handle_vblank(dev, pipe);
+
+ /* plane/pipes map 1:1 on ilk+ */
+ if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip_plane(dev, pipe);
+ }
+ }
+
+ /* check event from PCH */
+ if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
+ u32 pch_iir = I915_READ(SDEIIR);
+
+ cpt_irq_handler(dev, pch_iir);
+
+ /* clear PCH hotplug event before clear CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ }
+}
+
+static irqreturn_t ironlake_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 de_iir, gt_iir, de_ier, sde_ier = 0;
+ irqreturn_t ret = IRQ_NONE;
+
+ /* We get interrupts on unclaimed registers, so check for this before we
+ * do any I915_{READ,WRITE}. */
+ intel_uncore_check_errors(dev);
+
+ /* disable master interrupt before clearing iir */
+ de_ier = I915_READ(DEIER);
+ I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+ POSTING_READ(DEIER);
+
+ /* Disable south interrupts. We'll only write to SDEIIR once, so further
+	 * interrupts will be stored on its back queue, and then we'll be
+ * able to process them after we restore SDEIER (as soon as we restore
+ * it, we'll get an interrupt if SDEIIR still has something to process
+ * due to its back queue). */
+ if (!HAS_PCH_NOP(dev)) {
+ sde_ier = I915_READ(SDEIER);
+ I915_WRITE(SDEIER, 0);
+ POSTING_READ(SDEIER);
+ }
+
+ gt_iir = I915_READ(GTIIR);
+ if (gt_iir) {
+ if (INTEL_INFO(dev)->gen >= 6)
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ else
+ ilk_gt_irq_handler(dev, dev_priv, gt_iir);
I915_WRITE(GTIIR, gt_iir);
- new_gt_iir = I915_READ(GTIIR);
+ ret = IRQ_HANDLED;
+ }
+
+ de_iir = I915_READ(DEIIR);
+ if (de_iir) {
+ if (INTEL_INFO(dev)->gen >= 7)
+ ivb_display_irq_handler(dev, de_iir);
+ else
+ ilk_display_irq_handler(dev, de_iir);
+ I915_WRITE(DEIIR, de_iir);
+ ret = IRQ_HANDLED;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ u32 pm_iir = I915_READ(GEN6_PMIIR);
+ if (pm_iir) {
+ gen6_rps_irq_handler(dev_priv, pm_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ I915_WRITE(DEIER, de_ier);
+ POSTING_READ(DEIER);
+ if (!HAS_PCH_NOP(dev)) {
+ I915_WRITE(SDEIER, sde_ier);
+ POSTING_READ(SDEIER);
+ }
+
+ return ret;
+}
+
+static irqreturn_t gen8_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 master_ctl;
+ irqreturn_t ret = IRQ_NONE;
+ uint32_t tmp = 0;
+ enum pipe pipe;
+
+ master_ctl = I915_READ(GEN8_MASTER_IRQ);
+ master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
+ if (!master_ctl)
+ return IRQ_NONE;
+
+ I915_WRITE(GEN8_MASTER_IRQ, 0);
+ POSTING_READ(GEN8_MASTER_IRQ);
+
+ ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+
+ if (master_ctl & GEN8_DE_MISC_IRQ) {
+ tmp = I915_READ(GEN8_DE_MISC_IIR);
+ if (tmp & GEN8_DE_MISC_GSE)
+ intel_opregion_asle_intr(dev);
+ else if (tmp)
+ DRM_ERROR("Unexpected DE Misc interrupt\n");
+ else
+ DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
+ if (tmp) {
+ I915_WRITE(GEN8_DE_MISC_IIR, tmp);
+ ret = IRQ_HANDLED;
}
+ }
+
+ if (master_ctl & GEN8_DE_PORT_IRQ) {
+ tmp = I915_READ(GEN8_DE_PORT_IIR);
+ if (tmp & GEN8_AUX_CHANNEL_A)
+ dp_aux_irq_handler(dev);
+ else if (tmp)
+ DRM_ERROR("Unexpected DE Port interrupt\n");
+ else
+ DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
- if (gt_iir & GT_USER_INTERRUPT) {
- u32 seqno = i915_get_gem_seqno(dev);
- dev_priv->mm.irq_gem_seqno = seqno;
- trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->irq_queue);
- dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ if (tmp) {
+ I915_WRITE(GEN8_DE_PORT_IIR, tmp);
+ ret = IRQ_HANDLED;
}
+ }
+
+ for_each_pipe(pipe) {
+ uint32_t pipe_iir;
+
+ if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
+ continue;
- if (de_iir & DE_GSE)
- ironlake_opregion_gse_intr(dev);
+ pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+ if (pipe_iir & GEN8_PIPE_VBLANK)
+ intel_pipe_handle_vblank(dev, pipe);
+
+ if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip_plane(dev, pipe);
+ }
+
+ if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
+ hsw_pipe_crc_irq_handler(dev, pipe);
+
+ if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
+ if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+ false))
+ DRM_ERROR("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
+ }
- /* check event from PCH */
- if ((de_iir & DE_PCH_EVENT) &&
- (pch_iir & SDE_HOTPLUG_MASK)) {
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+ if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+ DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+ pipe_name(pipe),
+ pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
}
- de_iir = new_de_iir;
- gt_iir = new_gt_iir;
- pch_iir = new_pch_iir;
+ if (pipe_iir) {
+ ret = IRQ_HANDLED;
+ I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+ } else
+ DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
}
- I915_WRITE(DEIER, de_ier);
- (void)I915_READ(DEIER);
+ if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
+ /*
+ * FIXME(BDW): Assume for now that the new interrupt handling
+ * scheme also closed the SDE interrupt handling race we've seen
+ * on older pch-split platforms. But this needs testing.
+ */
+ u32 pch_iir = I915_READ(SDEIIR);
+
+ cpt_irq_handler(dev, pch_iir);
+
+ if (pch_iir) {
+ I915_WRITE(SDEIIR, pch_iir);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+ POSTING_READ(GEN8_MASTER_IRQ);
return ret;
}
+
+static void i915_error_wake_up(struct drm_i915_private *dev_priv,
+ bool reset_completed)
+{
+ struct intel_engine_cs *ring;
+ int i;
+
+ /*
+ * Notify all waiters for GPU completion events that reset state has
+ * been changed, and that they need to restart their wait after
+ * checking for potential errors (and bail out to drop locks if there is
+ * a gpu reset pending so that i915_error_work_func can acquire them).
+ */
+
+ /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
+ for_each_ring(ring, dev_priv, i)
+ wake_up_all(&ring->irq_queue);
+
+ /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
+ wake_up_all(&dev_priv->pending_flip_queue);
+
+ /*
+ * Signal tasks blocked in i915_gem_wait_for_error that the pending
+ * reset state is cleared.
+ */
+ if (reset_completed)
+ wake_up_all(&dev_priv->gpu_error.reset_queue);
+}
+
/**
* i915_error_work_func - do process context error handling work
* @work: work struct
@@ -346,191 +2344,161 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
*/
static void i915_error_work_func(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- error_work);
+ struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
+ work);
+ struct drm_i915_private *dev_priv =
+ container_of(error, struct drm_i915_private, gpu_error);
struct drm_device *dev = dev_priv->dev;
- char *error_event[] = { "ERROR=1", NULL };
- char *reset_event[] = { "RESET=1", NULL };
- char *reset_done_event[] = { "ERROR=0", NULL };
-
- DRM_DEBUG_DRIVER("generating error event\n");
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
-
- if (atomic_read(&dev_priv->mm.wedged)) {
- if (IS_I965G(dev)) {
- DRM_DEBUG_DRIVER("resetting chip\n");
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
- if (!i965_reset(dev, GDRST_RENDER)) {
- atomic_set(&dev_priv->mm.wedged, 0);
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
- }
- } else {
- DRM_DEBUG_DRIVER("reboot required\n");
- }
- }
-}
+ char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
+ char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
+ char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
+ int ret;
-/**
- * i915_capture_error_state - capture an error record for later analysis
- * @dev: drm device
- *
- * Should be called when an error is detected (either a hang or an error
- * interrupt) to capture error state from the time of the error. Fills
- * out a structure which becomes available in debugfs for user level tools
- * to pick up.
- */
-static void i915_capture_error_state(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_error_state *error;
- unsigned long flags;
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- if (dev_priv->first_error)
- goto out;
+ /*
+ * Note that there's only one work item which does gpu resets, so we
+ * need not worry about concurrent gpu resets potentially incrementing
+ * error->reset_counter twice. We only need to take care of another
+ * racing irq/hangcheck declaring the gpu dead for a second time. A
+ * quick check for that is good enough: schedule_work ensures the
+ * correct ordering between hang detection and this work item, and since
+ * the reset in-progress bit is only ever set by code outside of this
+	 * work, we don't need to worry about any other races.
+ */
+ if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
+ DRM_DEBUG_DRIVER("resetting chip\n");
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
+ reset_event);
- error = kmalloc(sizeof(*error), GFP_ATOMIC);
- if (!error) {
- DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n");
- goto out;
- }
+ /*
+ * In most cases it's guaranteed that we get here with an RPM
+ * reference held, for example because there is a pending GPU
+ * request that won't finish until the reset is done. This
+ * isn't the case at least when we get here by doing a
+		 * simulated reset via debugfs, so get an RPM reference.
+ */
+ intel_runtime_pm_get(dev_priv);
+ /*
+ * All state reset _must_ be completed before we update the
+ * reset counter, for otherwise waiters might miss the reset
+ * pending state and not properly drop locks, resulting in
+ * deadlocks with the reset work.
+ */
+ ret = i915_reset(dev);
+
+ intel_display_handle_reset(dev);
+
+ intel_runtime_pm_put(dev_priv);
+
+ if (ret == 0) {
+ /*
+ * After all the gem state is reset, increment the reset
+ * counter and wake up everyone waiting for the reset to
+ * complete.
+ *
+ * Since unlock operations are a one-sided barrier only,
+ * we need to insert a barrier here to order any seqno
+			 * updates before the counter increment.
+ */
+ smp_mb__before_atomic();
+ atomic_inc(&dev_priv->gpu_error.reset_counter);
+
+ kobject_uevent_env(&dev->primary->kdev->kobj,
+ KOBJ_CHANGE, reset_done_event);
+ } else {
+ atomic_set_mask(I915_WEDGED, &error->reset_counter);
+ }
- error->eir = I915_READ(EIR);
- error->pgtbl_er = I915_READ(PGTBL_ER);
- error->pipeastat = I915_READ(PIPEASTAT);
- error->pipebstat = I915_READ(PIPEBSTAT);
- error->instpm = I915_READ(INSTPM);
- if (!IS_I965G(dev)) {
- error->ipeir = I915_READ(IPEIR);
- error->ipehr = I915_READ(IPEHR);
- error->instdone = I915_READ(INSTDONE);
- error->acthd = I915_READ(ACTHD);
- } else {
- error->ipeir = I915_READ(IPEIR_I965);
- error->ipehr = I915_READ(IPEHR_I965);
- error->instdone = I915_READ(INSTDONE_I965);
- error->instps = I915_READ(INSTPS);
- error->instdone1 = I915_READ(INSTDONE1);
- error->acthd = I915_READ(ACTHD_I965);
+ /*
+ * Note: The wake_up also serves as a memory barrier so that
+		 * waiters see the updated value of the reset counter atomic_t.
+ */
+ i915_error_wake_up(dev_priv, true);
}
-
- do_gettimeofday(&error->time);
-
- dev_priv->first_error = error;
-
-out:
- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
}
-/**
- * i915_handle_error - handle an error interrupt
- * @dev: drm device
- *
- * Do some basic checking of regsiter state at error interrupt time and
- * dump it to the syslog. Also call i915_capture_error_state() to make
- * sure we get a record and make it available in debugfs. Fire a uevent
- * so userspace knows something bad happened (should trigger collection
- * of a ring dump etc.).
- */
-static void i915_handle_error(struct drm_device *dev, bool wedged)
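+/*
+ * Decode the Error Identity Register (EIR) to the log and clear it so
+ * that subsequent errors can be latched again.
+ */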
+static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t instdone[I915_NUM_INSTDONE_REG];
u32 eir = I915_READ(EIR);
- u32 pipea_stats = I915_READ(PIPEASTAT);
- u32 pipeb_stats = I915_READ(PIPEBSTAT);
+ int pipe, i;
- i915_capture_error_state(dev);
+ if (!eir)
+ return;
+
+ pr_err("render error detected, EIR: 0x%08x\n", eir);
- printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
- eir);
+ i915_get_extra_instdone(dev, instdone);
if (IS_G4X(dev)) {
if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
u32 ipeir = I915_READ(IPEIR_I965);
- printk(KERN_ERR " IPEIR: 0x%08x\n",
- I915_READ(IPEIR_I965));
- printk(KERN_ERR " IPEHR: 0x%08x\n",
- I915_READ(IPEHR_I965));
- printk(KERN_ERR " INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE_I965));
- printk(KERN_ERR " INSTPS: 0x%08x\n",
- I915_READ(INSTPS));
- printk(KERN_ERR " INSTDONE1: 0x%08x\n",
- I915_READ(INSTDONE1));
- printk(KERN_ERR " ACTHD: 0x%08x\n",
- I915_READ(ACTHD_I965));
+ pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+ pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+ for (i = 0; i < ARRAY_SIZE(instdone); i++)
+ pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
+ pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
+ pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
- (void)I915_READ(IPEIR_I965);
+ POSTING_READ(IPEIR_I965);
}
if (eir & GM45_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
- printk(KERN_ERR "page table error\n");
- printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
- pgtbl_err);
+ pr_err("page table error\n");
+ pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
- (void)I915_READ(PGTBL_ER);
+ POSTING_READ(PGTBL_ER);
}
}
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
if (eir & I915_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
- printk(KERN_ERR "page table error\n");
- printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
- pgtbl_err);
+ pr_err("page table error\n");
+ pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
- (void)I915_READ(PGTBL_ER);
+ POSTING_READ(PGTBL_ER);
}
}
if (eir & I915_ERROR_MEMORY_REFRESH) {
- printk(KERN_ERR "memory refresh error\n");
- printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
- pipea_stats);
- printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
- pipeb_stats);
+ pr_err("memory refresh error:\n");
+ for_each_pipe(pipe)
+ pr_err("pipe %c stat: 0x%08x\n",
+ pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
/* pipestat has already been acked */
}
if (eir & I915_ERROR_INSTRUCTION) {
- printk(KERN_ERR "instruction error\n");
- printk(KERN_ERR " INSTPM: 0x%08x\n",
- I915_READ(INSTPM));
- if (!IS_I965G(dev)) {
+ pr_err("instruction error\n");
+ pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
+ for (i = 0; i < ARRAY_SIZE(instdone); i++)
+ pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
+ if (INTEL_INFO(dev)->gen < 4) {
u32 ipeir = I915_READ(IPEIR);
- printk(KERN_ERR " IPEIR: 0x%08x\n",
- I915_READ(IPEIR));
- printk(KERN_ERR " IPEHR: 0x%08x\n",
- I915_READ(IPEHR));
- printk(KERN_ERR " INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE));
- printk(KERN_ERR " ACTHD: 0x%08x\n",
- I915_READ(ACTHD));
+ pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
+ pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
+ pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
I915_WRITE(IPEIR, ipeir);
- (void)I915_READ(IPEIR);
+ POSTING_READ(IPEIR);
} else {
u32 ipeir = I915_READ(IPEIR_I965);
- printk(KERN_ERR " IPEIR: 0x%08x\n",
- I915_READ(IPEIR_I965));
- printk(KERN_ERR " IPEHR: 0x%08x\n",
- I915_READ(IPEHR_I965));
- printk(KERN_ERR " INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE_I965));
- printk(KERN_ERR " INSTPS: 0x%08x\n",
- I915_READ(INSTPS));
- printk(KERN_ERR " INSTDONE1: 0x%08x\n",
- I915_READ(INSTDONE1));
- printk(KERN_ERR " ACTHD: 0x%08x\n",
- I915_READ(ACTHD_I965));
+ pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+ pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+ pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
+ pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
- (void)I915_READ(IPEIR_I965);
+ POSTING_READ(IPEIR_I965);
}
}
I915_WRITE(EIR, eir);
- (void)I915_READ(EIR);
+ POSTING_READ(EIR);
eir = I915_READ(EIR);
if (eir) {
/*
@@ -541,599 +2509,1592 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
I915_WRITE(EMR, I915_READ(EMR) | eir);
I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
}
+}
+
+/**
+ * i915_handle_error - handle an error interrupt
+ * @dev: drm device
+ *
+ * Do some basic checking of register state at error interrupt time and
+ * dump it to the syslog. Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs. Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
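+/*
+ * Callers pass a printf-style reason that ends up in the captured error
+ * state, e.g.:
+ *
+ *	i915_handle_error(dev, false, "GT error interrupt 0x%08x", gt_iir);
+ */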
+void i915_handle_error(struct drm_device *dev, bool wedged,
+ const char *fmt, ...)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ va_list args;
+ char error_msg[80];
+
+ va_start(args, fmt);
+ vscnprintf(error_msg, sizeof(error_msg), fmt, args);
+ va_end(args);
+
+ i915_capture_error_state(dev, wedged, error_msg);
+ i915_report_and_clear_eir(dev);
if (wedged) {
- atomic_set(&dev_priv->mm.wedged, 1);
+ atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
+ &dev_priv->gpu_error.reset_counter);
/*
- * Wakeup waiting processes so they don't hang
+ * Wakeup waiting processes so that the reset work function
+ * i915_error_work_func doesn't deadlock trying to grab various
+ * locks. By bumping the reset counter first, the woken
+ * processes will see a reset in progress and back off,
+ * releasing their locks and then wait for the reset completion.
+ * We must do this for _all_ gpu waiters that might hold locks
+ * that the reset work needs to acquire.
+ *
+ * Note: The wake_up serves as the required memory barrier to
+ * ensure that the waiters see the updated value of the reset
+ * counter atomic_t.
*/
- DRM_WAKEUP(&dev_priv->irq_queue);
+ i915_error_wake_up(dev_priv, false);
}
- queue_work(dev_priv->wq, &dev_priv->error_work);
+ /*
+ * Our reset work can grab modeset locks (since it needs to reset the
+	 * state of outstanding pageflips). Hence it must not be run on our own
+	 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
+ * code will deadlock.
+ */
+ schedule_work(&dev_priv->gpu_error.work);
}
-irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
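+/*
+ * Heuristic stall check: if the display base register already points at the
+ * pending flip's object, assume the flip completed but its interrupt was
+ * missed, and kick the flip completion along by hand.
+ */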
+static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct drm_i915_master_private *master_priv;
- u32 iir, new_iir;
- u32 pipea_stats, pipeb_stats;
- u32 vblank_status;
- u32 vblank_enable;
- int vblank = 0;
- unsigned long irqflags;
- int irq_received;
- int ret = IRQ_NONE;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj;
+ struct intel_unpin_work *work;
+ unsigned long flags;
+ bool stall_detected;
- atomic_inc(&dev_priv->irq_received);
+ /* Ignore early vblank irqs */
+ if (intel_crtc == NULL)
+ return;
- if (IS_IRONLAKE(dev))
- return ironlake_irq_handler(dev);
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
- iir = I915_READ(IIR);
+ if (work == NULL ||
+ atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
+ !work->enable_stall_check) {
+ /* Either the pending flip IRQ arrived, or we're too early. Don't check */
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return;
+ }
- if (IS_I965G(dev)) {
- vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
- vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
+ /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
+ obj = work->pending_flip_obj;
+ if (INTEL_INFO(dev)->gen >= 4) {
+ int dspsurf = DSPSURF(intel_crtc->plane);
+ stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
+ i915_gem_obj_ggtt_offset(obj);
} else {
- vblank_status = I915_VBLANK_INTERRUPT_STATUS;
- vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
+ int dspaddr = DSPADDR(intel_crtc->plane);
+ stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
+ crtc->y * crtc->primary->fb->pitches[0] +
+ crtc->x * crtc->primary->fb->bits_per_pixel/8);
}
- for (;;) {
- irq_received = iir != 0;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
- /* Can't rely on pipestat interrupt bit in iir as it might
- * have been cleared after the pipestat interrupt was received.
- * It doesn't set the bit in iir again, but it still produces
- * interrupts (for non-MSI).
+ if (stall_detected) {
+ DRM_DEBUG_DRIVER("Pageflip stall detected\n");
+ intel_prepare_page_flip(dev, intel_crtc->plane);
+ }
+}
+
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+static int i915_enable_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (INTEL_INFO(dev)->gen >= 4)
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
+ else
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_VBLANK_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ return 0;
+}
+
+static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+ uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
+ DE_PIPE_VBLANK(pipe);
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_enable_display_irq(dev_priv, bit);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ return 0;
+}
+
+static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ return 0;
+}
+
+static int gen8_enable_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
+ I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ return 0;
+}
+
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+static void i915_disable_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_VBLANK_INTERRUPT_STATUS |
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+ uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
+ DE_PIPE_VBLANK(pipe);
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_disable_display_irq(dev_priv, bit);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static void gen8_disable_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
+ I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
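+/*
+ * A ring is idle once its request list is empty or the current seqno has
+ * passed the seqno of the last queued request.
+ */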
+static u32
+ring_last_seqno(struct intel_engine_cs *ring)
+{
+ return list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request, list)->seqno;
+}
+
+static bool
+ring_idle(struct intel_engine_cs *ring, u32 seqno)
+{
+ return (list_empty(&ring->request_list) ||
+ i915_seqno_passed(seqno, ring_last_seqno(ring)));
+}
+
+static bool
+ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
+{
+ if (INTEL_INFO(dev)->gen >= 8) {
+ /*
+ * FIXME: gen8 semaphore support - currently we don't emit
+ * semaphores on bdw anyway, but this needs to be addressed when
+ * we merge that code.
*/
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- pipea_stats = I915_READ(PIPEASTAT);
- pipeb_stats = I915_READ(PIPEBSTAT);
+ return false;
+ } else {
+ ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
+ return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
+ MI_SEMAPHORE_REGISTER);
+ }
+}
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false);
+static struct intel_engine_cs *
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_engine_cs *signaller;
+ int i;
+
+	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
/*
- * Clear the PIPE(A|B)STAT regs before the IIR
+ * FIXME: gen8 semaphore support - currently we don't emit
+ * semaphores on bdw anyway, but this needs to be addressed when
+ * we merge that code.
*/
- if (pipea_stats & 0x8000ffff) {
- if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe a underrun\n");
- I915_WRITE(PIPEASTAT, pipea_stats);
- irq_received = 1;
- }
+ return NULL;
+ } else {
+ u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
+
+ for_each_ring(signaller, dev_priv, i) {
+			if (ring == signaller)
+ continue;
- if (pipeb_stats & 0x8000ffff) {
- if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe b underrun\n");
- I915_WRITE(PIPEBSTAT, pipeb_stats);
- irq_received = 1;
+ if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
+ return signaller;
}
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ }
- if (!irq_received)
+ DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n",
+ ring->id, ipehr);
+
+ return NULL;
+}
+
+static struct intel_engine_cs *
+semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u32 cmd, ipehr, head;
+ int i;
+
+ ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
+ if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
+ return NULL;
+
+ /*
+ * HEAD is likely pointing to the dword after the actual command,
+	 * so scan backwards until we find the MBOX. But limit it to just 4
+	 * dwords. Note that we don't care about ACTHD here since that might
+	 * point at a batch, and semaphores are always emitted into the
+ * ringbuffer itself.
+ */
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
+
+ for (i = 4; i; --i) {
+ /*
+ * Be paranoid and presume the hw has gone off into the wild -
+ * our ring is smaller than what the hardware (and hence
+ * HEAD_ADDR) allows. Also handles wrap-around.
+ */
+ head &= ring->buffer->size - 1;
+
+ /* This here seems to blow up */
+ cmd = ioread32(ring->buffer->virtual_start + head);
+ if (cmd == ipehr)
break;
- ret = IRQ_HANDLED;
+ head -= 4;
+ }
- /* Consume port. Then clear IIR or we'll miss events */
- if ((I915_HAS_HOTPLUG(dev)) &&
- (iir & I915_DISPLAY_PORT_INTERRUPT)) {
- u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
-
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hotplug_status);
- if (hotplug_status & dev_priv->hotplug_supported_mask)
- queue_work(dev_priv->wq,
- &dev_priv->hotplug_work);
-
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- I915_READ(PORT_HOTPLUG_STAT);
- }
+ if (!i)
+ return NULL;
- I915_WRITE(IIR, iir);
- new_iir = I915_READ(IIR); /* Flush posted writes */
+ *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
+ return semaphore_wait_to_signaller_ring(ring, ipehr);
+}
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
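+/*
+ * Returns 1 if the semaphore this ring is waiting on has already signalled,
+ * 0 if it is still pending, and -1 if no signaller was found or a potential
+ * deadlock was detected.
+ */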
+static int semaphore_passed(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_engine_cs *signaller;
+ u32 seqno;
- if (iir & I915_USER_INTERRUPT) {
- u32 seqno = i915_get_gem_seqno(dev);
- dev_priv->mm.irq_gem_seqno = seqno;
- trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->irq_queue);
- dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
- }
+ ring->hangcheck.deadlock++;
+
+ signaller = semaphore_waits_for(ring, &seqno);
+ if (signaller == NULL)
+ return -1;
+
+ /* Prevent pathological recursion due to driver bugs */
+ if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
+ return -1;
+
+ if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
+ return 1;
+
+ /* cursory check for an unkickable deadlock */
+ if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+ semaphore_passed(signaller) < 0)
+ return -1;
- if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
- intel_prepare_page_flip(dev, 0);
+ return 0;
+}
- if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
- intel_prepare_page_flip(dev, 1);
+static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *ring;
+ int i;
- if (pipea_stats & vblank_status) {
- vblank++;
- drm_handle_vblank(dev, 0);
- intel_finish_page_flip(dev, 0);
+ for_each_ring(ring, dev_priv, i)
+ ring->hangcheck.deadlock = 0;
+}
+
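+/*
+ * Classify a busy ring whose seqno has not advanced: it is still ACTIVE if
+ * ACTHD is moving; otherwise try to kick a stuck WAIT_FOR_EVENT or
+ * semaphore wait before declaring it HUNG.
+ */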
+static enum intel_ring_hangcheck_action
+ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ if (ring->hangcheck.acthd != acthd)
+ return HANGCHECK_ACTIVE;
+
+ if (IS_GEN2(dev))
+ return HANGCHECK_HUNG;
+
+ /* Is the chip hanging on a WAIT_FOR_EVENT?
+ * If so we can simply poke the RB_WAIT bit
+ * and break the hang. This should work on
+ * all but the second generation chipsets.
+ */
+ tmp = I915_READ_CTL(ring);
+ if (tmp & RING_WAIT) {
+ i915_handle_error(dev, false,
+ "Kicking stuck wait on %s",
+ ring->name);
+ I915_WRITE_CTL(ring, tmp);
+ return HANGCHECK_KICK;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+ switch (semaphore_passed(ring)) {
+ default:
+ return HANGCHECK_HUNG;
+ case 1:
+ i915_handle_error(dev, false,
+ "Kicking stuck semaphore on %s",
+ ring->name);
+ I915_WRITE_CTL(ring, tmp);
+ return HANGCHECK_KICK;
+ case 0:
+ return HANGCHECK_WAIT;
}
+ }
+
+ return HANGCHECK_HUNG;
+}
+
+/**
+ * This is called when the chip hasn't reported back with completed
+ * batchbuffers in a long time. We keep track of per-ring seqno progress and
+ * if there is no progress, the hangcheck score for that ring is increased.
+ * Further, acthd is inspected to see if the ring is stuck. If it is, we
+ * kick the ring. If we see no progress on three subsequent calls,
+ * we assume the chip is wedged and try to fix it by resetting the chip.
+ */
+static void i915_hangcheck_elapsed(unsigned long data)
+{
+ struct drm_device *dev = (struct drm_device *)data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int i;
+ int busy_count = 0, rings_hung = 0;
+ bool stuck[I915_NUM_RINGS] = { 0 };
+#define BUSY 1
+#define KICK 5
+#define HUNG 20
+
+ if (!i915.enable_hangcheck)
+ return;
+
+ for_each_ring(ring, dev_priv, i) {
+ u64 acthd;
+ u32 seqno;
+ bool busy = true;
+
+ semaphore_clear_deadlocks(dev_priv);
+
+ seqno = ring->get_seqno(ring, false);
+ acthd = intel_ring_get_active_head(ring);
+
+ if (ring->hangcheck.seqno == seqno) {
+ if (ring_idle(ring, seqno)) {
+ ring->hangcheck.action = HANGCHECK_IDLE;
+
+ if (waitqueue_active(&ring->irq_queue)) {
+ /* Issue a wake-up to catch stuck h/w. */
+ if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
+ if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
+ DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+ ring->name);
+ else
+ DRM_INFO("Fake missed irq on %s\n",
+ ring->name);
+ wake_up_all(&ring->irq_queue);
+ }
+ /* Safeguard against driver failure */
+ ring->hangcheck.score += BUSY;
+ } else
+ busy = false;
+ } else {
+ /* We always increment the hangcheck score
+ * if the ring is busy and still processing
+ * the same request, so that no single request
+ * can run indefinitely (such as a chain of
+ * batches). The only time we do not increment
+				 * the hangcheck score on this ring is if this
+ * ring is in a legitimate wait for another
+ * ring. In that case the waiting ring is a
+ * victim and we want to be sure we catch the
+ * right culprit. Then every time we do kick
+ * the ring, add a small increment to the
+ * score so that we can catch a batch that is
+ * being repeatedly kicked and so responsible
+ * for stalling the machine.
+ */
+ ring->hangcheck.action = ring_stuck(ring,
+ acthd);
+
+ switch (ring->hangcheck.action) {
+ case HANGCHECK_IDLE:
+ case HANGCHECK_WAIT:
+ break;
+ case HANGCHECK_ACTIVE:
+ ring->hangcheck.score += BUSY;
+ break;
+ case HANGCHECK_KICK:
+ ring->hangcheck.score += KICK;
+ break;
+ case HANGCHECK_HUNG:
+ ring->hangcheck.score += HUNG;
+ stuck[i] = true;
+ break;
+ }
+ }
+ } else {
+ ring->hangcheck.action = HANGCHECK_ACTIVE;
- if (pipeb_stats & vblank_status) {
- vblank++;
- drm_handle_vblank(dev, 1);
- intel_finish_page_flip(dev, 1);
+ /* Gradually reduce the count so that we catch DoS
+ * attempts across multiple batches.
+ */
+ if (ring->hangcheck.score > 0)
+ ring->hangcheck.score--;
}
- if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
- (iir & I915_ASLE_INTERRUPT))
- opregion_asle_intr(dev);
+ ring->hangcheck.seqno = seqno;
+ ring->hangcheck.acthd = acthd;
+ busy_count += busy;
+ }
- /* With MSI, interrupts are only generated when iir
- * transitions from zero to nonzero. If another bit got
- * set while we were handling the existing iir bits, then
- * we would never get another interrupt.
- *
- * This is fine on non-MSI as well, as if we hit this path
- * we avoid exiting the interrupt handler only to generate
- * another one.
- *
- * Note that for MSI this could cause a stray interrupt report
- * if an interrupt landed in the time between writing IIR and
- * the posting read. This should be rare enough to never
- * trigger the 99% of 100,000 interrupts test for disabling
- * stray interrupts.
- */
- iir = new_iir;
+ for_each_ring(ring, dev_priv, i) {
+ if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+ DRM_INFO("%s on %s\n",
+ stuck[i] ? "stuck" : "no progress",
+ ring->name);
+ rings_hung++;
+ }
}
- return ret;
+ if (rings_hung)
+ return i915_handle_error(dev, true, "Ring hung");
+
+ if (busy_count)
+		/* Reset timer in case chip hangs without another request
+ * being added */
+ i915_queue_hangcheck(dev);
}
-static int i915_emit_irq(struct drm_device * dev)
+void i915_queue_hangcheck(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- RING_LOCALS;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (!i915.enable_hangcheck)
+ return;
- i915_kernel_lost_context(dev);
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+ round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
+}
- DRM_DEBUG_DRIVER("\n");
+static void ibx_irq_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->counter = 1;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ if (HAS_PCH_NOP(dev))
+ return;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
+ GEN5_IRQ_RESET(SDE);
- return dev_priv->counter;
+ if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
+ I915_WRITE(SERR_INT, 0xffffffff);
}
-void i915_user_irq_get(struct drm_device *dev)
+/*
+ * SDEIER is also touched by the interrupt handler to work around missed PCH
+ * interrupts. Hence we can't update it after the interrupt handler is enabled -
+ * instead we unconditionally enable all PCH interrupt sources here, but then
+ * only unmask them as needed with SDEIMR.
+ *
+ * This function needs to be called before interrupts are enabled.
+ */
+static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
- if (IS_IRONLAKE(dev))
- ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
- else
- i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
- }
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ if (HAS_PCH_NOP(dev))
+ return;
+
+ WARN_ON(I915_READ(SDEIER) != 0);
+ I915_WRITE(SDEIER, 0xffffffff);
+ POSTING_READ(SDEIER);
}
-void i915_user_irq_put(struct drm_device *dev)
+static void gen5_gt_irq_reset(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
- if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- if (IS_IRONLAKE(dev))
- ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
- else
- i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
- }
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ GEN5_IRQ_RESET(GT);
+ if (INTEL_INFO(dev)->gen >= 6)
+ GEN5_IRQ_RESET(GEN6_PM);
}
-void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
+/* drm_dma.h hooks */
+static void ironlake_irq_reset(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+
+ GEN5_IRQ_RESET(DE);
+ if (IS_GEN7(dev))
+ I915_WRITE(GEN7_ERR_INT, 0xffffffff);
- if (dev_priv->trace_irq_seqno == 0)
- i915_user_irq_get(dev);
+ gen5_gt_irq_reset(dev);
- dev_priv->trace_irq_seqno = seqno;
+ ibx_irq_reset(dev);
}
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+static void valleyview_irq_preinstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- int ret = 0;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
- DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
- READ_BREADCRUMB(dev_priv));
+ /* VLV magic */
+ I915_WRITE(VLV_IMR, 0);
+ I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
+ I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
+ I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
- if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- return 0;
- }
+ /* and GT */
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ gen5_gt_irq_reset(dev);
- i915_user_irq_get(dev);
- DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
- READ_BREADCRUMB(dev_priv) >= irq_nr);
- i915_user_irq_put(dev);
+ I915_WRITE(DPINVGTT, 0xff);
- if (ret == -EBUSY) {
- DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
- READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
- }
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ POSTING_READ(VLV_IER);
+}
- return ret;
+static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
+{
+ GEN8_IRQ_RESET_NDX(GT, 0);
+ GEN8_IRQ_RESET_NDX(GT, 1);
+ GEN8_IRQ_RESET_NDX(GT, 2);
+ GEN8_IRQ_RESET_NDX(GT, 3);
}
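GEN8_IRQ_RESET_NDX() is the banked variant of the same reset pattern,
indexed per GT bank; by analogy with GEN5_IRQ_RESET() it presumably
expands to roughly (again a sketch, not a verbatim copy):

	#define GEN8_IRQ_RESET_NDX(type, which) do { \
		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
		POSTING_READ(GEN8_##type##_IMR(which)); \
		I915_WRITE(GEN8_##type##_IER(which), 0); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
		POSTING_READ(GEN8_##type##_IIR(which)); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
		POSTING_READ(GEN8_##type##_IIR(which)); \
	} while (0)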
-/* Needs the lock as it touches the ring.
- */
-int i915_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static void gen8_irq_reset(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_irq_emit_t *emit = data;
- int result;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
- if (!dev_priv || !dev_priv->ring.virtual_start) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
+ I915_WRITE(GEN8_MASTER_IRQ, 0);
+ POSTING_READ(GEN8_MASTER_IRQ);
+
+ gen8_gt_irq_reset(dev_priv);
+
+ for_each_pipe(pipe)
+ GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+
+ GEN5_IRQ_RESET(GEN8_DE_PORT_);
+ GEN5_IRQ_RESET(GEN8_DE_MISC_);
+ GEN5_IRQ_RESET(GEN8_PCU_);
+
+ ibx_irq_reset(dev);
+}
+
+static void cherryview_irq_preinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ I915_WRITE(GEN8_MASTER_IRQ, 0);
+ POSTING_READ(GEN8_MASTER_IRQ);
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+ gen8_gt_irq_reset(dev_priv);
- mutex_lock(&dev->struct_mutex);
- result = i915_emit_irq(dev);
- mutex_unlock(&dev->struct_mutex);
+ GEN5_IRQ_RESET(GEN8_PCU_);
- if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
+ POSTING_READ(GEN8_PCU_IIR);
+
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ POSTING_READ(VLV_IIR);
+}
+
+static void ibx_hpd_irq_setup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *intel_encoder;
+ u32 hotplug_irqs, hotplug, enabled_irqs = 0;
+
+ if (HAS_PCH_IBX(dev)) {
+ hotplug_irqs = SDE_HOTPLUG_MASK;
+ list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+ if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
+ } else {
+ hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
+ list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+ if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
}
- return 0;
+ ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+ /*
+ * Enable digital hotplug on the PCH, and configure the DP short pulse
+ * duration to 2ms (which is the minimum in the DisplayPort spec).
+ *
+ * This register is the same on all known PCH chips.
+ */
+ hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
+ hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
+ hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
+ hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
+ I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
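The hpd_ibx[] and hpd_cpt[] arrays used here map a logical HPD pin to
the matching SDE hotplug bit. They are defined earlier in the file; the
IBX table looks roughly like this (sketch):

	static const u32 hpd_ibx[] = {
		[HPD_CRT] = SDE_CRT_HOTPLUG,
		[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
		[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
		[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
		[HPD_PORT_D] = SDE_PORTD_HOTPLUG
	};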
-/* Doesn't need the hardware lock.
- */
-int i915_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static void ibx_irq_postinstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_irq_wait_t *irqwait = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 mask;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
+ if (HAS_PCH_NOP(dev))
+ return;
+
+ if (HAS_PCH_IBX(dev))
+ mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
+ else
+ mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
+
+ GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
+ I915_WRITE(SDEIMR, ~mask);
+}
+
+static void gen5_gt_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pm_irqs, gt_irqs;
+
+ pm_irqs = gt_irqs = 0;
+
+ dev_priv->gt_irq_mask = ~0;
+ if (HAS_L3_DPF(dev)) {
+ /* L3 parity interrupt is always unmasked. */
+ dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
+ gt_irqs |= GT_PARITY_ERROR(dev);
}
- return i915_wait_irq(dev, irqwait->irq_seq);
+ gt_irqs |= GT_RENDER_USER_INTERRUPT;
+ if (IS_GEN5(dev)) {
+ gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
+ ILK_BSD_USER_INTERRUPT;
+ } else {
+ gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
+ }
+
+ GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ pm_irqs |= dev_priv->pm_rps_events;
+
+ if (HAS_VEBOX(dev))
+ pm_irqs |= PM_VEBOX_USER_INTERRUPT;
+
+ dev_priv->pm_irq_mask = 0xffffffff;
+ GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
+ }
}
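GEN5_IRQ_INIT() is the enable-side counterpart of GEN5_IRQ_RESET(): it
first asserts that IIR is clean, then programs IER and IMR. A sketch
consistent with the (imr_val, ier_val) call sites here:

	#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
		GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
		I915_WRITE(type##IER, (ier_val)); \
		I915_WRITE(type##IMR, (imr_val)); \
		POSTING_READ(type##IMR); \
	} while (0)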
-/* Called from drm generic code, passed 'crtc' which
- * we use as a pipe index
- */
-int i915_enable_vblank(struct drm_device *dev, int pipe)
+static int ironlake_irq_postinstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- u32 pipeconf;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 display_mask, extra_mask;
+
+ if (INTEL_INFO(dev)->gen >= 7) {
+ display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
+ DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
+ DE_PLANEB_FLIP_DONE_IVB |
+ DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
+ extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
+ DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
+ } else {
+ display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
+ DE_AUX_CHANNEL_A |
+ DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
+ DE_POISON);
+ extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
+ DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
+ }
- pipeconf = I915_READ(pipeconf_reg);
- if (!(pipeconf & PIPEACONF_ENABLE))
- return -EINVAL;
+ dev_priv->irq_mask = ~display_mask;
- if (IS_IRONLAKE(dev))
- return 0;
+ I915_WRITE(HWSTAM, 0xeffe);
+
+ ibx_irq_pre_postinstall(dev);
+
+ GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
+
+ gen5_gt_irq_postinstall(dev);
+
+ ibx_irq_postinstall(dev);
+
+ if (IS_IRONLAKE_M(dev)) {
+ /* Enable PCU event interrupts
+ *
+ * spinlocking not required here for correctness since interrupt
+ * setup is guaranteed to run in single-threaded context. But we
+ * need it to make the assert_spin_locked check happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (IS_I965G(dev))
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_ENABLE);
- else
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
return 0;
}
-/* Called from drm generic code, passed 'crtc' which
- * we use as a pipe index
- */
-void i915_disable_vblank(struct drm_device *dev, int pipe)
+static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
+ u32 pipestat_mask;
+ u32 iir_mask;
- if (IS_IRONLAKE(dev))
- return;
+ pipestat_mask = PIPESTAT_INT_STATUS_MASK |
+ PIPE_FIFO_UNDERRUN_STATUS;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_ENABLE |
- PIPE_START_VBLANK_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
+ I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
+ POSTING_READ(PIPESTAT(PIPE_A));
+
+ pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
+ PIPE_CRC_DONE_INTERRUPT_STATUS;
+
+ i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
+ PIPE_GMBUS_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
+
+ iir_mask = I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ dev_priv->irq_mask &= ~iir_mask;
+
+ I915_WRITE(VLV_IIR, iir_mask);
+ I915_WRITE(VLV_IIR, iir_mask);
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
+ POSTING_READ(VLV_IER);
}
-void i915_enable_interrupt (struct drm_device *dev)
+static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pipestat_mask;
+ u32 iir_mask;
+
+ iir_mask = I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+
+ dev_priv->irq_mask |= iir_mask;
+ I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IIR, iir_mask);
+ I915_WRITE(VLV_IIR, iir_mask);
+ POSTING_READ(VLV_IIR);
+
+ pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
+ PIPE_CRC_DONE_INTERRUPT_STATUS;
+
+ i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
+ PIPE_GMBUS_INTERRUPT_STATUS);
+ i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
+
+ pipestat_mask = PIPESTAT_INT_STATUS_MASK |
+ PIPE_FIFO_UNDERRUN_STATUS;
+ I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
+ I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
+ POSTING_READ(PIPESTAT(PIPE_A));
+}
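The doubled VLV_IIR writes in both helpers above look redundant but are
deliberate; the working assumption (mirroring the reset macros) is that
IIR can latch a second instance of an interrupt, so a single ack may
leave one pending. Factored out, a hypothetical helper would be:

	/* hypothetical helper, not part of this patch */
	static void vlv_ack_iir(struct drm_i915_private *dev_priv, u32 mask)
	{
		I915_WRITE(VLV_IIR, mask);	/* ack the asserted bits */
		I915_WRITE(VLV_IIR, mask);	/* ack a second latched instance, if any */
		POSTING_READ(VLV_IIR);
	}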
+
+void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
- if (!IS_IRONLAKE(dev))
- opregion_enable_asle(dev);
- dev_priv->irq_enabled = 1;
+ if (dev_priv->display_irqs_enabled)
+ return;
+
+ dev_priv->display_irqs_enabled = true;
+
+ if (dev_priv->dev->irq_enabled)
+ valleyview_display_irqs_install(dev_priv);
}
+void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
-/* Set the vblank monitor pipe
- */
-int i915_vblank_pipe_set(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ if (!dev_priv->display_irqs_enabled)
+ return;
+
+ dev_priv->display_irqs_enabled = false;
+
+ if (dev_priv->dev->irq_enabled)
+ valleyview_display_irqs_uninstall(dev_priv);
+}
+
+static int valleyview_irq_postinstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
+ dev_priv->irq_mask = ~0;
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ POSTING_READ(PORT_HOTPLUG_EN);
+
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ POSTING_READ(VLV_IER);
+
+ /* Interrupt setup is already guaranteed to be single-threaded; this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (dev_priv->display_irqs_enabled)
+ valleyview_display_irqs_install(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+
+ gen5_gt_irq_postinstall(dev);
+
+ /* ack & enable invalid PTE error interrupts */
+#if 0 /* FIXME: add support to irq handler for checking these bits */
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+ I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
+#endif
+
+ I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
return 0;
}
-int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_vblank_pipe_t *pipe = data;
+ int i;
+
+ /* These are interrupts we'll toggle with the ring mask register */
+ uint32_t gt_interrupts[] = {
+ GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
+ GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
+ 0,
+ GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
+ };
+
+ for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
+ GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
+
+ dev_priv->pm_irq_mask = 0xffffffff;
+}
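GEN8_IRQ_INIT_NDX() programs one GT bank per loop iteration: IER
receives the interrupts the ring code may toggle, IMR the initial mask.
By analogy with GEN5_IRQ_INIT() it presumably expands to roughly:

	#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
		GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
		I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
		I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
		POSTING_READ(GEN8_##type##_IMR(which)); \
	} while (0)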
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
+static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
+ GEN8_PIPE_CDCLK_CRC_DONE |
+ GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+ uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
+ GEN8_PIPE_FIFO_UNDERRUN;
+ int pipe;
+ dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
+ dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
+ dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
+
+ for_each_pipe(pipe)
+ GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
+ de_pipe_enables);
+
+ GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
+}
+
+static int gen8_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ ibx_irq_pre_postinstall(dev);
+
+ gen8_gt_irq_postinstall(dev_priv);
+ gen8_de_irq_postinstall(dev_priv);
- pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+ ibx_irq_postinstall(dev);
+
+ I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+ POSTING_READ(GEN8_MASTER_IRQ);
return 0;
}
-/**
- * Schedule buffer swap at given vertical blank.
- */
-int i915_vblank_swap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* The delayed swap mechanism was fundamentally racy, and has been
- * removed. The model was that the client requested a delayed flip/swap
- * from the kernel, then waited for vblank before continuing to perform
- * rendering. The problem was that the kernel might wake the client
- * up before it dispatched the vblank swap (since the lock has to be
- * held while touching the ringbuffer), in which case the client would
- * clear and start the next frame before the swap occurred, and
- * flicker would occur in addition to likely missing the vblank.
- *
- * In the absence of this ioctl, userland falls back to a correct path
- * of waiting for a vblank, then dispatching the swap on its own.
- * Context switching to userland and back is plenty fast enough for
- * meeting the requirements of vblank swapping.
+static int cherryview_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+ u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
+ PIPE_CRC_DONE_INTERRUPT_STATUS;
+ unsigned long irqflags;
+ int pipe;
+
+ /*
+ * Leave vblank interrupts masked initially; the enable/disable hooks will
+ * toggle them based on usage.
*/
- return -EINVAL;
-}
+ dev_priv->irq_mask = ~enable_mask;
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(pipe)
+ i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IER, enable_mask);
+
+ gen8_gt_irq_postinstall(dev_priv);
+
+ I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
+ POSTING_READ(GEN8_MASTER_IRQ);
-struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
- drm_i915_private_t *dev_priv = dev->dev_private;
- return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
+ return 0;
}
-/**
- * This is called when the chip hasn't reported back with completed
- * batchbuffers in a long time. The first time this is called we simply record
- * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
- * again, we assume the chip is wedged and try to fix it.
- */
-void i915_hangcheck_elapsed(unsigned long data)
+static void gen8_irq_uninstall(struct drm_device *dev)
{
- struct drm_device *dev = (struct drm_device *)data;
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t acthd;
-
- if (!IS_I965G(dev))
- acthd = I915_READ(ACTHD);
- else
- acthd = I915_READ(ACTHD_I965);
+ struct drm_i915_private *dev_priv = dev->dev_private;
- /* If all work is done then ACTHD clearly hasn't advanced. */
- if (list_empty(&dev_priv->mm.request_list) ||
- i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
- dev_priv->hangcheck_count = 0;
+ if (!dev_priv)
return;
- }
- if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
- DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
- i915_handle_error(dev, true);
+ intel_hpd_irq_uninstall(dev_priv);
+
+ gen8_irq_reset(dev);
+}
+
+static void valleyview_irq_uninstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+ int pipe;
+
+ if (!dev_priv)
return;
- }
- /* Reset timer case chip hangs without another request being added */
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ I915_WRITE(VLV_MASTER_IER, 0);
- if (acthd != dev_priv->last_acthd)
- dev_priv->hangcheck_count = 0;
- else
- dev_priv->hangcheck_count++;
+ intel_hpd_irq_uninstall(dev_priv);
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (dev_priv->display_irqs_enabled)
+ valleyview_display_irqs_uninstall(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ dev_priv->irq_mask = 0;
- dev_priv->last_acthd = acthd;
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ POSTING_READ(VLV_IER);
}
-/* drm_dma.h hooks
-*/
-static void ironlake_irq_preinstall(struct drm_device *dev)
+static void cherryview_irq_uninstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
- I915_WRITE(HWSTAM, 0xeffe);
+ if (!dev_priv)
+ return;
- /* XXX hotplug from PCH */
+ I915_WRITE(GEN8_MASTER_IRQ, 0);
+ POSTING_READ(GEN8_MASTER_IRQ);
+
+#define GEN8_IRQ_FINI_NDX(type, which) \
+do { \
+ I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+ I915_WRITE(GEN8_##type##_IER(which), 0); \
+ I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+ POSTING_READ(GEN8_##type##_IIR(which)); \
+ I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+} while (0)
+
+#define GEN8_IRQ_FINI(type) \
+do { \
+ I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
+ I915_WRITE(GEN8_##type##_IER, 0); \
+ I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+ POSTING_READ(GEN8_##type##_IIR); \
+ I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+} while (0)
+
+ GEN8_IRQ_FINI_NDX(GT, 0);
+ GEN8_IRQ_FINI_NDX(GT, 1);
+ GEN8_IRQ_FINI_NDX(GT, 2);
+ GEN8_IRQ_FINI_NDX(GT, 3);
+
+ GEN8_IRQ_FINI(PCU);
+
+#undef GEN8_IRQ_FINI
+#undef GEN8_IRQ_FINI_NDX
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ POSTING_READ(VLV_IIR);
+}
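Expanding the token pasting makes the local macro easier to audit;
GEN8_IRQ_FINI(PCU) above is equivalent to:

	I915_WRITE(GEN8_PCU_IMR, 0xffffffff);
	I915_WRITE(GEN8_PCU_IER, 0);
	I915_WRITE(GEN8_PCU_IIR, 0xffffffff);
	POSTING_READ(GEN8_PCU_IIR);
	I915_WRITE(GEN8_PCU_IIR, 0xffffffff);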
- I915_WRITE(DEIMR, 0xffffffff);
- I915_WRITE(DEIER, 0x0);
- (void) I915_READ(DEIER);
+static void ironlake_irq_uninstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
- /* and GT */
- I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- (void) I915_READ(GTIER);
+ if (!dev_priv)
+ return;
+
+ intel_hpd_irq_uninstall(dev_priv);
- /* south display irq */
- I915_WRITE(SDEIMR, 0xffffffff);
- I915_WRITE(SDEIER, 0x0);
- (void) I915_READ(SDEIER);
+ ironlake_irq_reset(dev);
}
-static int ironlake_irq_postinstall(struct drm_device *dev)
+static void i8xx_irq_preinstall(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- /* enable kind of interrupts always enabled */
- u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
- u32 render_mask = GT_USER_INTERRUPT;
- u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
- SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
- dev_priv->irq_mask_reg = ~display_mask;
- dev_priv->de_irq_enable_reg = display_mask;
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE16(IMR, 0xffff);
+ I915_WRITE16(IER, 0x0);
+ POSTING_READ16(IER);
+}
- /* should always can generate irq */
- I915_WRITE(DEIIR, I915_READ(DEIIR));
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
- (void) I915_READ(DEIER);
+static int i8xx_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
- /* user interrupt should be enabled, but masked initial */
- dev_priv->gt_irq_mask_reg = 0xffffffff;
- dev_priv->gt_irq_enable_reg = render_mask;
+ I915_WRITE16(EMR,
+ ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
- (void) I915_READ(GTIER);
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask =
+ ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+
+ I915_WRITE16(IER,
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT);
+ POSTING_READ16(IER);
+
+ /* Interrupt setup is already guaranteed to be single-threaded; this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- dev_priv->pch_irq_mask_reg = ~hotplug_mask;
- dev_priv->pch_irq_enable_reg = hotplug_mask;
+ return 0;
+}
- I915_WRITE(SDEIIR, I915_READ(SDEIIR));
- I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
- I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
- (void) I915_READ(SDEIER);
+/*
+ * Returns true when a page flip has completed.
+ */
+static bool i8xx_handle_vblank(struct drm_device *dev,
+ int plane, int pipe, u32 iir)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
- return 0;
+ if (!intel_pipe_handle_vblank(dev, pipe))
+ return false;
+
+ if ((iir & flip_pending) == 0)
+ return false;
+
+ intel_prepare_page_flip(dev, plane);
+
+ /* We detect FlipDone by looking for the change in PendingFlip from '1'
+ * to '0' on the following vblank, i.e. IIR has the PendingFlip
+ * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
+ * the flip is completed (no longer pending). Since this doesn't raise
+ * an interrupt per se, we watch for the change at vblank.
+ */
+ if (I915_READ16(ISR) & flip_pending)
+ return false;
+
+ intel_finish_page_flip(dev, pipe);
+
+ return true;
}
-void i915_driver_irq_preinstall(struct drm_device * dev)
+static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_device *dev = arg;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 iir, new_iir;
+ u32 pipe_stats[2];
+ unsigned long irqflags;
+ int pipe;
+ u16 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
- atomic_set(&dev_priv->irq_received, 0);
+ iir = I915_READ16(IIR);
+ if (iir == 0)
+ return IRQ_NONE;
- INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
- INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+ while (iir & ~flip_mask) {
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false,
+ "Command parser error, iir 0x%08x",
+ iir);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff)
+ I915_WRITE(reg, pipe_stats[pipe]);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- if (IS_IRONLAKE(dev)) {
- ironlake_irq_preinstall(dev);
- return;
+ I915_WRITE16(IIR, iir & ~flip_mask);
+ new_iir = I915_READ16(IIR); /* Flush posted writes */
+
+ i915_update_dri1_breadcrumb(dev);
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+
+ for_each_pipe(pipe) {
+ int plane = pipe;
+ if (HAS_FBC(dev))
+ plane = !plane;
+
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ i8xx_handle_vblank(dev, plane, pipe, iir))
+ flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev, pipe);
+
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ }
+
+ iir = new_iir;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void i8xx_irq_uninstall(struct drm_device * dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ for_each_pipe(pipe) {
+ /* Clear enable bits; then clear status bits */
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
}
+ I915_WRITE16(IMR, 0xffff);
+ I915_WRITE16(IER, 0x0);
+ I915_WRITE16(IIR, I915_READ16(IIR));
+}
+
+static void i915_irq_preinstall(struct drm_device * dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
- I915_WRITE(HWSTAM, 0xeffe);
- I915_WRITE(PIPEASTAT, 0);
- I915_WRITE(PIPEBSTAT, 0);
+ I915_WRITE16(HWSTAM, 0xeffe);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
- (void) I915_READ(IER);
+ POSTING_READ(IER);
+}
+
+static int i915_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 enable_mask;
+ unsigned long irqflags;
+
+ I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask =
+ ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+ enable_mask =
+ I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ POSTING_READ(PORT_HOTPLUG_EN);
+
+ /* Enable in IER... */
+ enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+ /* and unmask in IMR */
+ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
+ }
+
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ I915_WRITE(IER, enable_mask);
+ POSTING_READ(IER);
+
+ i915_enable_asle_pipestat(dev);
+
+ /* Interrupt setup is already guaranteed to be single-threaded; this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ return 0;
}
/*
- * Must be called after intel_modeset_init or hotplug interrupts won't be
- * enabled correctly.
+ * Returns true when a page flip has completed.
*/
-int i915_driver_irq_postinstall(struct drm_device *dev)
+static bool i915_handle_vblank(struct drm_device *dev,
+ int plane, int pipe, u32 iir)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
- u32 error_mask;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
- DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+ if (!intel_pipe_handle_vblank(dev, pipe))
+ return false;
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+ if ((iir & flip_pending) == 0)
+ return false;
- if (IS_IRONLAKE(dev))
- return ironlake_irq_postinstall(dev);
+ intel_prepare_page_flip(dev, plane);
- /* Unmask the interrupts that we always want on. */
- dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
+ /* We detect FlipDone by looking for the change in PendingFlip from '1'
+ * to '0' on the following vblank, i.e. IIR has the PendingFlip
+ * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
+ * the flip is completed (no longer pending). Since this doesn't raise
+ * an interrupt per se, we watch for the change at vblank.
+ */
+ if (I915_READ(ISR) & flip_pending)
+ return false;
- dev_priv->pipestat[0] = 0;
- dev_priv->pipestat[1] = 0;
+ intel_finish_page_flip(dev, pipe);
- if (I915_HAS_HOTPLUG(dev)) {
- u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
-
- /* Note HDMI and DP share bits */
- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMID_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
- hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
- hotplug_en |= SDVOB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
- hotplug_en |= CRT_HOTPLUG_INT_EN;
- /* Ignore TV since it's buggy */
+ return true;
+}
- I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+static irqreturn_t i915_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
+ unsigned long irqflags;
+ u32 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ int pipe, ret = IRQ_NONE;
- /* Enable in IER... */
- enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
- /* and unmask in IMR */
- i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT);
+ iir = I915_READ(IIR);
+ do {
+ bool irq_received = (iir & ~flip_mask) != 0;
+ bool blc_event = false;
+
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false,
+ "Command parser error, iir 0x%08x",
+ iir);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /* Clear the PIPE*STAT regs before the IIR */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = true;
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ if (!irq_received)
+ break;
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if (I915_HAS_HOTPLUG(dev) &&
+ iir & I915_DISPLAY_PORT_INTERRUPT)
+ i9xx_hpd_irq_handler(dev);
+
+ I915_WRITE(IIR, iir & ~flip_mask);
+ new_iir = I915_READ(IIR); /* Flush posted writes */
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+
+ for_each_pipe(pipe) {
+ int plane = pipe;
+ if (HAS_FBC(dev))
+ plane = !plane;
+
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ i915_handle_vblank(dev, plane, pipe, iir))
+ flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev, pipe);
+
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ }
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
+ intel_opregion_asle_intr(dev);
+
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+ * set while we were handling the existing iir bits, then
+ * we would never get another interrupt.
+ *
+ * This is fine on non-MSI as well, as if we hit this path
+ * we avoid exiting the interrupt handler only to generate
+ * another one.
+ *
+ * Note that for MSI this could cause a stray interrupt report
+ * if an interrupt landed in the time between writing IIR and
+ * the posting read. This should be rare enough to never
+ * trigger the 99% of 100,000 interrupts test for disabling
+ * stray interrupts.
+ */
+ ret = IRQ_HANDLED;
+ iir = new_iir;
+ } while (iir & ~flip_mask);
+
+ i915_update_dri1_breadcrumb(dev);
+
+ return ret;
+}
+
+static void i915_irq_uninstall(struct drm_device * dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ intel_hpd_irq_uninstall(dev_priv);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+
+ I915_WRITE16(HWSTAM, 0xffff);
+ for_each_pipe(pipe) {
+ /* Clear enable bits; then clear status bits */
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
}
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+
+ I915_WRITE(IIR, I915_READ(IIR));
+}
+
+static void i965_irq_preinstall(struct drm_device * dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ I915_WRITE(HWSTAM, 0xeffe);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+ POSTING_READ(IER);
+}
+
+static int i965_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 enable_mask;
+ u32 error_mask;
+ unsigned long irqflags;
+
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+ enable_mask = ~dev_priv->irq_mask;
+ enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
+ enable_mask |= I915_USER_INTERRUPT;
+
+ if (IS_G4X(dev))
+ enable_mask |= I915_BSD_USER_INTERRUPT;
+
+ /* Interrupt setup is already guaranteed to be single-threaded; this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
/*
* Enable some error detection, note the instruction error mask
@@ -1150,61 +4111,348 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
}
I915_WRITE(EMR, error_mask);
- /* Disable pipe interrupt enables, clear pending pipe status */
- I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
- I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
- /* Clear pending interrupt status */
- I915_WRITE(IIR, I915_READ(IIR));
-
+ I915_WRITE(IMR, dev_priv->irq_mask);
I915_WRITE(IER, enable_mask);
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void) I915_READ(IER);
+ POSTING_READ(IER);
- opregion_enable_asle(dev);
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ POSTING_READ(PORT_HOTPLUG_EN);
+
+ i915_enable_asle_pipestat(dev);
return 0;
}
-static void ironlake_irq_uninstall(struct drm_device *dev)
+static void i915_hpd_irq_setup(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- I915_WRITE(HWSTAM, 0xffffffff);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *intel_encoder;
+ u32 hotplug_en;
- I915_WRITE(DEIMR, 0xffffffff);
- I915_WRITE(DEIER, 0x0);
- I915_WRITE(DEIIR, I915_READ(DEIIR));
+ assert_spin_locked(&dev_priv->irq_lock);
- I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- I915_WRITE(GTIIR, I915_READ(GTIIR));
+ if (I915_HAS_HOTPLUG(dev)) {
+ hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+ hotplug_en &= ~HOTPLUG_INT_EN_MASK;
+ /* Note HDMI and DP share hotplug bits */
+ /* enable bits are the same for all generations */
+ list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+ if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
+ /* Programming the CRT detection parameters tends
+ * to generate a spurious hotplug event about three
+ * seconds later. So just do it once.
+ */
+ if (IS_G4X(dev))
+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+ hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
+ /* Ignore TV since it's buggy */
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ }
}
-void i915_driver_irq_uninstall(struct drm_device * dev)
+static irqreturn_t i965_irq_handler(int irq, void *arg)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_device *dev = arg;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 iir, new_iir;
+ u32 pipe_stats[I915_MAX_PIPES];
+ unsigned long irqflags;
+ int ret = IRQ_NONE, pipe;
+ u32 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
- if (!dev_priv)
- return;
+ iir = I915_READ(IIR);
- dev_priv->vblank_pipe = 0;
+ for (;;) {
+ bool irq_received = (iir & ~flip_mask) != 0;
+ bool blc_event = false;
- if (IS_IRONLAKE(dev)) {
- ironlake_irq_uninstall(dev);
- return;
- }
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false,
+ "Command parser error, iir 0x%08x",
+ iir);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = true;
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- if (I915_HAS_HOTPLUG(dev)) {
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ if (!irq_received)
+ break;
+
+ ret = IRQ_HANDLED;
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ i9xx_hpd_irq_handler(dev);
+
+ I915_WRITE(IIR, iir & ~flip_mask);
+ new_iir = I915_READ(IIR); /* Flush posted writes */
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (iir & I915_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
+ i915_handle_vblank(dev, pipe, pipe, iir))
+ flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev, pipe);
+
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ }
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
+ intel_opregion_asle_intr(dev);
+
+ if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+ gmbus_irq_handler(dev);
+
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+ * set while we were handling the existing iir bits, then
+ * we would never get another interrupt.
+ *
+ * This is fine on non-MSI as well, as if we hit this path
+ * we avoid exiting the interrupt handler only to generate
+ * another one.
+ *
+ * Note that for MSI this could cause a stray interrupt report
+ * if an interrupt landed in the time between writing IIR and
+ * the posting read. This should be rare enough to never
+ * trigger the 99% of 100,000 interrupts test for disabling
+ * stray interrupts.
+ */
+ iir = new_iir;
}
+ i915_update_dri1_breadcrumb(dev);
+
+ return ret;
+}
+
+static void i965_irq_uninstall(struct drm_device * dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ if (!dev_priv)
+ return;
+
+ intel_hpd_irq_uninstall(dev_priv);
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
I915_WRITE(HWSTAM, 0xffffffff);
- I915_WRITE(PIPEASTAT, 0);
- I915_WRITE(PIPEBSTAT, 0);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
- I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
- I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe),
+ I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
I915_WRITE(IIR, I915_READ(IIR));
}
+
+static void intel_hpd_irq_reenable(unsigned long data)
+{
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ unsigned long irqflags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
+ struct drm_connector *connector;
+
+ if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
+ continue;
+
+ dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (intel_connector->encoder->hpd_pin == i) {
+ if (connector->polled != intel_connector->polled)
+ DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
+ connector->name);
+ connector->polled = intel_connector->polled;
+ if (!connector->polled)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ }
+ }
+ }
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+void intel_irq_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
+ INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
+ INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+
+ /* Let's track the enabled rps events */
+ dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+
+ setup_timer(&dev_priv->gpu_error.hangcheck_timer,
+ i915_hangcheck_elapsed,
+ (unsigned long) dev);
+ setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
+ (unsigned long) dev_priv);
+
+ pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+
+ if (IS_GEN2(dev)) {
+ dev->max_vblank_count = 0;
+ dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
+ } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+ dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+ } else {
+ dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+ dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+ }
+
+ if (IS_CHERRYVIEW(dev)) {
+ dev->driver->irq_handler = cherryview_irq_handler;
+ dev->driver->irq_preinstall = cherryview_irq_preinstall;
+ dev->driver->irq_postinstall = cherryview_irq_postinstall;
+ dev->driver->irq_uninstall = cherryview_irq_uninstall;
+ dev->driver->enable_vblank = valleyview_enable_vblank;
+ dev->driver->disable_vblank = valleyview_disable_vblank;
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev->driver->irq_handler = valleyview_irq_handler;
+ dev->driver->irq_preinstall = valleyview_irq_preinstall;
+ dev->driver->irq_postinstall = valleyview_irq_postinstall;
+ dev->driver->irq_uninstall = valleyview_irq_uninstall;
+ dev->driver->enable_vblank = valleyview_enable_vblank;
+ dev->driver->disable_vblank = valleyview_disable_vblank;
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ } else if (IS_GEN8(dev)) {
+ dev->driver->irq_handler = gen8_irq_handler;
+ dev->driver->irq_preinstall = gen8_irq_reset;
+ dev->driver->irq_postinstall = gen8_irq_postinstall;
+ dev->driver->irq_uninstall = gen8_irq_uninstall;
+ dev->driver->enable_vblank = gen8_enable_vblank;
+ dev->driver->disable_vblank = gen8_disable_vblank;
+ dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ dev->driver->irq_handler = ironlake_irq_handler;
+ dev->driver->irq_preinstall = ironlake_irq_reset;
+ dev->driver->irq_postinstall = ironlake_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ironlake_enable_vblank;
+ dev->driver->disable_vblank = ironlake_disable_vblank;
+ dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
+ } else {
+ if (INTEL_INFO(dev)->gen == 2) {
+ dev->driver->irq_preinstall = i8xx_irq_preinstall;
+ dev->driver->irq_postinstall = i8xx_irq_postinstall;
+ dev->driver->irq_handler = i8xx_irq_handler;
+ dev->driver->irq_uninstall = i8xx_irq_uninstall;
+ } else if (INTEL_INFO(dev)->gen == 3) {
+ dev->driver->irq_preinstall = i915_irq_preinstall;
+ dev->driver->irq_postinstall = i915_irq_postinstall;
+ dev->driver->irq_uninstall = i915_irq_uninstall;
+ dev->driver->irq_handler = i915_irq_handler;
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ } else {
+ dev->driver->irq_preinstall = i965_irq_preinstall;
+ dev->driver->irq_postinstall = i965_irq_postinstall;
+ dev->driver->irq_uninstall = i965_irq_uninstall;
+ dev->driver->irq_handler = i965_irq_handler;
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ }
+ dev->driver->enable_vblank = i915_enable_vblank;
+ dev->driver->disable_vblank = i915_disable_vblank;
+ }
+}
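intel_irq_init() only fills in the vtable; the hooks are driven by the
drm core. A sketch of roughly what drm_irq_install() does with them
(core flow paraphrased, not i915 code):

	int ret;

	dev->driver->irq_preinstall(dev);	/* mask and ack everything */
	ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
			  IRQF_SHARED, dev->driver->name, dev);
	if (ret == 0)
		ret = dev->driver->irq_postinstall(dev);	/* unmask wanted sources */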
+
+void intel_hpd_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+ unsigned long irqflags;
+ int i;
+
+ for (i = 1; i < HPD_NUM_PINS; i++) {
+ dev_priv->hpd_stats[i].hpd_cnt = 0;
+ dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
+ }
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ connector->polled = intel_connector->polled;
+ if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ }
+
+ /* Interrupt setup is already guaranteed to be single-threaded; this is
+ * just to make the assert_spin_locked checks happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+/* Disable interrupts so we can allow runtime PM. */
+void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev->driver->irq_uninstall(dev);
+ dev_priv->pm.irqs_disabled = true;
+}
+
+/* Restore interrupts so we can recover from runtime PM. */
+void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->pm.irqs_disabled = false;
+ dev->driver->irq_preinstall(dev);
+ dev->driver->irq_postinstall(dev);
+}
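These two helpers are meant to bracket runtime suspend and resume; the
restore side deliberately reruns preinstall plus postinstall because the
hardware loses its interrupt register state while powered down. Assumed
call sites (not part of this hunk):

	/* runtime suspend path */
	intel_runtime_pm_disable_interrupts(dev);

	/* runtime resume path: reprogram everything from scratch */
	intel_runtime_pm_restore_interrupts(dev);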
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c
deleted file mode 100644
index 83b7b81bb2b..00000000000
--- a/drivers/gpu/drm/i915/i915_mem.c
+++ /dev/null
@@ -1,387 +0,0 @@
-/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
- */
-/*
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-/* This memory manager is integrated into the global/local lru
- * mechanisms used by the clients. Specifically, it operates by
- * setting the 'in_use' fields of the global LRU to indicate whether
- * this region is privately allocated to a client.
- *
- * This does require the client to actually respect that field.
- *
- * Currently no effort is made to allocate 'private' memory in any
- * clever way - the LRU information isn't used to determine which
- * block to allocate, and the ring is drained prior to allocations --
- * in other words allocation is expensive.
- */
-static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- drm_i915_sarea_t *sarea_priv = master_priv->sarea_priv;
- struct drm_tex_region *list;
- unsigned shift, nr;
- unsigned start;
- unsigned end;
- unsigned i;
- int age;
-
- shift = dev_priv->tex_lru_log_granularity;
- nr = I915_NR_TEX_REGIONS;
-
- start = p->start >> shift;
- end = (p->start + p->size - 1) >> shift;
-
- age = ++sarea_priv->texAge;
- list = sarea_priv->texList;
-
- /* Mark the regions with the new flag and update their age. Move
- * them to head of list to preserve LRU semantics.
- */
- for (i = start; i <= end; i++) {
- list[i].in_use = in_use;
- list[i].age = age;
-
- /* remove_from_list(i)
- */
- list[(unsigned)list[i].next].prev = list[i].prev;
- list[(unsigned)list[i].prev].next = list[i].next;
-
- /* insert_at_head(list, i)
- */
- list[i].prev = nr;
- list[i].next = list[nr].next;
- list[(unsigned)list[nr].next].prev = i;
- list[nr].next = i;
- }
-}
-
-/* Very simple allocator for agp memory, working on a static range
- * already mapped into each client's address space.
- */
-
-static struct mem_block *split_block(struct mem_block *p, int start, int size,
- struct drm_file *file_priv)
-{
- /* Maybe cut off the start of an existing block */
- if (start > p->start) {
- struct mem_block *newblock = kmalloc(sizeof(*newblock),
- GFP_KERNEL);
- if (!newblock)
- goto out;
- newblock->start = start;
- newblock->size = p->size - (start - p->start);
- newblock->file_priv = NULL;
- newblock->next = p->next;
- newblock->prev = p;
- p->next->prev = newblock;
- p->next = newblock;
- p->size -= newblock->size;
- p = newblock;
- }
-
- /* Maybe cut off the end of an existing block */
- if (size < p->size) {
- struct mem_block *newblock = kmalloc(sizeof(*newblock),
- GFP_KERNEL);
- if (!newblock)
- goto out;
- newblock->start = start + size;
- newblock->size = p->size - size;
- newblock->file_priv = NULL;
- newblock->next = p->next;
- newblock->prev = p;
- p->next->prev = newblock;
- p->next = newblock;
- p->size = size;
- }
-
- out:
- /* Our block is in the middle */
- p->file_priv = file_priv;
- return p;
-}
-
-static struct mem_block *alloc_block(struct mem_block *heap, int size,
- int align2, struct drm_file *file_priv)
-{
- struct mem_block *p;
- int mask = (1 << align2) - 1;
-
- for (p = heap->next; p != heap; p = p->next) {
- int start = (p->start + mask) & ~mask;
- if (p->file_priv == NULL && start + size <= p->start + p->size)
- return split_block(p, start, size, file_priv);
- }
-
- return NULL;
-}
-
-static struct mem_block *find_block(struct mem_block *heap, int start)
-{
- struct mem_block *p;
-
- for (p = heap->next; p != heap; p = p->next)
- if (p->start == start)
- return p;
-
- return NULL;
-}
-
-static void free_block(struct mem_block *p)
-{
- p->file_priv = NULL;
-
- /* Assumes a single contiguous range. Needs a special file_priv in
- * 'heap' to stop it being subsumed.
- */
- if (p->next->file_priv == NULL) {
- struct mem_block *q = p->next;
- p->size += q->size;
- p->next = q->next;
- p->next->prev = p;
- kfree(q);
- }
-
- if (p->prev->file_priv == NULL) {
- struct mem_block *q = p->prev;
- q->size += p->size;
- q->next = p->next;
- q->next->prev = q;
- kfree(p);
- }
-}
-
-/* Initialize. How to check for an uninitialized heap?
- */
-static int init_heap(struct mem_block **heap, int start, int size)
-{
- struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
-
- if (!blocks)
- return -ENOMEM;
-
- *heap = kmalloc(sizeof(**heap), GFP_KERNEL);
- if (!*heap) {
- kfree(blocks);
- return -ENOMEM;
- }
-
- blocks->start = start;
- blocks->size = size;
- blocks->file_priv = NULL;
- blocks->next = blocks->prev = *heap;
-
- memset(*heap, 0, sizeof(**heap));
- (*heap)->file_priv = (struct drm_file *) - 1;
- (*heap)->next = (*heap)->prev = blocks;
- return 0;
-}
-
-/* Free all blocks associated with the releasing file.
- */
-void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
- struct mem_block *heap)
-{
- struct mem_block *p;
-
- if (!heap || !heap->next)
- return;
-
- for (p = heap->next; p != heap; p = p->next) {
- if (p->file_priv == file_priv) {
- p->file_priv = NULL;
- mark_block(dev, p, 0);
- }
- }
-
- /* Assumes a single contiguous range. Needs a special file_priv in
- * 'heap' to stop it being subsumed.
- */
- for (p = heap->next; p != heap; p = p->next) {
- while (p->file_priv == NULL && p->next->file_priv == NULL) {
- struct mem_block *q = p->next;
- p->size += q->size;
- p->next = q->next;
- p->next->prev = p;
- kfree(q);
- }
- }
-}
-
-/* Shutdown.
- */
-void i915_mem_takedown(struct mem_block **heap)
-{
- struct mem_block *p;
-
- if (!*heap)
- return;
-
- for (p = (*heap)->next; p != *heap;) {
- struct mem_block *q = p;
- p = p->next;
- kfree(q);
- }
-
- kfree(*heap);
- *heap = NULL;
-}
-
-static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
-{
- switch (region) {
- case I915_MEM_REGION_AGP:
- return &dev_priv->agp_heap;
- default:
- return NULL;
- }
-}
-
-/* IOCTL HANDLERS */
-
-int i915_mem_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_alloc_t *alloc = data;
- struct mem_block *block, **heap;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- heap = get_heap(dev_priv, alloc->region);
- if (!heap || !*heap)
- return -EFAULT;
-
- /* Make things easier on ourselves: all allocations at least
- * 4k aligned.
- */
- if (alloc->alignment < 12)
- alloc->alignment = 12;
-
- block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
-
- if (!block)
- return -ENOMEM;
-
- mark_block(dev, block, 1);
-
- if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
- sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-int i915_mem_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_free_t *memfree = data;
- struct mem_block *block, **heap;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- heap = get_heap(dev_priv, memfree->region);
- if (!heap || !*heap)
- return -EFAULT;
-
- block = find_block(*heap, memfree->region_offset);
- if (!block)
- return -EFAULT;
-
- if (block->file_priv != file_priv)
- return -EPERM;
-
- mark_block(dev, block, 0);
- free_block(block);
- return 0;
-}
-
-int i915_mem_init_heap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_init_heap_t *initheap = data;
- struct mem_block **heap;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- heap = get_heap(dev_priv, initheap->region);
- if (!heap)
- return -EFAULT;
-
- if (*heap) {
- DRM_ERROR("heap already initialized?");
- return -EFAULT;
- }
-
- return init_heap(heap, initheap->start, initheap->size);
-}
-
-int i915_mem_destroy_heap(struct drm_device *dev, void *data,
-			  struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_destroy_heap_t *destroyheap = data;
- struct mem_block **heap;
-
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
-	heap = get_heap(dev_priv, destroyheap->region);
- if (!heap) {
- DRM_ERROR("get_heap failed");
- return -EFAULT;
- }
-
- if (!*heap) {
- DRM_ERROR("heap not initialized?");
- return -EFAULT;
- }
-
-	i915_mem_takedown(heap);
- return 0;
-}
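
The allocator removed above is a first-fit scheme over a circular doubly-linked list with a sentinel node: split_block() carves padding and tail off a free block, and free_block() coalesces with free neighbours. A minimal userspace sketch of the same technique, with illustrative names and coalescing omitted for brevity:

#include <stdio.h>
#include <stdlib.h>

/* owner == NULL marks a free block; the sentinel gets a poison owner
 * so it can never be handed out or merged away. */
struct block {
	int start, size;
	void *owner;
	struct block *prev, *next;
};

static void link_after(struct block *p, struct block *q)
{
	q->prev = p;
	q->next = p->next;
	p->next->prev = q;
	p->next = q;
}

static struct block *heap_init(int start, int size)
{
	struct block *sent = calloc(1, sizeof(*sent));
	struct block *all = malloc(sizeof(*all));

	sent->owner = (void *)-1;	/* poison, never matches a client */
	all->start = start;
	all->size = size;
	all->owner = NULL;
	sent->next = sent->prev = all;
	all->next = all->prev = sent;
	return sent;
}

/* First fit with power-of-two alignment (align2 = log2(alignment)),
 * mirroring alloc_block()/split_block() above. */
static struct block *heap_alloc(struct block *heap, int size, int align2,
				void *owner)
{
	int mask = (1 << align2) - 1;
	struct block *p;

	for (p = heap->next; p != heap; p = p->next) {
		int start = (p->start + mask) & ~mask;

		if (p->owner || start + size > p->start + p->size)
			continue;

		if (start > p->start) {		/* carve off front padding */
			struct block *q = malloc(sizeof(*q));

			q->start = p->start;
			q->size = start - p->start;
			q->owner = NULL;
			link_after(p->prev, q);
			p->start = start;
			p->size -= q->size;
		}
		if (size < p->size) {		/* carve off unused tail */
			struct block *q = malloc(sizeof(*q));

			q->start = start + size;
			q->size = p->size - size;
			q->owner = NULL;
			link_after(p, q);
			p->size = size;
		}
		p->owner = owner;
		return p;
	}
	return NULL;
}

int main(void)
{
	struct block *heap = heap_init(0, 1 << 20);
	struct block *b = heap_alloc(heap, 4096, 12, heap);

	if (b)
		printf("allocated [%d, %d)\n", b->start, b->start + b->size);
	return 0;
}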
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
deleted file mode 100644
index 7cc8410239c..00000000000
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ /dev/null
@@ -1,519 +0,0 @@
-/*
- * Copyright 2008 Intel Corporation <hong.liu@intel.com>
- * Copyright 2008 Red Hat <mjg@redhat.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/acpi.h>
-#include <acpi/video.h>
-
-#include "drmP.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-#define PCI_ASLE 0xe4
-#define PCI_LBPC 0xf4
-#define PCI_ASLS 0xfc
-
-#define OPREGION_SZ (8*1024)
-#define OPREGION_HEADER_OFFSET 0
-#define OPREGION_ACPI_OFFSET 0x100
-#define OPREGION_SWSCI_OFFSET 0x200
-#define OPREGION_ASLE_OFFSET 0x300
-#define OPREGION_VBT_OFFSET 0x1000
-
-#define OPREGION_SIGNATURE "IntelGraphicsMem"
-#define MBOX_ACPI (1<<0)
-#define MBOX_SWSCI (1<<1)
-#define MBOX_ASLE (1<<2)
-
-struct opregion_header {
- u8 signature[16];
- u32 size;
- u32 opregion_ver;
- u8 bios_ver[32];
- u8 vbios_ver[16];
- u8 driver_ver[16];
- u32 mboxes;
- u8 reserved[164];
-} __attribute__((packed));
-
-/* OpRegion mailbox #1: public ACPI methods */
-struct opregion_acpi {
- u32 drdy; /* driver readiness */
- u32 csts; /* notification status */
- u32 cevt; /* current event */
- u8 rsvd1[20];
- u32 didl[8]; /* supported display devices ID list */
- u32 cpdl[8]; /* currently presented display list */
- u32 cadl[8]; /* currently active display list */
- u32 nadl[8]; /* next active devices list */
- u32 aslp; /* ASL sleep time-out */
- u32 tidx; /* toggle table index */
- u32 chpd; /* current hotplug enable indicator */
- u32 clid; /* current lid state*/
- u32 cdck; /* current docking state */
- u32 sxsw; /* Sx state resume */
- u32 evts; /* ASL supported events */
- u32 cnot; /* current OS notification */
- u32 nrdy; /* driver status */
- u8 rsvd2[60];
-} __attribute__((packed));
-
-/* OpRegion mailbox #2: SWSCI */
-struct opregion_swsci {
- u32 scic; /* SWSCI command|status|data */
- u32 parm; /* command parameters */
- u32 dslp; /* driver sleep time-out */
- u8 rsvd[244];
-} __attribute__((packed));
-
-/* OpRegion mailbox #3: ASLE */
-struct opregion_asle {
- u32 ardy; /* driver readiness */
- u32 aslc; /* ASLE interrupt command */
- u32 tche; /* technology enabled indicator */
- u32 alsi; /* current ALS illuminance reading */
- u32 bclp; /* backlight brightness to set */
- u32 pfit; /* panel fitting state */
- u32 cblv; /* current brightness level */
- u16 bclm[20]; /* backlight level duty cycle mapping table */
- u32 cpfm; /* current panel fitting mode */
- u32 epfm; /* enabled panel fitting modes */
- u8 plut[74]; /* panel LUT and identifier */
- u32 pfmb; /* PWM freq and min brightness */
- u8 rsvd[102];
-} __attribute__((packed));
-
-/* ASLE irq request bits */
-#define ASLE_SET_ALS_ILLUM (1 << 0)
-#define ASLE_SET_BACKLIGHT (1 << 1)
-#define ASLE_SET_PFIT (1 << 2)
-#define ASLE_SET_PWM_FREQ (1 << 3)
-#define ASLE_REQ_MSK 0xf
-
-/* response bits of ASLE irq request */
-#define ASLE_ALS_ILLUM_FAIL (2<<10)
-#define ASLE_BACKLIGHT_FAIL (2<<12)
-#define ASLE_PFIT_FAIL (2<<14)
-#define ASLE_PWM_FREQ_FAIL (2<<16)
-#define ASLE_ALS_ILLUM_FAILED (1<<10)
-#define ASLE_BACKLIGHT_FAILED (1<<12)
-#define ASLE_PFIT_FAILED (1<<14)
-#define ASLE_PWM_FREQ_FAILED (1<<16)
-
-/* ASLE backlight brightness to set */
-#define ASLE_BCLP_VALID (1<<31)
-#define ASLE_BCLP_MSK (~(1<<31))
-
-/* ASLE panel fitting request */
-#define ASLE_PFIT_VALID (1<<31)
-#define ASLE_PFIT_CENTER (1<<0)
-#define ASLE_PFIT_STRETCH_TEXT (1<<1)
-#define ASLE_PFIT_STRETCH_GFX (1<<2)
-
-/* PWM frequency and minimum brightness */
-#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
-#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
-#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
-#define ASLE_PFMB_PWM_VALID (1<<31)
-
-#define ASLE_CBLV_VALID (1<<31)
-
-#define ACPI_OTHER_OUTPUT (0<<8)
-#define ACPI_VGA_OUTPUT (1<<8)
-#define ACPI_TV_OUTPUT (2<<8)
-#define ACPI_DIGITAL_OUTPUT (3<<8)
-#define ACPI_LVDS_OUTPUT (4<<8)
-
-static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
- u32 blc_pwm_ctl, blc_pwm_ctl2;
- u32 max_backlight, level, shift;
-
- if (!(bclp & ASLE_BCLP_VALID))
- return ASLE_BACKLIGHT_FAIL;
-
- bclp &= ASLE_BCLP_MSK;
- if (bclp < 0 || bclp > 255)
- return ASLE_BACKLIGHT_FAIL;
-
- blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
- blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
-
- if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
- pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
- else {
- if (IS_PINEVIEW(dev)) {
- blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
- max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
- BACKLIGHT_MODULATION_FREQ_SHIFT;
- shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1;
- } else {
- blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
- max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
- BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
- shift = BACKLIGHT_DUTY_CYCLE_SHIFT;
- }
- level = (bclp * max_backlight) / 255;
- I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift));
- }
- asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
-
- return 0;
-}
-
-static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
-{
- /* alsi is the current ALS reading in lux. 0 indicates below sensor
- range, 0xffff indicates above sensor range. 1-0xfffe are valid */
- return 0;
-}
-
-static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- if (pfmb & ASLE_PFMB_PWM_VALID) {
- u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
- u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
- blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
- pwm = pwm >> 9;
- /* FIXME - what do we do with the PWM? */
- }
- return 0;
-}
-
-static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
-{
- /* Panel fitting is currently controlled by the X code, so this is a
- noop until modesetting support works fully */
- if (!(pfit & ASLE_PFIT_VALID))
- return ASLE_PFIT_FAIL;
- return 0;
-}
-
-void opregion_asle_intr(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
- u32 asle_stat = 0;
- u32 asle_req;
-
- if (!asle)
- return;
-
- asle_req = asle->aslc & ASLE_REQ_MSK;
-
- if (!asle_req) {
- DRM_DEBUG_DRIVER("non asle set request??\n");
- return;
- }
-
- if (asle_req & ASLE_SET_ALS_ILLUM)
- asle_stat |= asle_set_als_illum(dev, asle->alsi);
-
- if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev, asle->bclp);
-
- if (asle_req & ASLE_SET_PFIT)
- asle_stat |= asle_set_pfit(dev, asle->pfit);
-
- if (asle_req & ASLE_SET_PWM_FREQ)
- asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
-
- asle->aslc = asle_stat;
-}
-
-static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
- u32 cpu_pwm_ctl, pch_pwm_ctl2;
- u32 max_backlight, level;
-
- if (!(bclp & ASLE_BCLP_VALID))
- return ASLE_BACKLIGHT_FAILED;
-
- bclp &= ASLE_BCLP_MSK;
- if (bclp < 0 || bclp > 255)
- return ASLE_BACKLIGHT_FAILED;
-
- cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
- pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
- /* get the max PWM frequency */
- max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
-	/* calculate the expected PWM frequency */
- level = (bclp * max_backlight) / 255;
-	/* preserve the high 16 bits */
- cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
- /* write the updated PWM frequency */
- I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
-
- asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
-
- return 0;
-}
-
-void ironlake_opregion_gse_intr(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
- u32 asle_stat = 0;
- u32 asle_req;
-
- if (!asle)
- return;
-
- asle_req = asle->aslc & ASLE_REQ_MSK;
-
- if (!asle_req) {
- DRM_DEBUG_DRIVER("non asle set request??\n");
- return;
- }
-
- if (asle_req & ASLE_SET_ALS_ILLUM) {
- DRM_DEBUG_DRIVER("Illum is not supported\n");
- asle_stat |= ASLE_ALS_ILLUM_FAILED;
- }
-
- if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp);
-
- if (asle_req & ASLE_SET_PFIT) {
- DRM_DEBUG_DRIVER("Pfit is not supported\n");
- asle_stat |= ASLE_PFIT_FAILED;
- }
-
- if (asle_req & ASLE_SET_PWM_FREQ) {
- DRM_DEBUG_DRIVER("PWM freq is not supported\n");
- asle_stat |= ASLE_PWM_FREQ_FAILED;
- }
-
- asle->aslc = asle_stat;
-}
-#define ASLE_ALS_EN (1<<0)
-#define ASLE_BLC_EN (1<<1)
-#define ASLE_PFIT_EN (1<<2)
-#define ASLE_PFMB_EN (1<<3)
-
-void opregion_enable_asle(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
-
- if (asle) {
- if (IS_MOBILE(dev)) {
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- intel_enable_asle(dev);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock,
- irqflags);
- }
-
- asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
- ASLE_PFMB_EN;
- asle->ardy = 1;
- }
-}
-
-#define ACPI_EV_DISPLAY_SWITCH (1<<0)
-#define ACPI_EV_LID (1<<1)
-#define ACPI_EV_DOCK (1<<2)
-
-static struct intel_opregion *system_opregion;
-
-static int intel_opregion_video_event(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- /* The only video events relevant to opregion are 0x80. These indicate
- either a docking event, lid switch or display switch request. In
- Linux, these are handled by the dock, button and video drivers.
- We might want to fix the video driver to be opregion-aware in
- future, but right now we just indicate to the firmware that the
- request has been handled */
-
- struct opregion_acpi *acpi;
-
- if (!system_opregion)
- return NOTIFY_DONE;
-
- acpi = system_opregion->acpi;
- acpi->csts = 0;
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block intel_opregion_notifier = {
- .notifier_call = intel_opregion_video_event,
-};
-
-/*
- * Initialise the DIDL field in opregion. This passes a list of devices to
- * the firmware. Values are defined by section B.4.2 of the ACPI specification
- * (version 3)
- */
-
-static void intel_didl_outputs(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_opregion *opregion = &dev_priv->opregion;
- struct drm_connector *connector;
- int i = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- int output_type = ACPI_OTHER_OUTPUT;
- if (i >= 8) {
-			dev_printk(KERN_ERR, &dev->pdev->dev,
-				   "More than 8 outputs detected\n");
- return;
- }
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_VGA:
- case DRM_MODE_CONNECTOR_DVIA:
- output_type = ACPI_VGA_OUTPUT;
- break;
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_9PinDIN:
- output_type = ACPI_TV_OUTPUT;
- break;
- case DRM_MODE_CONNECTOR_DVII:
- case DRM_MODE_CONNECTOR_DVID:
- case DRM_MODE_CONNECTOR_DisplayPort:
- case DRM_MODE_CONNECTOR_HDMIA:
- case DRM_MODE_CONNECTOR_HDMIB:
- output_type = ACPI_DIGITAL_OUTPUT;
- break;
- case DRM_MODE_CONNECTOR_LVDS:
- output_type = ACPI_LVDS_OUTPUT;
- break;
- }
- opregion->acpi->didl[i] |= (1<<31) | output_type | i;
- i++;
- }
-
- /* If fewer than 8 outputs, the list must be null terminated */
- if (i < 8)
- opregion->acpi->didl[i] = 0;
-}
-
-int intel_opregion_init(struct drm_device *dev, int resume)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_opregion *opregion = &dev_priv->opregion;
- void *base;
- u32 asls, mboxes;
- int err = 0;
-
- pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
- DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
- if (asls == 0) {
- DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
- return -ENOTSUPP;
- }
-
- base = ioremap(asls, OPREGION_SZ);
- if (!base)
- return -ENOMEM;
-
- opregion->header = base;
- if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
- DRM_DEBUG_DRIVER("opregion signature mismatch\n");
- err = -EINVAL;
- goto err_out;
- }
-
- mboxes = opregion->header->mboxes;
- if (mboxes & MBOX_ACPI) {
- DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
- opregion->acpi = base + OPREGION_ACPI_OFFSET;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_didl_outputs(dev);
- } else {
- DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
- err = -ENOTSUPP;
- goto err_out;
- }
- opregion->enabled = 1;
-
- if (mboxes & MBOX_SWSCI) {
- DRM_DEBUG_DRIVER("SWSCI supported\n");
- opregion->swsci = base + OPREGION_SWSCI_OFFSET;
- }
- if (mboxes & MBOX_ASLE) {
- DRM_DEBUG_DRIVER("ASLE supported\n");
- opregion->asle = base + OPREGION_ASLE_OFFSET;
- opregion_enable_asle(dev);
- }
-
- if (!resume)
- acpi_video_register();
-
-
- /* Notify BIOS we are ready to handle ACPI video ext notifs.
- * Right now, all the events are handled by the ACPI video module.
- * We don't actually need to do anything with them. */
- opregion->acpi->csts = 0;
- opregion->acpi->drdy = 1;
-
- system_opregion = opregion;
- register_acpi_notifier(&intel_opregion_notifier);
-
- return 0;
-
-err_out:
- iounmap(opregion->header);
- opregion->header = NULL;
- return err;
-}
-
-void intel_opregion_free(struct drm_device *dev, int suspend)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_opregion *opregion = &dev_priv->opregion;
-
- if (!opregion->enabled)
- return;
-
- if (!suspend)
- acpi_video_unregister();
-
- opregion->acpi->drdy = 0;
-
- system_opregion = NULL;
- unregister_acpi_notifier(&intel_opregion_notifier);
-
- /* just clear all opregion memory pointers now */
- iounmap(opregion->header);
- opregion->header = NULL;
- opregion->acpi = NULL;
- opregion->swsci = NULL;
- opregion->asle = NULL;
-
- opregion->enabled = 0;
-}
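
The ASLE backlight path deleted above boils down to two lines of arithmetic: scale the firmware's 0-255 request onto the hardware duty-cycle range, then echo the brightness back as a percentage in cblv. A self-contained sketch of that math, with the register reads replaced by a plain max_backlight parameter (constants copied from the file above):

#include <stdio.h>
#include <stdint.h>

#define ASLE_BCLP_VALID (1u << 31)
#define ASLE_BCLP_MSK   (~(1u << 31))
#define ASLE_CBLV_VALID (1u << 31)

/* max_backlight stands in for the duty-cycle maximum read from
 * BLC_PWM_CTL; duty is what would be written back, cblv is the
 * percentage reported to the firmware. */
static int asle_backlight(uint32_t bclp, uint32_t max_backlight,
			  uint32_t *duty, uint32_t *cblv)
{
	if (!(bclp & ASLE_BCLP_VALID))
		return -1;
	bclp &= ASLE_BCLP_MSK;
	if (bclp > 255)
		return -1;

	*duty = (bclp * max_backlight) / 255;		/* 0..255 -> 0..max */
	*cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;	/* 0..100 percent */
	return 0;
}

int main(void)
{
	uint32_t duty, cblv;

	if (asle_backlight(ASLE_BCLP_VALID | 128, 1000, &duty, &cblv) == 0)
		printf("duty=%u cblv=%u%%\n", duty, cblv & ~ASLE_CBLV_VALID);
	return 0;
}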
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
new file mode 100644
index 00000000000..d05a2afa17d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "i915_drv.h"
+
+struct i915_params i915 __read_mostly = {
+ .modeset = -1,
+ .panel_ignore_lid = 1,
+ .powersave = 1,
+ .semaphores = -1,
+ .lvds_downclock = 0,
+ .lvds_channel_mode = 0,
+ .panel_use_ssc = -1,
+ .vbt_sdvo_panel_type = -1,
+ .enable_rc6 = -1,
+ .enable_fbc = -1,
+ .enable_hangcheck = true,
+ .enable_ppgtt = -1,
+ .enable_psr = 0,
+ .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
+ .disable_power_well = 1,
+ .enable_ips = 1,
+ .fastboot = 0,
+ .prefault_disable = 0,
+ .reset = true,
+ .invert_brightness = 0,
+ .disable_display = 0,
+ .enable_cmd_parser = 1,
+ .disable_vtd_wa = 0,
+};
+
+module_param_named(modeset, i915.modeset, int, 0400);
+MODULE_PARM_DESC(modeset,
+ "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+ "1=on, -1=force vga console preference [default])");
+
+module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
+MODULE_PARM_DESC(panel_ignore_lid,
+ "Override lid status (0=autodetect, 1=autodetect disabled [default], "
+ "-1=force lid closed, -2=force lid open)");
+
+module_param_named(powersave, i915.powersave, int, 0600);
+MODULE_PARM_DESC(powersave,
+ "Enable powersavings, fbc, downclocking, etc. (default: true)");
+
+module_param_named(semaphores, i915.semaphores, int, 0400);
+MODULE_PARM_DESC(semaphores,
+ "Use semaphores for inter-ring sync "
+ "(default: -1 (use per-chip defaults))");
+
+module_param_named(enable_rc6, i915.enable_rc6, int, 0400);
+MODULE_PARM_DESC(enable_rc6,
+ "Enable power-saving render C-state 6. "
+ "Different stages can be selected via bitmask values "
+ "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
+ "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
+ "default: -1 (use per-chip default)");
+
+module_param_named(enable_fbc, i915.enable_fbc, int, 0600);
+MODULE_PARM_DESC(enable_fbc,
+ "Enable frame buffer compression for power savings "
+ "(default: -1 (use per-chip default))");
+
+module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
+MODULE_PARM_DESC(lvds_downclock,
+ "Use panel (LVDS/eDP) downclocking for power savings "
+ "(default: false)");
+
+module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
+MODULE_PARM_DESC(lvds_channel_mode,
+ "Specify LVDS channel mode "
+ "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
+
+module_param_named(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
+MODULE_PARM_DESC(lvds_use_ssc,
+ "Use Spread Spectrum Clock with panels [LVDS/eDP] "
+ "(default: auto from VBT)");
+
+module_param_named(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
+MODULE_PARM_DESC(vbt_sdvo_panel_type,
+ "Override/Ignore selection of SDVO panel mode in the VBT "
+ "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
+
+module_param_named(reset, i915.reset, bool, 0600);
+MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+
+module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
+MODULE_PARM_DESC(enable_hangcheck,
+ "Periodically check GPU activity for detecting hangs. "
+ "WARNING: Disabling this can cause system wide hangs. "
+ "(default: true)");
+
+module_param_named(enable_ppgtt, i915.enable_ppgtt, int, 0400);
+MODULE_PARM_DESC(enable_ppgtt,
+ "Override PPGTT usage. "
+ "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
+
+module_param_named(enable_psr, i915.enable_psr, int, 0600);
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+
+module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
+MODULE_PARM_DESC(preliminary_hw_support,
+ "Enable preliminary hardware support.");
+
+module_param_named(disable_power_well, i915.disable_power_well, int, 0600);
+MODULE_PARM_DESC(disable_power_well,
+ "Disable the power well when possible (default: true)");
+
+module_param_named(enable_ips, i915.enable_ips, int, 0600);
+MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
+
+module_param_named(fastboot, i915.fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot,
+ "Try to skip unnecessary mode sets at boot time (default: false)");
+
+module_param_named(prefault_disable, i915.prefault_disable, bool, 0600);
+MODULE_PARM_DESC(prefault_disable,
+	"Disable page prefaulting for pread/pwrite/reloc (default: false). "
+ "For developers only.");
+
+module_param_named(invert_brightness, i915.invert_brightness, int, 0600);
+MODULE_PARM_DESC(invert_brightness,
+ "Invert backlight brightness "
+ "(-1 force normal, 0 machine defaults, 1 force inversion), please "
+ "report PCI device ID, subsystem vendor and subsystem device ID "
+ "to dri-devel@lists.freedesktop.org, if your machine needs it. "
+ "It will then be included in an upcoming module version.");
+
+module_param_named(disable_display, i915.disable_display, bool, 0600);
+MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
+
+module_param_named(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
+MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
+
+module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
+MODULE_PARM_DESC(enable_cmd_parser,
+ "Enable command parsing (1=enabled [default], 0=disabled)");
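
The new file collects every option into a single __read_mostly struct and binds each field with module_param_named(), so the driver reads i915.<field> rather than scattered globals. A minimal sketch of the same pattern for a hypothetical module (the demo_* names are invented):

#include <linux/module.h>

struct demo_params {
	int verbosity;
	bool enable_foo;
};

/* One instance holds all tunables, mirroring the i915 struct above. */
static struct demo_params demo __read_mostly = {
	.verbosity = 0,
	.enable_foo = true,
};

/* Each field appears under /sys/module/<name>/parameters/ with the
 * given permissions; 0400 makes enable_foo read-only at runtime. */
module_param_named(verbosity, demo.verbosity, int, 0600);
MODULE_PARM_DESC(verbosity, "Log verbosity (default: 0)");

module_param_named(enable_foo, demo.enable_foo, bool, 0400);
MODULE_PARM_DESC(enable_foo, "Enable the foo feature (default: true)");

MODULE_LICENSE("GPL");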
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 149d360d64a..a5bab61bfc0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -25,33 +25,15 @@
#ifndef _I915_REG_H_
#define _I915_REG_H_
-/*
- * The Bridge device's PCI config space has information about the
- * fb aperture size and the amount of pre-reserved memory.
- */
-#define INTEL_GMCH_CTRL 0x52
-#define INTEL_GMCH_VGA_DISABLE (1 << 1)
-#define INTEL_GMCH_ENABLED 0x4
-#define INTEL_GMCH_MEM_MASK 0x1
-#define INTEL_GMCH_MEM_64M 0x1
-#define INTEL_GMCH_MEM_128M 0
-
-#define INTEL_GMCH_GMS_MASK (0xf << 4)
-#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
-
-#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
-#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
-#define INTEL_GMCH_GMS_STOLEN_128M (0x8 << 4)
-#define INTEL_GMCH_GMS_STOLEN_256M (0x9 << 4)
-#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
-#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
-#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
-#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
+
+#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
+#define _PIPE3(pipe, a, b, c) (pipe < 2 ? _PIPE(pipe, a, b) : c)
+#define _PORT3(port, a, b, c) (port < 2 ? _PORT(port, a, b) : c)
+
+#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
+#define _MASKED_BIT_DISABLE(a) ((a) << 16)
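
The _PIPE/_PORT helpers above compute an instance's address by linear extrapolation from the first two offsets, and the _MASKED_BIT_* macros build the "mask in the upper 16 bits" encoding that lets masked registers be updated without a read-modify-write cycle. A quick userspace check of the arithmetic (the DE_LOAD_SL addresses are defined later in this header):

#include <stdio.h>

#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
#define _MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)

int main(void)
{
	/* Pipe A at 0x70068, pipe B at 0x71068: a 0x1000 stride, so
	 * pipe 1 interpolates to the pipe-B address. */
	printf("pipe1: 0x%x\n", _PIPE(1, 0x70068, 0x71068));

	/* The high half tells the hardware which low bits to update,
	 * so only bit 2 changes regardless of the register's state. */
	printf("enable bit2:  0x%08x\n", _MASKED_BIT_ENABLE(1 << 2));
	printf("disable bit2: 0x%08x\n", _MASKED_BIT_DISABLE(1 << 2));
	return 0;
}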
/* PCI config space */
@@ -61,10 +43,17 @@
#define GC_CLOCK_100_200 (1 << 0)
#define GC_CLOCK_100_133 (2 << 0)
#define GC_CLOCK_166_250 (3 << 0)
+#define GCFGC2 0xda
#define GCFGC 0xf0 /* 915+ only */
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
+#define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4)
+#define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4)
+#define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4)
+#define GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4)
+#define GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4)
+#define GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4)
#define GC_DISPLAY_CLOCK_MASK (7 << 4)
#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0)
#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0)
@@ -85,11 +74,74 @@
#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
-#define LBB 0xf4
-#define GDRST 0xc0
-#define GDRST_FULL (0<<2)
-#define GDRST_RENDER (1<<2)
-#define GDRST_MEDIA (3<<2)
+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
+
+
+/* Graphics reset regs */
+#define I965_GDRST 0xc0 /* PCI config register */
+#define GRDOM_FULL (0<<2)
+#define GRDOM_RENDER (1<<2)
+#define GRDOM_MEDIA (3<<2)
+#define GRDOM_MASK (3<<2)
+#define GRDOM_RESET_ENABLE (1<<0)
+
+#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
+#define ILK_GRDOM_FULL (0<<1)
+#define ILK_GRDOM_RENDER (1<<1)
+#define ILK_GRDOM_MEDIA (3<<1)
+#define ILK_GRDOM_MASK (3<<1)
+#define ILK_GRDOM_RESET_ENABLE (1<<0)
+
+#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
+#define GEN6_MBC_SNPCR_SHIFT 21
+#define GEN6_MBC_SNPCR_MASK (3<<21)
+#define GEN6_MBC_SNPCR_MAX (0<<21)
+#define GEN6_MBC_SNPCR_MED (1<<21)
+#define GEN6_MBC_SNPCR_LOW (2<<21)
+#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
+
+#define VLV_G3DCTL 0x9024
+#define VLV_GSCKGCTL 0x9028
+
+#define GEN6_MBCTL 0x0907c
+#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
+#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
+#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
+#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
+#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
+
+#define GEN6_GDRST 0x941c
+#define GEN6_GRDOM_FULL (1 << 0)
+#define GEN6_GRDOM_RENDER (1 << 1)
+#define GEN6_GRDOM_MEDIA (1 << 2)
+#define GEN6_GRDOM_BLT (1 << 3)
+
+#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
+#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
+#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
+#define PP_DIR_DCLV_2G 0xffffffff
+
+#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4))
+#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8)
+
+#define GAM_ECOCHK 0x4090
+#define ECOCHK_SNB_BIT (1<<10)
+#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
+#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
+#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
+#define ECOCHK_PPGTT_GFDT_IVB (0x1<<4)
+#define ECOCHK_PPGTT_LLC_IVB (0x1<<3)
+#define ECOCHK_PPGTT_UC_HSW (0x1<<3)
+#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
+#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
+
+#define GAC_ECO_BITS 0x14090
+#define ECOBITS_SNB_BIT (1<<13)
+#define ECOBITS_PPGTT_CACHE64B (3<<8)
+#define ECOBITS_PPGTT_CACHE4B (0<<8)
+
+#define GAB_CTL 0x24000
+#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
/* VGA stuff */
@@ -102,6 +154,7 @@
#define VGA_MSR_CGA_MODE (1<<0)
#define VGA_SR_INDEX 0x3c4
+#define SR01 1
#define VGA_SR_DATA 0x3c5
#define VGA_AR_INDEX 0x3c0
@@ -133,9 +186,23 @@
#define VGA_CR_DATA_CGA 0x3d5
/*
+ * Instruction field definitions used by the command parser
+ */
+#define INSTR_CLIENT_SHIFT 29
+#define INSTR_CLIENT_MASK 0xE0000000
+#define INSTR_MI_CLIENT 0x0
+#define INSTR_BC_CLIENT 0x2
+#define INSTR_RC_CLIENT 0x3
+#define INSTR_SUBCLIENT_SHIFT 27
+#define INSTR_SUBCLIENT_MASK 0x18000000
+#define INSTR_MEDIA_SUBCLIENT 0x2
+
+/*
* Memory interface instructions used by the kernel
*/
#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
+/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */
+#define MI_GLOBAL_GTT (1<<22)
#define MI_NOOP MI_INSTR(0, 0)
#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
@@ -150,24 +217,92 @@
#define MI_NO_WRITE_FLUSH (1 << 2)
#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
-#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
+#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
-#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
+#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
+#define MI_ARB_ENABLE (1<<0)
+#define MI_ARB_DISABLE (0<<0)
+#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
+#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
+#define MI_SUSPEND_FLUSH_EN (1<<0)
+#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
#define MI_OVERLAY_CONTINUE (0x0<<21)
#define MI_OVERLAY_ON (0x1<<21)
#define MI_OVERLAY_OFF (0x2<<21)
#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
+#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+/* IVB has funny definitions for which plane to flip. */
+#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19)
+#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
+#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
+#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
+#define MI_SEMAPHORE_UPDATE (1<<21)
+#define MI_SEMAPHORE_COMPARE (1<<20)
+#define MI_SEMAPHORE_REGISTER (1<<18)
+#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
+#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
+#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
+#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
+#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
+#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
+#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
+#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
+#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
+#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
+#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
+#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
+#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
+#define MI_SEMAPHORE_SYNC_MASK (3<<16)
+#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
+#define MI_MM_SPACE_GTT (1<<8)
+#define MI_MM_SPACE_PHYSICAL (0<<8)
+#define MI_SAVE_EXT_STATE_EN (1<<3)
+#define MI_RESTORE_EXT_STATE_EN (1<<2)
+#define MI_FORCE_RESTORE (1<<1)
+#define MI_RESTORE_INHIBIT (1<<0)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
-#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
+/* Official Intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
+ * - Always issue an MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
+ *   simply ignores the register load under certain conditions.
+ * - One can actually load arbitrarily many arbitrary registers: simply issue x
+ *   address/value pairs. Don't overdo it, though; x <= 2^4 must hold!
+ */
+#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
+#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1)
+#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1)
+#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
+#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
+#define MI_FLUSH_DW_STORE_INDEX (1<<21)
+#define MI_INVALIDATE_TLB (1<<18)
+#define MI_FLUSH_DW_OP_STOREDW (1<<14)
+#define MI_FLUSH_DW_OP_MASK (3<<14)
+#define MI_FLUSH_DW_NOTIFY (1<<8)
+#define MI_INVALIDATE_BSD (1<<7)
+#define MI_FLUSH_DW_USE_GTT (1<<2)
+#define MI_FLUSH_DW_USE_PPGTT (0<<2)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
-#define MI_BATCH_NON_SECURE (1)
-#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_NON_SECURE (1)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_PPGTT_HSW (1<<8)
+#define MI_BATCH_NON_SECURE_HSW (1<<13)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
+#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
+
+
+#define MI_PREDICATE_RESULT_2 (0x2214)
+#define LOWER_SLICE_ENABLED (1<<0)
+#define LOWER_SLICE_DISABLED (0<<0)
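
Per the MI_LOAD_REGISTER_IMM note above, a sketch of how such a command-stream fragment could be assembled: one MI_NOOP, the LRI header sized for x register/value pairs, then the pairs themselves. The emission helper is illustrative, not the driver's actual path:

#include <stdio.h>
#include <stdint.h>

#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
#define MI_NOOP                 MI_INSTR(0, 0)
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)

/* Emit MI_NOOP + LRI writing count register/value pairs into cs;
 * returns the number of dwords written. */
static int emit_lri(uint32_t *cs, const uint32_t *regs,
		    const uint32_t *vals, int count)
{
	int i, n = 0;

	cs[n++] = MI_NOOP;			/* required before LRI */
	cs[n++] = MI_LOAD_REGISTER_IMM(count);	/* length field = 2x-1 */
	for (i = 0; i < count; i++) {
		cs[n++] = regs[i];
		cs[n++] = vals[i];
	}
	return n;
}

int main(void)
{
	uint32_t cs[8];
	const uint32_t regs[] = { 0x2360 /* OACONTROL */ };
	const uint32_t vals[] = { 0x1 };
	int i, n = emit_lri(cs, regs, vals, 1);

	for (i = 0; i < n; i++)
		printf("dw%d: 0x%08x\n", i, cs[i]);
	return 0;
}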
/*
* 3D instructions used by the kernel
@@ -210,8 +345,568 @@
#define ASYNC_FLIP (1<<22)
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
+#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
+#define PIPE_CONTROL_MMIO_WRITE (1<<23)
+#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
+#define PIPE_CONTROL_CS_STALL (1<<20)
+#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
+#define PIPE_CONTROL_QW_WRITE (1<<14)
+#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
+#define PIPE_CONTROL_DEPTH_STALL (1<<13)
+#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
+#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
+#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */
+#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
+#define PIPE_CONTROL_NOTIFY (1<<8)
+#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
+#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
+#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
+#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1)
+#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
+#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+
+/*
+ * Commands used only by the command parser
+ */
+#define MI_SET_PREDICATE MI_INSTR(0x01, 0)
+#define MI_ARB_CHECK MI_INSTR(0x05, 0)
+#define MI_RS_CONTROL MI_INSTR(0x06, 0)
+#define MI_URB_ATOMIC_ALLOC MI_INSTR(0x09, 0)
+#define MI_PREDICATE MI_INSTR(0x0C, 0)
+#define MI_RS_CONTEXT MI_INSTR(0x0F, 0)
+#define MI_TOPOLOGY_FILTER MI_INSTR(0x0D, 0)
+#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0)
+#define MI_URB_CLEAR MI_INSTR(0x19, 0)
+#define MI_UPDATE_GTT MI_INSTR(0x23, 0)
+#define MI_CLFLUSH MI_INSTR(0x27, 0)
+#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
+#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
+#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 0)
+#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
+#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
+#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
+#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
+#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
+
+#define PIPELINE_SELECT ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
+#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
+#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
+#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
+#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
+#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
+#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16))
+#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16))
+#define GFX_OP_3DSTATE_SO_DECL_LIST \
+ ((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16))
+
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16))
+
+#define MFX_WAIT ((0x3<<29)|(0x1<<27)|(0x0<<16))
+
+#define COLOR_BLT ((0x2<<29)|(0x40<<22))
+#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))
/*
+ * Registers used only by the command parser
+ */
+#define BCS_SWCTRL 0x22200
+
+#define HS_INVOCATION_COUNT 0x2300
+#define DS_INVOCATION_COUNT 0x2308
+#define IA_VERTICES_COUNT 0x2310
+#define IA_PRIMITIVES_COUNT 0x2318
+#define VS_INVOCATION_COUNT 0x2320
+#define GS_INVOCATION_COUNT 0x2328
+#define GS_PRIMITIVES_COUNT 0x2330
+#define CL_INVOCATION_COUNT 0x2338
+#define CL_PRIMITIVES_COUNT 0x2340
+#define PS_INVOCATION_COUNT 0x2348
+#define PS_DEPTH_COUNT 0x2350
+
+/* There are 4 64-bit counter registers, one for each stream output */
+#define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)
+
+#define GEN7_SO_PRIM_STORAGE_NEEDED(n) (0x5240 + (n) * 8)
+
+#define GEN7_3DPRIM_END_OFFSET 0x2420
+#define GEN7_3DPRIM_START_VERTEX 0x2430
+#define GEN7_3DPRIM_VERTEX_COUNT 0x2434
+#define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
+#define GEN7_3DPRIM_START_INSTANCE 0x243C
+#define GEN7_3DPRIM_BASE_VERTEX 0x2440
+
+#define OACONTROL 0x2360
+
+#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
+#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
+#define GEN7_PIPE_DE_LOAD_SL(pipe) _PIPE(pipe, \
+ _GEN7_PIPEA_DE_LOAD_SL, \
+ _GEN7_PIPEB_DE_LOAD_SL)
+
+/*
+ * Reset registers
+ */
+#define DEBUG_RESET_I830 0x6070
+#define DEBUG_RESET_FULL (1<<7)
+#define DEBUG_RESET_RENDER (1<<8)
+#define DEBUG_RESET_DISPLAY (1<<9)
+
+/*
+ * IOSF sideband
+ */
+#define VLV_IOSF_DOORBELL_REQ (VLV_DISPLAY_BASE + 0x2100)
+#define IOSF_DEVFN_SHIFT 24
+#define IOSF_OPCODE_SHIFT 16
+#define IOSF_PORT_SHIFT 8
+#define IOSF_BYTE_ENABLES_SHIFT 4
+#define IOSF_BAR_SHIFT 1
+#define IOSF_SB_BUSY (1<<0)
+#define IOSF_PORT_BUNIT 0x3
+#define IOSF_PORT_PUNIT 0x4
+#define IOSF_PORT_NC 0x11
+#define IOSF_PORT_DPIO 0x12
+#define IOSF_PORT_DPIO_2 0x1a
+#define IOSF_PORT_GPIO_NC 0x13
+#define IOSF_PORT_CCK 0x14
+#define IOSF_PORT_CCU 0xA9
+#define IOSF_PORT_GPS_CORE 0x48
+#define IOSF_PORT_FLISDSI 0x1B
+#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104)
+#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108)
+
+/* See configdb bunit SB addr map */
+#define BUNIT_REG_BISOC 0x11
+
+#define PUNIT_REG_DSPFREQ 0x36
+#define DSPFREQSTAT_SHIFT 30
+#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
+#define DSPFREQGUAR_SHIFT 14
+#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
+
+/* See the PUNIT HAS v0.8 for the below bits */
+enum punit_power_well {
+ PUNIT_POWER_WELL_RENDER = 0,
+ PUNIT_POWER_WELL_MEDIA = 1,
+ PUNIT_POWER_WELL_DISP2D = 3,
+ PUNIT_POWER_WELL_DPIO_CMN_BC = 5,
+ PUNIT_POWER_WELL_DPIO_TX_B_LANES_01 = 6,
+ PUNIT_POWER_WELL_DPIO_TX_B_LANES_23 = 7,
+ PUNIT_POWER_WELL_DPIO_TX_C_LANES_01 = 8,
+ PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9,
+ PUNIT_POWER_WELL_DPIO_RX0 = 10,
+ PUNIT_POWER_WELL_DPIO_RX1 = 11,
+
+ PUNIT_POWER_WELL_NUM,
+};
+
+#define PUNIT_REG_PWRGT_CTRL 0x60
+#define PUNIT_REG_PWRGT_STATUS 0x61
+#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2))
+#define PUNIT_PWRGT_PWR_ON(power_well) (0 << ((power_well) * 2))
+#define PUNIT_PWRGT_CLK_GATE(power_well) (1 << ((power_well) * 2))
+#define PUNIT_PWRGT_RESET(power_well) (2 << ((power_well) * 2))
+#define PUNIT_PWRGT_PWR_GATE(power_well) (3 << ((power_well) * 2))
+
+#define PUNIT_REG_GPU_LFM 0xd3
+#define PUNIT_REG_GPU_FREQ_REQ 0xd4
+#define PUNIT_REG_GPU_FREQ_STS 0xd8
+#define GENFREQSTATUS (1<<0)
+#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
+
+#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
+#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
+
+#define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c
+#define FB_GFX_MAX_FREQ_FUSE_SHIFT 3
+#define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8
+#define FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT 11
+#define FB_GFX_FGUARANTEED_FREQ_FUSE_MASK 0x0007f800
+#define IOSF_NC_FB_GFX_FMAX_FUSE_HI 0x34
+#define FB_FMAX_VMIN_FREQ_HI_MASK 0x00000007
+#define IOSF_NC_FB_GFX_FMAX_FUSE_LO 0x30
+#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
+#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
+
+/* vlv2 north clock (CCK) registers */
+#define CCK_FUSE_REG 0x8
+#define CCK_FUSE_HPLL_FREQ_MASK 0x3
+#define CCK_REG_DSI_PLL_FUSE 0x44
+#define CCK_REG_DSI_PLL_CONTROL 0x48
+#define DSI_PLL_VCO_EN (1 << 31)
+#define DSI_PLL_LDO_GATE (1 << 30)
+#define DSI_PLL_P1_POST_DIV_SHIFT 17
+#define DSI_PLL_P1_POST_DIV_MASK (0x1ff << 17)
+#define DSI_PLL_P2_MUX_DSI0_DIV2 (1 << 13)
+#define DSI_PLL_P3_MUX_DSI1_DIV2 (1 << 12)
+#define DSI_PLL_MUX_MASK (3 << 9)
+#define DSI_PLL_MUX_DSI0_DSIPLL (0 << 10)
+#define DSI_PLL_MUX_DSI0_CCK (1 << 10)
+#define DSI_PLL_MUX_DSI1_DSIPLL (0 << 9)
+#define DSI_PLL_MUX_DSI1_CCK (1 << 9)
+#define DSI_PLL_CLK_GATE_MASK (0xf << 5)
+#define DSI_PLL_CLK_GATE_DSI0_DSIPLL (1 << 8)
+#define DSI_PLL_CLK_GATE_DSI1_DSIPLL (1 << 7)
+#define DSI_PLL_CLK_GATE_DSI0_CCK (1 << 6)
+#define DSI_PLL_CLK_GATE_DSI1_CCK (1 << 5)
+#define DSI_PLL_LOCK (1 << 0)
+#define CCK_REG_DSI_PLL_DIVIDER 0x4c
+#define DSI_PLL_LFSR (1 << 31)
+#define DSI_PLL_FRACTION_EN (1 << 30)
+#define DSI_PLL_FRAC_COUNTER_SHIFT 27
+#define DSI_PLL_FRAC_COUNTER_MASK (7 << 27)
+#define DSI_PLL_USYNC_CNT_SHIFT 18
+#define DSI_PLL_USYNC_CNT_MASK (0x1ff << 18)
+#define DSI_PLL_N1_DIV_SHIFT 16
+#define DSI_PLL_N1_DIV_MASK (3 << 16)
+#define DSI_PLL_M1_DIV_SHIFT 0
+#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
+#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
+
+/**
+ * DOC: DPIO
+ *
+ * VLV and CHV have slightly peculiar display PHYs for driving DP/HDMI
+ * ports. DPIO is the name given to such a display PHY. These PHYs
+ * don't follow the standard programming model using direct MMIO
+ * registers, and instead their registers must be accessed through IOSF
+ * sideband. VLV has one such PHY for driving ports B and C, and CHV
+ * adds another PHY for driving port D. Each PHY responds to a specific
+ * IOSF-SB port.
+ *
+ * Each display PHY is made up of one or two channels. Each channel
+ * houses a common lane part which contains the PLL and other common
+ * logic. CH0 common lane also contains the IOSF-SB logic for the
+ * Common Register Interface (CRI), i.e. the DPIO registers. The CRI
+ * clock must be running when any DPIO registers are accessed.
+ *
+ * In addition to having their own registers, the PHYs are also
+ * controlled through some dedicated signals from the display
+ * controller. These include PLL reference clock enable, PLL enable,
+ * and CRI clock selection, for example.
+ *
+ * Each channel also has two splines (also called data lanes), and
+ * each spline is made up of one Physical Access Coding Sub-Layer
+ * (PCS) block and two TX lanes. So each channel has two PCS blocks
+ * and four TX lanes. The TX lanes are used as DP lanes or TMDS
+ * data/clock pairs depending on the output type.
+ *
+ * Additionally the PHY contains an AUX lane with AUX blocks
+ * for each channel. This is used for DP AUX communication, but
+ * this fact isn't really relevant for the driver since AUX is
+ * controlled from the display controller side. No DPIO registers
+ * need to be accessed during AUX communication.
+ *
+ * Generally the common lane corresponds to the pipe and
+ * the spline (PCS/TX) corresponds to the port.
+ *
+ * For dual channel PHY (VLV/CHV):
+ *
+ * pipe A == CMN/PLL/REF CH0
+ *
+ * pipe B == CMN/PLL/REF CH1
+ *
+ * port B == PCS/TX CH0
+ *
+ * port C == PCS/TX CH1
+ *
+ * This is especially important when we cross the streams
+ * i.e. drive port B with pipe B, or port C with pipe A.
+ *
+ * For single channel PHY (CHV):
+ *
+ * pipe C == CMN/PLL/REF CH0
+ *
+ * port D == PCS/TX CH0
+ *
+ * Note: digital port B is DDI0, digital port C is DDI1,
+ * digital port D is DDI2
+ */
+/*
+ * Dual channel PHY (VLV/CHV)
+ * ---------------------------------
+ * | CH0 | CH1 |
+ * | CMN/PLL/REF | CMN/PLL/REF |
+ * |---------------|---------------| Display PHY
+ * | PCS01 | PCS23 | PCS01 | PCS23 |
+ * |-------|-------|-------|-------|
+ * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
+ * ---------------------------------
+ * | DDI0 | DDI1 | DP/HDMI ports
+ * ---------------------------------
+ *
+ * Single channel PHY (CHV)
+ * -----------------
+ * | CH0 |
+ * | CMN/PLL/REF |
+ * |---------------| Display PHY
+ * | PCS01 | PCS23 |
+ * |-------|-------|
+ * |TX0|TX1|TX2|TX3|
+ * -----------------
+ * | DDI2 | DP/HDMI port
+ * -----------------
+ */
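+
Tying the DOC comment to the macros below: the common-lane (PLL) registers are indexed per channel via _PIPE, and DPIO_PHY() selects the PHY from the pipe. A small check of the mapping described above - the pipe & 1 channel selection is an illustrative simplification, and the offsets are the VLV_PLL_DW3 ones defined below:

#include <stdio.h>

#define _PIPE(pipe, a, b)  ((a) + (pipe)*((b)-(a)))
#define _VLV_PLL_DW3_CH0   0x800c
#define _VLV_PLL_DW3_CH1   0x802c
#define VLV_PLL_DW3(ch)    _PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1)
#define DPIO_PHY(pipe)     ((pipe) >> 1)

int main(void)
{
	int pipe;

	/* Pipe A -> PHY0/CH0, pipe B -> PHY0/CH1, and on CHV pipe C
	 * sits alone on PHY1's single channel (CH0). */
	for (pipe = 0; pipe < 3; pipe++)
		printf("pipe %c: phy %d, PLL_DW3 @ 0x%x\n",
		       'A' + pipe, DPIO_PHY(pipe), VLV_PLL_DW3(pipe & 1));
	return 0;
}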
+#define DPIO_DEVFN 0
+
+#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
+#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
+#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
+#define DPIO_SFR_BYPASS (1<<1)
+#define DPIO_CMNRST (1<<0)
+
+#define DPIO_PHY(pipe) ((pipe) >> 1)
+#define DPIO_PHY_IOSF_PORT(phy) (dev_priv->dpio_phy_iosf_port[phy])
+
+/*
+ * Per pipe/PLL DPIO regs
+ */
+#define _VLV_PLL_DW3_CH0 0x800c
+#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
+#define DPIO_POST_DIV_DAC 0
+#define DPIO_POST_DIV_HDMIDP 1 /* DAC 225-400M rate */
+#define DPIO_POST_DIV_LVDS1 2
+#define DPIO_POST_DIV_LVDS2 3
+#define DPIO_K_SHIFT (24) /* 4 bits */
+#define DPIO_P1_SHIFT (21) /* 3 bits */
+#define DPIO_P2_SHIFT (16) /* 5 bits */
+#define DPIO_N_SHIFT (12) /* 4 bits */
+#define DPIO_ENABLE_CALIBRATION (1<<11)
+#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
+#define DPIO_M2DIV_MASK 0xff
+#define _VLV_PLL_DW3_CH1 0x802c
+#define VLV_PLL_DW3(ch) _PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1)
+
+#define _VLV_PLL_DW5_CH0 0x8014
+#define DPIO_REFSEL_OVERRIDE 27
+#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
+#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
+#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
+#define DPIO_PLL_REFCLK_SEL_MASK 3
+#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
+#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
+#define _VLV_PLL_DW5_CH1 0x8034
+#define VLV_PLL_DW5(ch) _PIPE(ch, _VLV_PLL_DW5_CH0, _VLV_PLL_DW5_CH1)
+
+#define _VLV_PLL_DW7_CH0 0x801c
+#define _VLV_PLL_DW7_CH1 0x803c
+#define VLV_PLL_DW7(ch) _PIPE(ch, _VLV_PLL_DW7_CH0, _VLV_PLL_DW7_CH1)
+
+#define _VLV_PLL_DW8_CH0 0x8040
+#define _VLV_PLL_DW8_CH1 0x8060
+#define VLV_PLL_DW8(ch) _PIPE(ch, _VLV_PLL_DW8_CH0, _VLV_PLL_DW8_CH1)
+
+#define VLV_PLL_DW9_BCAST 0xc044
+#define _VLV_PLL_DW9_CH0 0x8044
+#define _VLV_PLL_DW9_CH1 0x8064
+#define VLV_PLL_DW9(ch) _PIPE(ch, _VLV_PLL_DW9_CH0, _VLV_PLL_DW9_CH1)
+
+#define _VLV_PLL_DW10_CH0 0x8048
+#define _VLV_PLL_DW10_CH1 0x8068
+#define VLV_PLL_DW10(ch) _PIPE(ch, _VLV_PLL_DW10_CH0, _VLV_PLL_DW10_CH1)
+
+#define _VLV_PLL_DW11_CH0 0x804c
+#define _VLV_PLL_DW11_CH1 0x806c
+#define VLV_PLL_DW11(ch) _PIPE(ch, _VLV_PLL_DW11_CH0, _VLV_PLL_DW11_CH1)
+
+/* The spec's numbering for the ref block starts at DW10 */
+#define VLV_REF_DW13 0x80ac
+
+#define VLV_CMN_DW0 0x8100
+
+/*
+ * Per DDI channel DPIO regs
+ */
+
+#define _VLV_PCS_DW0_CH0 0x8200
+#define _VLV_PCS_DW0_CH1 0x8400
+#define DPIO_PCS_TX_LANE2_RESET (1<<16)
+#define DPIO_PCS_TX_LANE1_RESET (1<<7)
+#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
+
+#define _VLV_PCS01_DW0_CH0 0x200
+#define _VLV_PCS23_DW0_CH0 0x400
+#define _VLV_PCS01_DW0_CH1 0x2600
+#define _VLV_PCS23_DW0_CH1 0x2800
+#define VLV_PCS01_DW0(ch) _PORT(ch, _VLV_PCS01_DW0_CH0, _VLV_PCS01_DW0_CH1)
+#define VLV_PCS23_DW0(ch) _PORT(ch, _VLV_PCS23_DW0_CH0, _VLV_PCS23_DW0_CH1)
+
+#define _VLV_PCS_DW1_CH0 0x8204
+#define _VLV_PCS_DW1_CH1 0x8404
+#define CHV_PCS_REQ_SOFTRESET_EN (1<<23)
+#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22)
+#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
+#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
+#define DPIO_PCS_CLK_SOFT_RESET (1<<5)
+#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
+
+#define _VLV_PCS01_DW1_CH0 0x204
+#define _VLV_PCS23_DW1_CH0 0x404
+#define _VLV_PCS01_DW1_CH1 0x2604
+#define _VLV_PCS23_DW1_CH1 0x2804
+#define VLV_PCS01_DW1(ch) _PORT(ch, _VLV_PCS01_DW1_CH0, _VLV_PCS01_DW1_CH1)
+#define VLV_PCS23_DW1(ch) _PORT(ch, _VLV_PCS23_DW1_CH0, _VLV_PCS23_DW1_CH1)
+
+#define _VLV_PCS_DW8_CH0 0x8220
+#define _VLV_PCS_DW8_CH1 0x8420
+#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
+
+#define _VLV_PCS01_DW8_CH0 0x0220
+#define _VLV_PCS23_DW8_CH0 0x0420
+#define _VLV_PCS01_DW8_CH1 0x2620
+#define _VLV_PCS23_DW8_CH1 0x2820
+#define VLV_PCS01_DW8(port) _PORT(port, _VLV_PCS01_DW8_CH0, _VLV_PCS01_DW8_CH1)
+#define VLV_PCS23_DW8(port) _PORT(port, _VLV_PCS23_DW8_CH0, _VLV_PCS23_DW8_CH1)
+
+#define _VLV_PCS_DW9_CH0 0x8224
+#define _VLV_PCS_DW9_CH1 0x8424
+#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
+
+#define _CHV_PCS_DW10_CH0 0x8228
+#define _CHV_PCS_DW10_CH1 0x8428
+#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30)
+#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31)
+#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
+
+#define _VLV_PCS01_DW10_CH0 0x0228
+#define _VLV_PCS23_DW10_CH0 0x0428
+#define _VLV_PCS01_DW10_CH1 0x2628
+#define _VLV_PCS23_DW10_CH1 0x2828
+#define VLV_PCS01_DW10(port) _PORT(port, _VLV_PCS01_DW10_CH0, _VLV_PCS01_DW10_CH1)
+#define VLV_PCS23_DW10(port) _PORT(port, _VLV_PCS23_DW10_CH0, _VLV_PCS23_DW10_CH1)
+
+#define _VLV_PCS_DW11_CH0 0x822c
+#define _VLV_PCS_DW11_CH1 0x842c
+#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
+
+#define _VLV_PCS_DW12_CH0 0x8230
+#define _VLV_PCS_DW12_CH1 0x8430
+#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
+
+#define _VLV_PCS_DW14_CH0 0x8238
+#define _VLV_PCS_DW14_CH1 0x8438
+#define VLV_PCS_DW14(ch) _PORT(ch, _VLV_PCS_DW14_CH0, _VLV_PCS_DW14_CH1)
+
+#define _VLV_PCS_DW23_CH0 0x825c
+#define _VLV_PCS_DW23_CH1 0x845c
+#define VLV_PCS_DW23(ch) _PORT(ch, _VLV_PCS_DW23_CH0, _VLV_PCS_DW23_CH1)
+
+#define _VLV_TX_DW2_CH0 0x8288
+#define _VLV_TX_DW2_CH1 0x8488
+#define DPIO_SWING_MARGIN_SHIFT 16
+#define DPIO_SWING_MARGIN_MASK (0xff << DPIO_SWING_MARGIN_SHIFT)
+#define DPIO_UNIQ_TRANS_SCALE_SHIFT 8
+#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
+
+#define _VLV_TX_DW3_CH0 0x828c
+#define _VLV_TX_DW3_CH1 0x848c
+/* The following bit is for the CHV PHY */
+#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27)
+#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
+
+#define _VLV_TX_DW4_CH0 0x8290
+#define _VLV_TX_DW4_CH1 0x8490
+#define DPIO_SWING_DEEMPH9P5_SHIFT 24
+#define DPIO_SWING_DEEMPH9P5_MASK (0xff << DPIO_SWING_DEEMPH9P5_SHIFT)
+#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
+
+#define _VLV_TX3_DW4_CH0 0x690
+#define _VLV_TX3_DW4_CH1 0x2a90
+#define VLV_TX3_DW4(ch) _PORT(ch, _VLV_TX3_DW4_CH0, _VLV_TX3_DW4_CH1)
+
+#define _VLV_TX_DW5_CH0 0x8294
+#define _VLV_TX_DW5_CH1 0x8494
+#define DPIO_TX_OCALINIT_EN (1<<31)
+#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
+
+#define _VLV_TX_DW11_CH0 0x82ac
+#define _VLV_TX_DW11_CH1 0x84ac
+#define VLV_TX_DW11(ch) _PORT(ch, _VLV_TX_DW11_CH0, _VLV_TX_DW11_CH1)
+
+#define _VLV_TX_DW14_CH0 0x82b8
+#define _VLV_TX_DW14_CH1 0x84b8
+#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1)
+
+/* CHV dpPhy registers */
+#define _CHV_PLL_DW0_CH0 0x8000
+#define _CHV_PLL_DW0_CH1 0x8180
+#define CHV_PLL_DW0(ch) _PIPE(ch, _CHV_PLL_DW0_CH0, _CHV_PLL_DW0_CH1)
+
+#define _CHV_PLL_DW1_CH0 0x8004
+#define _CHV_PLL_DW1_CH1 0x8184
+#define DPIO_CHV_N_DIV_SHIFT 8
+#define DPIO_CHV_M1_DIV_BY_2 (0 << 0)
+#define CHV_PLL_DW1(ch) _PIPE(ch, _CHV_PLL_DW1_CH0, _CHV_PLL_DW1_CH1)
+
+#define _CHV_PLL_DW2_CH0 0x8008
+#define _CHV_PLL_DW2_CH1 0x8188
+#define CHV_PLL_DW2(ch) _PIPE(ch, _CHV_PLL_DW2_CH0, _CHV_PLL_DW2_CH1)
+
+#define _CHV_PLL_DW3_CH0 0x800c
+#define _CHV_PLL_DW3_CH1 0x818c
+#define DPIO_CHV_FRAC_DIV_EN (1 << 16)
+#define DPIO_CHV_FIRST_MOD (0 << 8)
+#define DPIO_CHV_SECOND_MOD (1 << 8)
+#define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0
+#define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1)
+
+#define _CHV_PLL_DW6_CH0 0x8018
+#define _CHV_PLL_DW6_CH1 0x8198
+#define DPIO_CHV_GAIN_CTRL_SHIFT 16
+#define DPIO_CHV_INT_COEFF_SHIFT 8
+#define DPIO_CHV_PROP_COEFF_SHIFT 0
+#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
+
+#define _CHV_CMN_DW13_CH0 0x8134
+#define _CHV_CMN_DW0_CH1 0x8080
+#define DPIO_CHV_S1_DIV_SHIFT 21
+#define DPIO_CHV_P1_DIV_SHIFT 13 /* 3 bits */
+#define DPIO_CHV_P2_DIV_SHIFT 8 /* 5 bits */
+#define DPIO_CHV_K_DIV_SHIFT 4
+#define DPIO_PLL_FREQLOCK (1 << 1)
+#define DPIO_PLL_LOCK (1 << 0)
+#define CHV_CMN_DW13(ch) _PIPE(ch, _CHV_CMN_DW13_CH0, _CHV_CMN_DW0_CH1)
+
+#define _CHV_CMN_DW14_CH0 0x8138
+#define _CHV_CMN_DW1_CH1 0x8084
+#define DPIO_AFC_RECAL (1 << 14)
+#define DPIO_DCLKP_EN (1 << 13)
+#define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1)
+
+#define CHV_CMN_DW30 0x8178
+#define DPIO_LRC_BYPASS (1 << 3)
+
+#define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \
+ (lane) * 0x200 + (offset))
+
+#define CHV_TX_DW0(ch, lane) _TXLANE(ch, lane, 0x80)
+#define CHV_TX_DW1(ch, lane) _TXLANE(ch, lane, 0x84)
+#define CHV_TX_DW2(ch, lane) _TXLANE(ch, lane, 0x88)
+#define CHV_TX_DW3(ch, lane) _TXLANE(ch, lane, 0x8c)
+#define CHV_TX_DW4(ch, lane) _TXLANE(ch, lane, 0x90)
+#define CHV_TX_DW5(ch, lane) _TXLANE(ch, lane, 0x94)
+#define CHV_TX_DW6(ch, lane) _TXLANE(ch, lane, 0x98)
+#define CHV_TX_DW7(ch, lane) _TXLANE(ch, lane, 0x9c)
+#define CHV_TX_DW8(ch, lane) _TXLANE(ch, lane, 0xa0)
+#define CHV_TX_DW9(ch, lane) _TXLANE(ch, lane, 0xa4)
+#define CHV_TX_DW10(ch, lane) _TXLANE(ch, lane, 0xa8)
+#define CHV_TX_DW11(ch, lane) _TXLANE(ch, lane, 0xac)
+#define DPIO_FRC_LATENCY_SHFIT 8
+#define CHV_TX_DW14(ch, lane) _TXLANE(ch, lane, 0xb8)
+#define DPIO_UPAR_SHIFT 30
+/*
* Fence registers
*/
#define FENCE_REG_830_0 0x2000
@@ -221,7 +916,7 @@
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
#define I830_FENCE_PITCH_SHIFT 4
#define I830_FENCE_REG_VALID (1<<0)
-#define I915_FENCE_MAX_PITCH_VAL 0x10
+#define I915_FENCE_MAX_PITCH_VAL 4
#define I830_FENCE_MAX_PITCH_VAL 6
#define I830_FENCE_MAX_SIZE_VAL (1<<8)
@@ -234,14 +929,85 @@
#define I965_FENCE_REG_VALID (1<<0)
#define I965_FENCE_MAX_PITCH_VAL 0x0400
+#define FENCE_REG_SANDYBRIDGE_0 0x100000
+#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
+#define GEN7_FENCE_MAX_PITCH_VAL 0x0800
+
+/* control register for cpu gtt access */
+#define TILECTL 0x101000
+#define TILECTL_SWZCTL (1 << 0)
+#define TILECTL_TLB_PREFETCH_DIS (1 << 2)
+#define TILECTL_BACKSNOOP_DIS (1 << 3)
+
/*
* Instruction and interrupt control regs
*/
+#define PGTBL_CTL 0x02020
+#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
+#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
#define PGTBL_ER 0x02024
-#define PRB0_TAIL 0x02030
-#define PRB0_HEAD 0x02034
-#define PRB0_START 0x02038
-#define PRB0_CTL 0x0203c
+#define RENDER_RING_BASE 0x02000
+#define BSD_RING_BASE 0x04000
+#define GEN6_BSD_RING_BASE 0x12000
+#define GEN8_BSD2_RING_BASE 0x1c000
+#define VEBOX_RING_BASE 0x1a000
+#define BLT_RING_BASE 0x22000
+#define RING_TAIL(base) ((base)+0x30)
+#define RING_HEAD(base) ((base)+0x34)
+#define RING_START(base) ((base)+0x38)
+#define RING_CTL(base) ((base)+0x3c)
+#define RING_SYNC_0(base) ((base)+0x40)
+#define RING_SYNC_1(base) ((base)+0x44)
+#define RING_SYNC_2(base) ((base)+0x48)
+#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
+#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
+#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE))
+#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE))
+#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE))
+#define GEN6_VVESYNC (RING_SYNC_2(GEN6_BSD_RING_BASE))
+#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE))
+#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE))
+#define GEN6_BVESYNC (RING_SYNC_2(BLT_RING_BASE))
+#define GEN6_VEBSYNC (RING_SYNC_0(VEBOX_RING_BASE))
+#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
+#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
+#define GEN6_NOSYNC 0
+#define RING_MAX_IDLE(base) ((base)+0x54)
+#define RING_HWS_PGA(base) ((base)+0x80)
+#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
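+
+/*
+ * Illustrative note (not part of the original patch): the per-ring forms
+ * fold the legacy fixed names in, e.g. RING_TAIL(RENDER_RING_BASE) =
+ * 0x2030, which is the old PRB0_TAIL, and RING_TAIL(BSD_RING_BASE) =
+ * 0x4030 for the BSD ring.
+ */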
+
+#define GEN7_WR_WATERMARK 0x4028
+#define GEN7_GFX_PRIO_CTRL 0x402C
+#define ARB_MODE 0x4030
+#define ARB_MODE_SWIZZLE_SNB (1<<4)
+#define ARB_MODE_SWIZZLE_IVB (1<<5)
+#define GEN7_GFX_PEND_TLB0 0x4034
+#define GEN7_GFX_PEND_TLB1 0x4038
+/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
+#define GEN7_LRA_LIMITS_BASE 0x403C
+#define GEN7_LRA_LIMITS_REG_NUM 13
+#define GEN7_MEDIA_MAX_REQ_COUNT 0x4070
+#define GEN7_GFX_MAX_REQ_COUNT 0x4074
+
+#define GAMTARBMODE 0x04a08
+#define ARB_MODE_BWGTLB_DISABLE (1<<9)
+#define ARB_MODE_SWIZZLE_BDW (1<<1)
+#define RENDER_HWS_PGA_GEN7 (0x04080)
+#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
+#define RING_FAULT_GTTSEL_MASK (1<<11)
+#define RING_FAULT_SRCID(x) ((x >> 3) & 0xff)
+#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
+#define RING_FAULT_VALID (1<<0)
+#define DONE_REG 0x40b0
+#define GEN8_PRIVATE_PAT 0x40e0
+#define BSD_HWS_PGA_GEN7 (0x04180)
+#define BLT_HWS_PGA_GEN7 (0x04280)
+#define VEBOX_HWS_PGA_GEN7 (0x04380)
+#define RING_ACTHD(base) ((base)+0x74)
+#define RING_ACTHD_UDW(base) ((base)+0x5c)
+#define RING_NOPID(base) ((base)+0x94)
+#define RING_IMR(base) ((base)+0xa8)
+#define RING_TIMESTAMP(base) ((base)+0x358)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
@@ -254,13 +1020,38 @@
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
+#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
+#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
+#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
+
+#define GEN7_TLB_RD_ADDR 0x4700
+
+#if 0
+#define PRB0_TAIL 0x02030
+#define PRB0_HEAD 0x02034
+#define PRB0_START 0x02038
+#define PRB0_CTL 0x0203c
#define PRB1_TAIL 0x02040 /* 915+ only */
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
#define PRB1_CTL 0x0204c /* 915+ only */
+#endif
#define IPEIR_I965 0x02064
#define IPEHR_I965 0x02068
#define INSTDONE_I965 0x0206c
+#define GEN7_INSTDONE_1 0x0206c
+#define GEN7_SC_INSTDONE 0x07100
+#define GEN7_SAMPLER_INSTDONE 0x0e160
+#define GEN7_ROW_INSTDONE 0x0e164
+#define I915_NUM_INSTDONE_REG 4
+#define RING_IPEIR(base) ((base)+0x64)
+#define RING_IPEHR(base) ((base)+0x68)
+#define RING_INSTDONE(base) ((base)+0x6c)
+#define RING_INSTPS(base) ((base)+0x70)
+#define RING_DMA_FADD(base) ((base)+0x78)
+#define RING_DMA_FADD_UDW(base) ((base)+0x60) /* gen8+ */
+#define RING_INSTPM(base) ((base)+0xc0)
+#define RING_MI_MODE(base) ((base)+0x9c)
#define INSTPS 0x02070 /* 965+ only */
#define INSTDONE1 0x0207c /* 965+ only */
#define ACTHD_I965 0x02074
@@ -274,28 +1065,112 @@
#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
+#define DMA_FADD_I8XX 0x020d0
+#define RING_BBSTATE(base) ((base)+0x110)
+#define RING_BBADDR(base) ((base)+0x140)
+#define RING_BBADDR_UDW(base) ((base)+0x168) /* gen8+ */
+
+#define ERROR_GEN6 0x040a0
+#define GEN7_ERR_INT 0x44040
+#define ERR_INT_POISON (1<<31)
+#define ERR_INT_MMIO_UNCLAIMED (1<<13)
+#define ERR_INT_PIPE_CRC_DONE_C (1<<8)
+#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
+#define ERR_INT_PIPE_CRC_DONE_B (1<<5)
+#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
+#define ERR_INT_PIPE_CRC_DONE_A (1<<2)
+#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + pipe*3))
+#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
+#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
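+
+/*
+ * Worked example (illustrative): for pipe B (pipe == 1),
+ * ERR_INT_FIFO_UNDERRUN(1) = 1<<3 and ERR_INT_PIPE_CRC_DONE(1) = 1<<5,
+ * matching the _B defines above.
+ */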
+
+#define FPGA_DBG 0x42300
+#define FPGA_DBG_RM_NOCLAIM (1<<31)
+
+#define DERRMR 0x44050
+/* Note that HBLANK events are reserved on bdw+ */
+#define DERRMR_PIPEA_SCANLINE (1<<0)
+#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
+#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2)
+#define DERRMR_PIPEA_VBLANK (1<<3)
+#define DERRMR_PIPEA_HBLANK (1<<5)
+#define DERRMR_PIPEB_SCANLINE (1<<8)
+#define DERRMR_PIPEB_PRI_FLIP_DONE (1<<9)
+#define DERRMR_PIPEB_SPR_FLIP_DONE (1<<10)
+#define DERRMR_PIPEB_VBLANK (1<<11)
+#define DERRMR_PIPEB_HBLANK (1<<13)
+/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
+#define DERRMR_PIPEC_SCANLINE (1<<14)
+#define DERRMR_PIPEC_PRI_FLIP_DONE (1<<15)
+#define DERRMR_PIPEC_SPR_FLIP_DONE (1<<20)
+#define DERRMR_PIPEC_VBLANK (1<<21)
+#define DERRMR_PIPEC_HBLANK (1<<22)
+
+
+/* GM45+ chicken bits -- debug workaround bits that may be required
+ * for various sorts of correct behavior. The top 16 bits of each are
+ * the enables for writing to the corresponding low bit.
+ */
+#define _3D_CHICKEN 0x02084
+#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
+#define _3D_CHICKEN2 0x0208c
+/* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+ * particular danger of not doing so is not specified.
+ */
+# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
+#define _3D_CHICKEN3 0x02090
+#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
+#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
+#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */
+#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
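+
+/*
+ * Sketch (illustrative, not part of this patch): since the top 16 bits
+ * are write enables, setting a chicken bit writes the enable and the
+ * bit together, e.g.
+ *
+ *	I915_WRITE(_3D_CHICKEN3,
+ *		   _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL << 16 |
+ *		   _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
+ *
+ * while writing only the high half clears it.
+ */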
+
+#define MI_MODE 0x0209c
+# define VS_TIMER_DISPATCH (1 << 6)
+# define MI_FLUSH_ENABLE (1 << 12)
+# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
+# define MODE_IDLE (1 << 9)
+# define STOP_RING (1 << 8)
+
+#define GEN6_GT_MODE 0x20d0
+#define GEN7_GT_MODE 0x7008
+#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7))
+#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
+#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
+#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
+#define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16)
+#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
+
+#define GFX_MODE 0x02520
+#define GFX_MODE_GEN7 0x0229c
+#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
+#define GFX_RUN_LIST_ENABLE (1<<15)
+#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
+#define GFX_SURFACE_FAULT_ENABLE (1<<12)
+#define GFX_REPLAY_MODE (1<<11)
+#define GFX_PSMI_GRANULARITY (1<<10)
+#define GFX_PPGTT_ENABLE (1<<9)
+
+#define VLV_DISPLAY_BASE 0x180000
+#define VLV_MIPI_BASE VLV_DISPLAY_BASE
+
+#define VLV_GU_CTL0 (VLV_DISPLAY_BASE + 0x2030)
+#define VLV_GU_CTL1 (VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
-#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
-#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
-#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
-#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
-#define I915_HWB_OOM_INTERRUPT (1<<13)
-#define I915_SYNC_STATUS_INTERRUPT (1<<12)
-#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
-#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
-#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
-#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
-#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
-#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
-#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
-#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
-#define I915_DEBUG_INTERRUPT (1<<2)
-#define I915_USER_INTERRUPT (1<<1)
-#define I915_ASLE_INTERRUPT (1<<0)
+#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060)
+#define GINT_DIS (1<<22)
+#define GCFG_DIS (1<<8)
+#define VLV_GUNIT_CLOCK_GATE2 (VLV_DISPLAY_BASE + 0x2064)
+#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084)
+#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0)
+#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
+#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
+#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
+#define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120)
+#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
#define EIR 0x020b0
#define EMR 0x020b4
#define ESR 0x020b8
@@ -306,26 +1181,218 @@
#define I915_ERROR_MEMORY_REFRESH (1<<1)
#define I915_ERROR_INSTRUCTION (1<<0)
#define INSTPM 0x020c0
+#define INSTPM_SELF_EN (1<<12) /* 915GM only */
+#define INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts
+ will not assert AGPBUSY# and will only
+ be delivered when out of C3. */
+#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1<<9)
+#define INSTPM_SYNC_FLUSH (1<<5)
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
-#define FW_BLC2 0x020dc
+#define FW_BLC2 0x020dc
#define FW_BLC_SELF 0x020e0 /* 915+ only */
-#define FW_BLC_SELF_EN (1<<15)
+#define FW_BLC_SELF_EN_MASK (1<<31)
+#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
+#define FW_BLC_SELF_EN (1<<15) /* 945 only */
#define MM_BURST_LENGTH 0x00700000
#define MM_FIFO_WATERMARK 0x0001F000
#define LM_BURST_LENGTH 0x00000700
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE 0x020e4 /* 915+ only */
+
+/* Make render/texture TLB fetches lower priority than associated data
+ * fetches. This is not turned on by default
+ */
+#define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15)
+
+/* Isoch request wait on GTT enable (Display A/B/C streams).
+ * Make isoch requests stall on the TLB update. May cause
+ * display underruns (test mode only)
+ */
+#define MI_ARB_ISOCH_WAIT_GTT (1 << 14)
+
+/* Block grant count for isoch requests when block count is
+ * set to a finite value.
+ */
+#define MI_ARB_BLOCK_GRANT_MASK (3 << 12)
+#define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */
+#define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */
+#define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */
+#define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */
+
+/* Enable render writes to complete in C2/C3/C4 power states.
+ * If this isn't enabled, render writes are prevented in low
+ * power states. That seems bad to me.
+ */
+#define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11)
+
+/* This acknowledges an async flip immediately instead
+ * of waiting for 2TLB fetches.
+ */
+#define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10)
+
+/* Enables non-sequential data reads through arbiter
+ */
+#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9)
+
+/* Disable FSB snooping of cacheable write cycles from binner/render
+ * command stream
+ */
+#define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8)
+
+/* Arbiter time slice for non-isoch streams */
+#define MI_ARB_TIME_SLICE_MASK (7 << 5)
+#define MI_ARB_TIME_SLICE_1 (0 << 5)
+#define MI_ARB_TIME_SLICE_2 (1 << 5)
+#define MI_ARB_TIME_SLICE_4 (2 << 5)
+#define MI_ARB_TIME_SLICE_6 (3 << 5)
+#define MI_ARB_TIME_SLICE_8 (4 << 5)
+#define MI_ARB_TIME_SLICE_10 (5 << 5)
+#define MI_ARB_TIME_SLICE_14 (6 << 5)
+#define MI_ARB_TIME_SLICE_16 (7 << 5)
+
+/* Low priority grace period page size */
+#define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */
+#define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4)
+
+/* Disable display A/B trickle feed */
+#define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2)
+
+/* Set display plane priority */
+#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */
+#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
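+
+/*
+ * Sketch (illustrative, assuming MI_ARB_STATE is a masked register like
+ * the chicken bits above): selecting a time slice writes the field mask
+ * in the high half alongside the value, e.g.
+ *
+ *	I915_WRITE(MI_ARB_STATE,
+ *		   MI_ARB_TIME_SLICE_MASK << 16 | MI_ARB_TIME_SLICE_8);
+ */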
+
+#define MI_STATE 0x020e4 /* gen2 only */
+#define MI_AGPBUSY_INT_EN (1 << 1) /* 85x only */
+#define MI_AGPBUSY_830_MODE (1 << 0) /* 85x only */
+
#define CACHE_MODE_0 0x02120 /* 915+ only */
-#define CM0_MASK_SHIFT 16
+#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
+#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
#define CM0_DEPTH_EVICT_DISABLE (1<<4)
#define CM0_COLOR_EVICT_DISABLE (1<<3)
#define CM0_DEPTH_WRITE_DISABLE (1<<1)
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
-
+#define GFX_FLSH_CNTL_GEN6 0x101008
+#define GFX_FLSH_CNTL_EN (1<<0)
+#define ECOSKPD 0x021d0
+#define ECO_GATING_CX_ONLY (1<<3)
+#define ECO_FLIP_DONE (1<<0)
+
+#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */
+#define RC_OP_FLUSH_ENABLE (1<<0)
+#define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
+#define CACHE_MODE_1 0x7004 /* IVB+ */
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
+#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
+
+#define GEN6_BLITTER_ECOSKPD 0x221d0
+#define GEN6_BLITTER_LOCK_SHIFT 16
+#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
+
+#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
+#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
+#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
+
+#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
+#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
+#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
+#define GEN6_BSD_SLEEP_INDICATOR (1 << 3)
+#define GEN6_BSD_GO_INDICATOR (1 << 4)
+
+/* On modern GEN architectures interrupt control consists of two sets
+ * of registers. The first set pertains to the ring generating the
+ * interrupt. The second control is for the functional block generating the
+ * interrupt. These are PM, GT, DE, etc.
+ *
+ * Luckily *knocks on wood* all the ring interrupt bits match up with the
+ * GT interrupt bits, so we don't need to duplicate the defines.
+ *
+ * These defines should cover us well from SNB->HSW with minor exceptions;
+ * they can also work on ILK.
+ */
+#define GT_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
+#define GT_BLT_CS_ERROR_INTERRUPT (1 << 25)
+#define GT_BLT_USER_INTERRUPT (1 << 22)
+#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
+#define GT_BSD_USER_INTERRUPT (1 << 12)
+#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
+#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
+#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
+#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
+#define GT_RENDER_SYNC_STATUS_INTERRUPT (1 << 2)
+#define GT_RENDER_DEBUG_INTERRUPT (1 << 1)
+#define GT_RENDER_USER_INTERRUPT (1 << 0)
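+
+/*
+ * Sketch (illustrative, not part of this patch): the same bit is used in
+ * both the per-ring IMR and the GT IMR (GTIMR, defined elsewhere in this
+ * file), e.g. unmasking user interrupts on the render ring:
+ *
+ *	I915_WRITE(RING_IMR(RENDER_RING_BASE), ~GT_RENDER_USER_INTERRUPT);
+ *	I915_WRITE(GTIMR, I915_READ(GTIMR) & ~GT_RENDER_USER_INTERRUPT);
+ */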
+
+#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
+#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */
+
+#define GT_PARITY_ERROR(dev) \
+ (GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
+ (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
+
+/* These are all the "old" interrupts */
+#define ILK_BSD_USER_INTERRUPT (1<<5)
+
+#define I915_PM_INTERRUPT (1<<31)
+#define I915_ISP_INTERRUPT (1<<22)
+#define I915_LPE_PIPE_B_INTERRUPT (1<<21)
+#define I915_LPE_PIPE_A_INTERRUPT (1<<20)
+#define I915_MIPIB_INTERRUPT (1<<19)
+#define I915_MIPIA_INTERRUPT (1<<18)
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
+#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
+#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1<<16)
+#define I915_MASTER_ERROR_INTERRUPT (1<<15)
+#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
+#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1<<14)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
+#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1<<13)
+#define I915_HWB_OOM_INTERRUPT (1<<13)
+#define I915_LPE_PIPE_C_INTERRUPT (1<<12)
+#define I915_SYNC_STATUS_INTERRUPT (1<<12)
+#define I915_MISC_INTERRUPT (1<<11)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
+#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1<<10)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
+#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1<<9)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
+#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1<<8)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
+#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1<<3)
+#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1<<2)
+#define I915_DEBUG_INTERRUPT (1<<2)
+#define I915_WINVALID_INTERRUPT (1<<1)
+#define I915_USER_INTERRUPT (1<<1)
+#define I915_ASLE_INTERRUPT (1<<0)
+#define I915_BSD_USER_INTERRUPT (1<<25)
+
+#define GEN6_BSD_RNCID 0x12198
+
+#define GEN7_FF_THREAD_MODE 0x20a0
+#define GEN7_FF_SCHED_MASK 0x0077070
+#define GEN8_FF_DS_REF_CNT_FFME (1 << 19)
+#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
+#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
+#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
+#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
+#define GEN7_FF_VS_REF_CNT_FFME (1 << 15)
+#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
+#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
+#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
+#define GEN7_FF_VS_SCHED_HW (0x0<<12)
+#define GEN7_FF_DS_SCHED_HS1 (0x5<<4)
+#define GEN7_FF_DS_SCHED_HS0 (0x3<<4)
+#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */
+#define GEN7_FF_DS_SCHED_HW (0x0<<4)
/*
* Framebuffer compression (915+ only)
@@ -338,15 +1405,16 @@
#define FBC_CTL_PERIODIC (1<<30)
#define FBC_CTL_INTERVAL_SHIFT (16)
#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define FBC_CTL_C3_IDLE (1<<13)
#define FBC_CTL_STRIDE_SHIFT (5)
-#define FBC_CTL_FENCENO (1<<0)
+#define FBC_CTL_FENCENO_SHIFT (0)
#define FBC_COMMAND 0x0320c
#define FBC_CMD_COMPRESS (1<<0)
#define FBC_STATUS 0x03210
#define FBC_STAT_COMPRESSING (1<<31)
#define FBC_STAT_COMPRESSED (1<<30)
#define FBC_STAT_MODIFIED (1<<29)
-#define FBC_STAT_CURRENT_LINE (1<<0)
+#define FBC_STAT_CURRENT_LINE_SHIFT (0)
#define FBC_CONTROL2 0x03214
#define FBC_CTL_FENCE_DBL (0<<4)
#define FBC_CTL_IDLE_IMM (0<<2)
@@ -354,9 +1422,8 @@
#define FBC_CTL_IDLE_LINE (2<<2)
#define FBC_CTL_IDLE_DEBUG (3<<2)
#define FBC_CTL_CPU_FENCE (1<<1)
-#define FBC_CTL_PLANEA (0<<0)
-#define FBC_CTL_PLANEB (1<<0)
-#define FBC_FENCE_OFF 0x0321b
+#define FBC_CTL_PLANE(plane) ((plane)<<0)
+#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */
#define FBC_TAG 0x03300
#define FBC_LL_SIZE (1536)
@@ -365,9 +1432,11 @@
#define DPFC_CB_BASE 0x3200
#define DPFC_CONTROL 0x3208
#define DPFC_CTL_EN (1<<31)
-#define DPFC_CTL_PLANEA (0<<30)
-#define DPFC_CTL_PLANEB (1<<30)
+#define DPFC_CTL_PLANE(plane) ((plane)<<30)
+#define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29)
#define DPFC_CTL_FENCE_EN (1<<29)
+#define IVB_DPFC_CTL_FENCE_EN (1<<28)
+#define DPFC_CTL_PERSISTENT_MODE (1<<25)
#define DPFC_SR_EN (1<<10)
#define DPFC_CTL_LIMIT_1X (0<<6)
#define DPFC_CTL_LIMIT_2X (1<<6)
@@ -388,6 +1457,43 @@
#define DPFC_CHICKEN 0x3224
#define DPFC_HT_MODIFY (1<<31)
+/* Framebuffer compression for Ironlake */
+#define ILK_DPFC_CB_BASE 0x43200
+#define ILK_DPFC_CONTROL 0x43208
+/* Bits 28:8 are reserved */
+#define DPFC_RESERVED (0x1FFFFF00)
+#define ILK_DPFC_RECOMP_CTL 0x4320c
+#define ILK_DPFC_STATUS 0x43210
+#define ILK_DPFC_FENCE_YOFF 0x43218
+#define ILK_DPFC_CHICKEN 0x43224
+#define ILK_FBC_RT_BASE 0x2128
+#define ILK_FBC_RT_VALID (1<<0)
+#define SNB_FBC_FRONT_BUFFER (1<<1)
+
+#define ILK_DISPLAY_CHICKEN1 0x42000
+#define ILK_FBCQ_DIS (1<<22)
+#define ILK_PABSTRETCH_DIS (1<<21)
+
+
+/*
+ * Framebuffer compression for Sandybridge
+ *
+ * The following two registers are of type GTTMMADR
+ */
+#define SNB_DPFC_CTL_SA 0x100100
+#define SNB_CPU_FENCE_ENABLE (1<<29)
+#define DPFC_CPU_FENCE_OFFSET 0x100104
+
+/* Framebuffer compression for Ivybridge */
+#define IVB_FBC_RT_BASE 0x7020
+
+#define IPS_CTL 0x43408
+#define IPS_ENABLE (1 << 31)
+
+#define MSG_FBC_REND_STATE 0x50380
+#define FBC_REND_NUKE (1<<2)
+#define FBC_REND_CACHE_CLEAN (1<<1)
+
/*
* GPIO regs
*/
@@ -414,16 +1520,61 @@
# define GPIO_DATA_VAL_IN (1 << 12)
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
-#define GMBUS0 0x5100
-#define GMBUS1 0x5104
-#define GMBUS2 0x5108
-#define GMBUS3 0x510c
-#define GMBUS4 0x5110
-#define GMBUS5 0x5120
+#define GMBUS0 0x5100 /* clock/port select */
+#define GMBUS_RATE_100KHZ (0<<8)
+#define GMBUS_RATE_50KHZ (1<<8)
+#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_PORT_DISABLED 0
+#define GMBUS_PORT_SSC 1
+#define GMBUS_PORT_VGADDC 2
+#define GMBUS_PORT_PANEL 3
+#define GMBUS_PORT_DPD_CHV 3 /* HDMID_CHV */
+#define GMBUS_PORT_DPC 4 /* HDMIC */
+#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
+#define GMBUS_PORT_DPD 6 /* HDMID */
+#define GMBUS_PORT_RESERVED 7 /* 7 reserved */
+#define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1)
+#define GMBUS1 0x5104 /* command/status */
+#define GMBUS_SW_CLR_INT (1<<31)
+#define GMBUS_SW_RDY (1<<30)
+#define GMBUS_ENT (1<<29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0<<25)
+#define GMBUS_CYCLE_WAIT (1<<25)
+#define GMBUS_CYCLE_INDEX (2<<25)
+#define GMBUS_CYCLE_STOP (4<<25)
+#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_SLAVE_INDEX_SHIFT 8
+#define GMBUS_SLAVE_ADDR_SHIFT 1
+#define GMBUS_SLAVE_READ (1<<0)
+#define GMBUS_SLAVE_WRITE (0<<0)
+#define GMBUS2 0x5108 /* status */
+#define GMBUS_INUSE (1<<15)
+#define GMBUS_HW_WAIT_PHASE (1<<14)
+#define GMBUS_STALL_TIMEOUT (1<<13)
+#define GMBUS_INT (1<<12)
+#define GMBUS_HW_RDY (1<<11)
+#define GMBUS_SATOER (1<<10)
+#define GMBUS_ACTIVE (1<<9)
+#define GMBUS3 0x510c /* data buffer bytes 3-0 */
+#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */
+#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define GMBUS_NAK_EN (1<<3)
+#define GMBUS_IDLE_EN (1<<2)
+#define GMBUS_HW_WAIT_EN (1<<1)
+#define GMBUS_HW_RDY_EN (1<<0)
+#define GMBUS5 0x5120 /* byte index */
+#define GMBUS_2BYTE_INDEX_EN (1<<31)
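+
+/*
+ * Sketch (illustrative, not part of this patch; mirrors the pattern in
+ * intel_i2c.c): a GMBUS read of "len" bytes from 7-bit address "addr" is
+ * kicked off roughly as
+ *
+ *	I915_WRITE(GMBUS1, GMBUS_SW_RDY | GMBUS_CYCLE_WAIT |
+ *		   (len << GMBUS_BYTE_COUNT_SHIFT) |
+ *		   (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ);
+ *
+ * after which GMBUS2 is polled for GMBUS_HW_RDY before each dword is
+ * pulled out of GMBUS3.
+ */
+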
/*
* Clock control & power management
*/
+#define DPLL_A_OFFSET 0x6014
+#define DPLL_B_OFFSET 0x6018
+#define CHV_DPLL_C_OFFSET 0x6030
+#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \
+ dev_priv->info.display_mmio_offset)
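+/*
+ * Illustrative note: with dpll_offsets[] as above, DPLL(PIPE_B) resolves
+ * to 0x6018 plus the platform's display_mmio_offset (0 on most parts,
+ * VLV_DISPLAY_BASE on VLV/CHV).
+ */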
#define VGA0 0x6000
#define VGA1 0x6004
@@ -436,11 +1587,12 @@
#define VGA1_PD_P1_DIV_2 (1 << 13)
#define VGA1_PD_P1_SHIFT 8
#define VGA1_PD_P1_MASK (0x1f << 8)
-#define DPLL_A 0x06014
-#define DPLL_B 0x06018
#define DPLL_VCO_ENABLE (1 << 31)
-#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_SDVO_HIGH_SPEED (1 << 30)
+#define DPLL_DVO_2X_MODE (1 << 30)
+#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
#define DPLL_VGA_MODE_DIS (1 << 28)
#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
@@ -452,76 +1604,26 @@
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
+#define DPLL_LOCK_VLV (1<<15)
+#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
+#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
+#define DPLL_SSC_REF_CLOCK_CHV (1<<13)
+#define DPLL_PORTC_READY_MASK (0xf << 4)
+#define DPLL_PORTB_READY_MASK (0xf)
-#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
-#define I915_CRC_ERROR_ENABLE (1UL<<29)
-#define I915_CRC_DONE_ENABLE (1UL<<28)
-#define I915_GMBUS_EVENT_ENABLE (1UL<<27)
-#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25)
-#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
-#define I915_DPST_EVENT_ENABLE (1UL<<23)
-#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
-#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
-#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
-#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
-#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
-#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16)
-#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
-#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
-#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11)
-#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9)
-#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
-#define I915_DPST_EVENT_STATUS (1UL<<7)
-#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6)
-#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
-#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
-#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
-#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1)
-#define I915_OVERLAY_UPDATED_STATUS (1UL<<0)
-
-#define SRX_INDEX 0x3c4
-#define SRX_DATA 0x3c5
-#define SR01 1
-#define SR01_SCREEN_OFF (1<<5)
-
-#define PPCR 0x61204
-#define PPCR_ON (1<<0)
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
-#define DVOB 0x61140
-#define DVOB_ON (1<<31)
-#define DVOC 0x61160
-#define DVOC_ON (1<<31)
-#define LVDS 0x61180
-#define LVDS_ON (1<<31)
+/* Additional CHV pll/phy registers */
+#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
+#define DPLL_PORTD_READY_MASK (0xf)
+#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
+#define PHY_COM_LANE_RESET_DEASSERT(phy, val) \
+ ((phy == DPIO_PHY0) ? (val | 1) : (val | 2))
+#define PHY_COM_LANE_RESET_ASSERT(phy, val) \
+ ((phy == DPIO_PHY0) ? (val & ~1) : (val & ~2))
+#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
+#define PHY_POWERGOOD(phy) ((phy == DPIO_PHY0) ? (1<<31) : (1<<30))
-#define ADPA 0x61100
-#define ADPA_DPMS_MASK (~(3<<10))
-#define ADPA_DPMS_ON (0<<10)
-#define ADPA_DPMS_SUSPEND (1<<10)
-#define ADPA_DPMS_STANDBY (2<<10)
-#define ADPA_DPMS_OFF (3<<10)
-
-#define RING_TAIL 0x00
-#define TAIL_ADDR 0x001FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0xFFFFF000
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x001FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
-
-/* Scratch pad debug 0 reg:
- */
-#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
* this field (only one bit may be set).
@@ -559,7 +1661,13 @@
#define SDVO_MULTIPLIER_MASK 0x000000ff
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define DPLL_A_MD 0x0601c /* 965+ only */
+
+#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */
+#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */
+#define CHV_DPLL_C_MD_OFFSET 0x603c
+#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \
+ dev_priv->info.display_mmio_offset)
+
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
*
@@ -596,11 +1704,13 @@
*/
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
-#define DPLL_B_MD 0x06020 /* 965+ only */
-#define FPA0 0x06040
-#define FPA1 0x06044
-#define FPB0 0x06048
-#define FPB1 0x0604c
+
+#define _FPA0 0x06040
+#define _FPA1 0x06044
+#define _FPB0 0x06048
+#define _FPB1 0x0604c
+#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0)
+#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1)
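+/*
+ * Illustrative note: _PIPE() picks the first register for pipe A and the
+ * second for pipe B, so FP0(PIPE_B) = _FPB0 = 0x6048.
+ */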
#define FP_N_DIV_MASK 0x003f0000
#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
#define FP_N_DIV_SHIFT 16
@@ -621,10 +1731,11 @@
#define DPLLA_TEST_M_BYPASS (1 << 2)
#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
#define D_STATE 0x6104
+#define DSTATE_GFX_RESET_I830 (1<<6)
#define DSTATE_PLL_D3_OFF (1<<3)
#define DSTATE_GFX_CLOCK_GATING (1<<1)
#define DSTATE_DOT_CLOCK_GATING (1<<0)
-#define DSPCLK_GATE_D 0x6200
+#define DSPCLK_GATE_D (dev_priv->info.display_mmio_offset + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
@@ -653,7 +1764,7 @@
# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */
# define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4)
-/**
+/*
* This bit must be set on the 830 to prevent hangs when turning off the
* overlay scaler.
*/
@@ -673,12 +1784,12 @@
# define COLOR_CALCULATOR_CLOCK_GATE_DISABLE (1 << 7)
# define MOTION_COMP_CLOCK_GATE_DISABLE (1 << 6)
# define MAG_CLOCK_GATE_DISABLE (1 << 5)
-/** This bit must be unset on 855,865 */
+/* This bit must be unset on 855,865 */
# define MECI_CLOCK_GATE_DISABLE (1 << 4)
# define DCMP_CLOCK_GATE_DISABLE (1 << 3)
# define MEC_CLOCK_GATE_DISABLE (1 << 2)
# define MECO_CLOCK_GATE_DISABLE (1 << 1)
-/** This bit must be set on 855,865. */
+/* This bit must be set on 855,865. */
# define SV_CLOCK_GATE_DISABLE (1 << 0)
# define I915_MPEG_CLOCK_GATE_DISABLE (1 << 16)
# define I915_VLD_IP_PR_CLOCK_GATE_DISABLE (1 << 15)
@@ -699,14 +1810,14 @@
# define I915_BY_CLOCK_GATE_DISABLE (1 << 0)
# define I965_RCZ_CLOCK_GATE_DISABLE (1 << 30)
-/** This bit must always be set on 965G/965GM */
+/* This bit must always be set on 965G/965GM */
# define I965_RCC_CLOCK_GATE_DISABLE (1 << 29)
# define I965_RCPB_CLOCK_GATE_DISABLE (1 << 28)
# define I965_DAP_CLOCK_GATE_DISABLE (1 << 27)
# define I965_ROC_CLOCK_GATE_DISABLE (1 << 26)
# define I965_GW_CLOCK_GATE_DISABLE (1 << 25)
# define I965_TD_CLOCK_GATE_DISABLE (1 << 24)
-/** This bit must always be set on 965G */
+/* This bit must always be set on 965G */
# define I965_ISC_CLOCK_GATE_DISABLE (1 << 23)
# define I965_IC_CLOCK_GATE_DISABLE (1 << 22)
# define I965_EU_CLOCK_GATE_DISABLE (1 << 21)
@@ -731,15 +1842,32 @@
#define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9)
#define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7)
#define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6)
+
+#define VDECCLK_GATE_D 0x620C /* g4x only */
+#define VCP_UNIT_CLOCK_GATE_DISABLE (1 << 4)
+
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
+#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500)
+#define FW_CSPWRDWNEN (1<<15)
+
+#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504)
+
+#define CZCLK_CDCLK_FREQ_RATIO (VLV_DISPLAY_BASE + 0x6508)
+#define CDCLK_FREQ_SHIFT 4
+#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
+#define CZCLK_FREQ_MASK 0xf
+#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510)
+
/*
* Palette regs
*/
-
-#define PALETTE_A 0x0a000
-#define PALETTE_B 0x0a800
+#define PALETTE_A_OFFSET 0xa000
+#define PALETTE_B_OFFSET 0xa800
+#define CHV_PALETTE_C_OFFSET 0xc000
+#define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \
+ dev_priv->info.display_mmio_offset)
/* MCH MMIO space */
@@ -750,10 +1878,17 @@
* device 0 function 0's pci config register 0x44 or 0x48 and matches it in
* every way. It is not accessible from the CP register read instructions.
*
+ * Starting from Haswell, you can't write registers using the MCHBAR mirror,
+ * just read.
*/
#define MCHBAR_MIRROR_BASE 0x10000
-/** 915-945 and GM965 MCH register controlling DRAM channel access */
+#define MCHBAR_MIRROR_BASE_SNB 0x140000
+
+/* Memory controller frequency in MCHBAR for Haswell (possibly SNB+) */
+#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
+
+/* 915-945 and GM965 MCH register controlling DRAM channel access */
#define DCC 0x10200
#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
@@ -762,10 +1897,43 @@
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
-/** 965 MCH register controlling DRAM channel configuration */
+/* Pineview MCH register contains DDR3 setting */
+#define CSHRDDR3CTL 0x101a8
+#define CSHRDDR3CTL_DDR3 (1 << 2)
+
+/* 965 MCH register controlling DRAM channel configuration */
#define C0DRB3 0x10206
#define C1DRB3 0x10606
+/* snb MCH registers for reading the DRAM channel configuration */
+#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004)
+#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008)
+#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C)
+#define MAD_DIMM_ECC_MASK (0x3 << 24)
+#define MAD_DIMM_ECC_OFF (0x0 << 24)
+#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
+#define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24)
+#define MAD_DIMM_ECC_ON (0x3 << 24)
+#define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22)
+#define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21)
+#define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */
+#define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */
+#define MAD_DIMM_B_DUAL_RANK (0x1 << 18)
+#define MAD_DIMM_A_DUAL_RANK (0x1 << 17)
+#define MAD_DIMM_A_SELECT (0x1 << 16)
+/* DIMM sizes are in multiples of 256 MB. */
+#define MAD_DIMM_B_SIZE_SHIFT 8
+#define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT)
+#define MAD_DIMM_A_SIZE_SHIFT 0
+#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
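+
+/*
+ * Worked example (illustrative): the size of DIMM A in megabytes is
+ *
+ *	((val & MAD_DIMM_A_SIZE_MASK) >> MAD_DIMM_A_SIZE_SHIFT) * 256
+ *
+ * where "val" is a read of MAD_DIMM_C0/C1/C2 for the channel in question.
+ */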
+
+/* snb MCH registers for priority tuning */
+#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10)
+#define MCH_SSKPD_WM0_MASK 0x3f
+#define MCH_SSKPD_WM0_VAL 0xc
+
+#define MCH_SECP_NRG_STTS (MCHBAR_MIRROR_BASE_SNB + 0x592c)
+
/* Clocking configuration register */
#define CLKCFG 0x10c00
#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
@@ -783,12 +1951,290 @@
#define CLKCFG_MEM_800 (3 << 4)
#define CLKCFG_MEM_MASK (7 << 4)
-/** GM965 GM45 render standby register */
-#define MCHBAR_RENDER_STANDBY 0x111B8
-#define RCX_SW_EXIT (1<<23)
-#define RSX_STATUS_MASK 0x00700000
+#define TSC1 0x11001
+#define TSE (1<<0)
+#define TR1 0x11006
+#define TSFS 0x11020
+#define TSFS_SLOPE_MASK 0x0000ff00
+#define TSFS_SLOPE_SHIFT 8
+#define TSFS_INTR_MASK 0x000000ff
+
+#define CRSTANDVID 0x11100
+#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
+#define PXVFREQ_PX_MASK 0x7f000000
+#define PXVFREQ_PX_SHIFT 24
+#define VIDFREQ_BASE 0x11110
+#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */
+#define VIDFREQ2 0x11114
+#define VIDFREQ3 0x11118
+#define VIDFREQ4 0x1111c
+#define VIDFREQ_P0_MASK 0x1f000000
+#define VIDFREQ_P0_SHIFT 24
+#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
+#define VIDFREQ_P0_CSCLK_SHIFT 20
+#define VIDFREQ_P0_CRCLK_MASK 0x000f0000
+#define VIDFREQ_P0_CRCLK_SHIFT 16
+#define VIDFREQ_P1_MASK 0x00001f00
+#define VIDFREQ_P1_SHIFT 8
+#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
+#define VIDFREQ_P1_CSCLK_SHIFT 4
+#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
+#define INTTOEXT_BASE_ILK 0x11300
+#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */
+#define INTTOEXT_MAP3_SHIFT 24
+#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
+#define INTTOEXT_MAP2_SHIFT 16
+#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT)
+#define INTTOEXT_MAP1_SHIFT 8
+#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
+#define INTTOEXT_MAP0_SHIFT 0
+#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
+#define MEMSWCTL 0x11170 /* Ironlake only */
+#define MEMCTL_CMD_MASK 0xe000
+#define MEMCTL_CMD_SHIFT 13
+#define MEMCTL_CMD_RCLK_OFF 0
+#define MEMCTL_CMD_RCLK_ON 1
+#define MEMCTL_CMD_CHFREQ 2
+#define MEMCTL_CMD_CHVID 3
+#define MEMCTL_CMD_VMMOFF 4
+#define MEMCTL_CMD_VMMON 5
+#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears
+ when command complete */
+#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
+#define MEMCTL_FREQ_SHIFT 8
+#define MEMCTL_SFCAVM (1<<7)
+#define MEMCTL_TGT_VID_MASK 0x007f
+#define MEMIHYST 0x1117c
+#define MEMINTREN 0x11180 /* 16 bits */
+#define MEMINT_RSEXIT_EN (1<<8)
+#define MEMINT_CX_SUPR_EN (1<<7)
+#define MEMINT_CONT_BUSY_EN (1<<6)
+#define MEMINT_AVG_BUSY_EN (1<<5)
+#define MEMINT_EVAL_CHG_EN (1<<4)
+#define MEMINT_MON_IDLE_EN (1<<3)
+#define MEMINT_UP_EVAL_EN (1<<2)
+#define MEMINT_DOWN_EVAL_EN (1<<1)
+#define MEMINT_SW_CMD_EN (1<<0)
+#define MEMINTRSTR 0x11182 /* 16 bits */
+#define MEM_RSEXIT_MASK 0xc000
+#define MEM_RSEXIT_SHIFT 14
+#define MEM_CONT_BUSY_MASK 0x3000
+#define MEM_CONT_BUSY_SHIFT 12
+#define MEM_AVG_BUSY_MASK 0x0c00
+#define MEM_AVG_BUSY_SHIFT 10
+#define MEM_EVAL_CHG_MASK 0x0300
+#define MEM_EVAL_BUSY_SHIFT 8
+#define MEM_MON_IDLE_MASK 0x00c0
+#define MEM_MON_IDLE_SHIFT 6
+#define MEM_UP_EVAL_MASK 0x0030
+#define MEM_UP_EVAL_SHIFT 4
+#define MEM_DOWN_EVAL_MASK 0x000c
+#define MEM_DOWN_EVAL_SHIFT 2
+#define MEM_SW_CMD_MASK 0x0003
+#define MEM_INT_STEER_GFX 0
+#define MEM_INT_STEER_CMR 1
+#define MEM_INT_STEER_SMI 2
+#define MEM_INT_STEER_SCI 3
+#define MEMINTRSTS 0x11184
+#define MEMINT_RSEXIT (1<<7)
+#define MEMINT_CONT_BUSY (1<<6)
+#define MEMINT_AVG_BUSY (1<<5)
+#define MEMINT_EVAL_CHG (1<<4)
+#define MEMINT_MON_IDLE (1<<3)
+#define MEMINT_UP_EVAL (1<<2)
+#define MEMINT_DOWN_EVAL (1<<1)
+#define MEMINT_SW_CMD (1<<0)
+#define MEMMODECTL 0x11190
+#define MEMMODE_BOOST_EN (1<<31)
+#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
+#define MEMMODE_BOOST_FREQ_SHIFT 24
+#define MEMMODE_IDLE_MODE_MASK 0x00030000
+#define MEMMODE_IDLE_MODE_SHIFT 16
+#define MEMMODE_IDLE_MODE_EVAL 0
+#define MEMMODE_IDLE_MODE_CONT 1
+#define MEMMODE_HWIDLE_EN (1<<15)
+#define MEMMODE_SWMODE_EN (1<<14)
+#define MEMMODE_RCLK_GATE (1<<13)
+#define MEMMODE_HW_UPDATE (1<<12)
+#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
+#define MEMMODE_FSTART_SHIFT 8
+#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
+#define MEMMODE_FMAX_SHIFT 4
+#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
+#define RCBMAXAVG 0x1119c
+#define MEMSWCTL2 0x1119e /* Cantiga only */
+#define SWMEMCMD_RENDER_OFF (0 << 13)
+#define SWMEMCMD_RENDER_ON (1 << 13)
+#define SWMEMCMD_SWFREQ (2 << 13)
+#define SWMEMCMD_TARVID (3 << 13)
+#define SWMEMCMD_VRM_OFF (4 << 13)
+#define SWMEMCMD_VRM_ON (5 << 13)
+#define CMDSTS (1<<12)
+#define SFCAVM (1<<11)
+#define SWFREQ_MASK 0x0380 /* P0-7 */
+#define SWFREQ_SHIFT 7
+#define TARVID_MASK 0x001f
+#define MEMSTAT_CTG 0x111a0
+#define RCBMINAVG 0x111a0
+#define RCUPEI 0x111b0
+#define RCDNEI 0x111b4
+#define RSTDBYCTL 0x111b8
+#define RS1EN (1<<31)
+#define RS2EN (1<<30)
+#define RS3EN (1<<29)
+#define D3RS3EN (1<<28) /* Display D3 implies RS3 */
+#define SWPROMORSX (1<<27) /* RSx promotion timers ignored */
+#define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */
+#define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */
+#define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */
+#define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */
+#define RSX_STATUS_MASK (7<<20)
+#define RSX_STATUS_ON (0<<20)
+#define RSX_STATUS_RC1 (1<<20)
+#define RSX_STATUS_RC1E (2<<20)
+#define RSX_STATUS_RS1 (3<<20)
+#define RSX_STATUS_RS2 (4<<20) /* aka rc6 */
+#define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */
+#define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */
+#define RSX_STATUS_RSVD2 (7<<20)
+#define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */
+#define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */
+#define JRSC (1<<17) /* rsx coupled to cpu c-state */
+#define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */
+#define RS1CONTSAV_MASK (3<<14)
+#define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */
+#define RS1CONTSAV_RSVD (1<<14)
+#define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */
+#define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */
+#define NORMSLEXLAT_MASK (3<<12)
+#define SLOW_RS123 (0<<12)
+#define SLOW_RS23 (1<<12)
+#define SLOW_RS3 (2<<12)
+#define NORMAL_RS123 (3<<12)
+#define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */
+#define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */
+#define STATELOCK (1<<7) /* locked to rs_cstate if 0 */
+#define RS_CSTATE_MASK (3<<4)
+#define RS_CSTATE_C367_RS1 (0<<4)
+#define RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
+#define RS_CSTATE_RSVD (2<<4)
+#define RS_CSTATE_C367_RS2 (3<<4)
+#define REDSAVES (1<<3) /* no context save if was idle during rs0 */
+#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */
+#define VIDCTL 0x111c0
+#define VIDSTS 0x111c8
+#define VIDSTART 0x111cc /* 8 bits */
+#define MEMSTAT_ILK 0x111f8
+#define MEMSTAT_VID_MASK 0x7f00
+#define MEMSTAT_VID_SHIFT 8
+#define MEMSTAT_PSTATE_MASK 0x00f8
+#define MEMSTAT_PSTATE_SHIFT 3
+#define MEMSTAT_MON_ACTV (1<<2)
+#define MEMSTAT_SRC_CTL_MASK 0x0003
+#define MEMSTAT_SRC_CTL_CORE 0
+#define MEMSTAT_SRC_CTL_TRB 1
+#define MEMSTAT_SRC_CTL_THM 2
+#define MEMSTAT_SRC_CTL_STDBY 3
+#define RCPREVBSYTUPAVG 0x113b8
+#define RCPREVBSYTDNAVG 0x113bc
+#define PMMISC 0x11214
+#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */
+#define SDEW 0x1124c
+#define CSIEW0 0x11250
+#define CSIEW1 0x11254
+#define CSIEW2 0x11258
+#define PEW 0x1125c
+#define DEW 0x11270
+#define MCHAFE 0x112c0
+#define CSIEC 0x112e0
+#define DMIEC 0x112e4
+#define DDREC 0x112e8
+#define PEG0EC 0x112ec
+#define PEG1EC 0x112f0
+#define GFXEC 0x112f4
+#define RPPREVBSYTUPAVG 0x113b8
+#define RPPREVBSYTDNAVG 0x113bc
+#define ECR 0x11600
+#define ECR_GPFE (1<<31)
+#define ECR_IMONE (1<<30)
+#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
+#define OGW0 0x11608
+#define OGW1 0x1160c
+#define EG0 0x11610
+#define EG1 0x11614
+#define EG2 0x11618
+#define EG3 0x1161c
+#define EG4 0x11620
+#define EG5 0x11624
+#define EG6 0x11628
+#define EG7 0x1162c
+#define PXW 0x11664
+#define PXWL 0x11680
+#define LCFUSE02 0x116c0
+#define LCFUSE_HIV_MASK 0x000000ff
+#define CSIPLL0 0x12c10
+#define DDRMPLL1 0x12c20
#define PEG_BAND_GAP_DATA 0x14d68
+#define GEN6_GT_THREAD_STATUS_REG 0x13805c
+#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
+#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
+
+#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
+#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
+#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
+
+/*
+ * Logical Context regs
+ */
+#define CCID 0x2180
+#define CCID_EN (1<<0)
+/*
+ * Notes on SNB/IVB/VLV context size:
+ * - Power context is saved elsewhere (LLC or stolen)
+ * - Ring/execlist context is saved on SNB, not on IVB
+ * - Extended context size already includes render context size
+ * - We always need to follow the extended context size.
+ * SNB BSpec has comments indicating that we should use the
+ * render context size instead if execlists are disabled, but
+ * based on empirical testing that's just nonsense.
+ * - Pipelined/VF state is saved on SNB/IVB respectively
+ * - GT1 size just indicates how much of render context
+ * doesn't need saving on GT1
+ */
+#define CXT_SIZE 0x21a0
+#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f)
+#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f)
+#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f)
+#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f)
+#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f)
+#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
+ GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
+ GEN6_CXT_PIPELINE_SIZE(cxt_reg))
+#define GEN7_CXT_SIZE 0x21a8
+#define GEN7_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 25) & 0x7f)
+#define GEN7_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 22) & 0x7)
+#define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f)
+#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
+#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7)
+#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f)
+#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
+ GEN7_CXT_VFSTATE_SIZE(ctx_reg))
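+
+/*
+ * Usage sketch (illustrative, not part of this patch; mirrors the logic
+ * in i915_gem_context.c): the size fields count 64-byte units, so on
+ * gen7 (other than Haswell, see below) the context object size is
+ * roughly
+ *
+ *	size = GEN7_CXT_TOTAL_SIZE(I915_READ(GEN7_CXT_SIZE)) * 64;
+ *	size = round_up(size, 4096);
+ */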
+/* Haswell does have the CXT_SIZE register, however it does not appear to be
+ * valid. Now, docs explain in dwords what is in the context object. The full
+ * size is 70720 bytes, however, the power context and execlist context will
+ * never be saved (power context is stored elsewhere, and execlists don't work
+ * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
+ */
+#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
+/* Same as Haswell, but 72064 bytes now. */
+#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
+
+
+#define VLV_CLK_CTL2 0x101104
+#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
+
/*
* Overlay regs
*/
@@ -807,38 +2253,227 @@
* Display engine regs
*/
+/* Pipe A CRC regs */
+#define _PIPE_CRC_CTL_A 0x60050
+#define PIPE_CRC_ENABLE (1 << 31)
+/* ivb+ source selection */
+#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
+#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29)
+#define PIPE_CRC_SOURCE_PF_IVB (2 << 29)
+/* ilk+ source selection */
+#define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28)
+#define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28)
+#define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28)
+/* embedded DP port on the north display block, reserved on ivb */
+#define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28)
+#define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */
+/* vlv source selection */
+#define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27)
+#define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27)
+#define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27)
+/* with DP port the pipe source is invalid */
+#define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27)
+#define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27)
+#define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27)
+/* gen3+ source selection */
+#define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28)
+#define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28)
+#define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28)
+/* with DP/TV port the pipe source is invalid */
+#define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28)
+#define PIPE_CRC_SOURCE_TV_PRE (4 << 28)
+#define PIPE_CRC_SOURCE_TV_POST (5 << 28)
+#define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28)
+#define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28)
+/* gen2 doesn't have source selection bits */
+#define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30)
+
+#define _PIPE_CRC_RES_1_A_IVB 0x60064
+#define _PIPE_CRC_RES_2_A_IVB 0x60068
+#define _PIPE_CRC_RES_3_A_IVB 0x6006c
+#define _PIPE_CRC_RES_4_A_IVB 0x60070
+#define _PIPE_CRC_RES_5_A_IVB 0x60074
+
+#define _PIPE_CRC_RES_RED_A 0x60060
+#define _PIPE_CRC_RES_GREEN_A 0x60064
+#define _PIPE_CRC_RES_BLUE_A 0x60068
+#define _PIPE_CRC_RES_RES1_A_I915 0x6006c
+#define _PIPE_CRC_RES_RES2_A_G4X 0x60080
+
+/* Pipe B CRC regs */
+#define _PIPE_CRC_RES_1_B_IVB 0x61064
+#define _PIPE_CRC_RES_2_B_IVB 0x61068
+#define _PIPE_CRC_RES_3_B_IVB 0x6106c
+#define _PIPE_CRC_RES_4_B_IVB 0x61070
+#define _PIPE_CRC_RES_5_B_IVB 0x61074
+
+#define PIPE_CRC_CTL(pipe) _TRANSCODER2(pipe, _PIPE_CRC_CTL_A)
+#define PIPE_CRC_RES_1_IVB(pipe) \
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_1_A_IVB)
+#define PIPE_CRC_RES_2_IVB(pipe) \
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_2_A_IVB)
+#define PIPE_CRC_RES_3_IVB(pipe) \
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_3_A_IVB)
+#define PIPE_CRC_RES_4_IVB(pipe) \
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_4_A_IVB)
+#define PIPE_CRC_RES_5_IVB(pipe) \
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_5_A_IVB)
+
+#define PIPE_CRC_RES_RED(pipe) \
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_RED_A)
+#define PIPE_CRC_RES_GREEN(pipe) \
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_GREEN_A)
+#define PIPE_CRC_RES_BLUE(pipe) \
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_BLUE_A)
+#define PIPE_CRC_RES_RES1_I915(pipe) \
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_RES1_A_I915)
+#define PIPE_CRC_RES_RES2_G4X(pipe) \
+ _TRANSCODER2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
+
/* Pipe A timing regs */
-#define HTOTAL_A 0x60000
-#define HBLANK_A 0x60004
-#define HSYNC_A 0x60008
-#define VTOTAL_A 0x6000c
-#define VBLANK_A 0x60010
-#define VSYNC_A 0x60014
-#define PIPEASRC 0x6001c
-#define BCLRPAT_A 0x60020
+#define _HTOTAL_A 0x60000
+#define _HBLANK_A 0x60004
+#define _HSYNC_A 0x60008
+#define _VTOTAL_A 0x6000c
+#define _VBLANK_A 0x60010
+#define _VSYNC_A 0x60014
+#define _PIPEASRC 0x6001c
+#define _BCLRPAT_A 0x60020
+#define _VSYNCSHIFT_A 0x60028
/* Pipe B timing regs */
-#define HTOTAL_B 0x61000
-#define HBLANK_B 0x61004
-#define HSYNC_B 0x61008
-#define VTOTAL_B 0x6100c
-#define VBLANK_B 0x61010
-#define VSYNC_B 0x61014
-#define PIPEBSRC 0x6101c
-#define BCLRPAT_B 0x61020
+#define _HTOTAL_B 0x61000
+#define _HBLANK_B 0x61004
+#define _HSYNC_B 0x61008
+#define _VTOTAL_B 0x6100c
+#define _VBLANK_B 0x61010
+#define _VSYNC_B 0x61014
+#define _PIPEBSRC 0x6101c
+#define _BCLRPAT_B 0x61020
+#define _VSYNCSHIFT_B 0x61028
+
+#define TRANSCODER_A_OFFSET 0x60000
+#define TRANSCODER_B_OFFSET 0x61000
+#define TRANSCODER_C_OFFSET 0x62000
+#define CHV_TRANSCODER_C_OFFSET 0x63000
+#define TRANSCODER_EDP_OFFSET 0x6f000
+
+#define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \
+ dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
+ dev_priv->info.display_mmio_offset)
+
+#define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A)
+#define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A)
+#define HSYNC(trans) _TRANSCODER2(trans, _HSYNC_A)
+#define VTOTAL(trans) _TRANSCODER2(trans, _VTOTAL_A)
+#define VBLANK(trans) _TRANSCODER2(trans, _VBLANK_A)
+#define VSYNC(trans) _TRANSCODER2(trans, _VSYNC_A)
+#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
+#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
+#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
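+
+/*
+ * Illustrative note: _TRANSCODER2() rebases a transcoder A register onto
+ * another transcoder, e.g. HTOTAL(TRANSCODER_B) = 0x61000 - 0x60000 +
+ * _HTOTAL_A + display_mmio_offset, i.e. the _HTOTAL_B address above.
+ */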
+
+/* HSW+ eDP PSR registers */
+#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
+#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
+#define EDP_PSR_ENABLE (1<<31)
+#define EDP_PSR_LINK_DISABLE (0<<27)
+#define EDP_PSR_LINK_STANDBY (1<<27)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25)
+#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20
+#define EDP_PSR_SKIP_AUX_EXIT (1<<12)
+#define EDP_PSR_TP1_TP2_SEL (0<<11)
+#define EDP_PSR_TP1_TP3_SEL (1<<11)
+#define EDP_PSR_TP2_TP3_TIME_500us (0<<8)
+#define EDP_PSR_TP2_TP3_TIME_100us (1<<8)
+#define EDP_PSR_TP2_TP3_TIME_2500us (2<<8)
+#define EDP_PSR_TP2_TP3_TIME_0us (3<<8)
+#define EDP_PSR_TP1_TIME_500us (0<<4)
+#define EDP_PSR_TP1_TIME_100us (1<<4)
+#define EDP_PSR_TP1_TIME_2500us (2<<4)
+#define EDP_PSR_TP1_TIME_0us (3<<4)
+#define EDP_PSR_IDLE_FRAME_SHIFT 0
+
+#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
+#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
+#define EDP_PSR_DPCD_COMMAND 0x80060000
+#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
+#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
+#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
+#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
+#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
+
+#define EDP_PSR_STATUS_CTL(dev) (EDP_PSR_BASE(dev) + 0x40)
+#define EDP_PSR_STATUS_STATE_MASK (7<<29)
+#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
+#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
+#define EDP_PSR_STATUS_STATE_SRDENT (2<<29)
+#define EDP_PSR_STATUS_STATE_BUFOFF (3<<29)
+#define EDP_PSR_STATUS_STATE_BUFON (4<<29)
+#define EDP_PSR_STATUS_STATE_AUXACK (5<<29)
+#define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29)
+#define EDP_PSR_STATUS_LINK_MASK (3<<26)
+#define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26)
+#define EDP_PSR_STATUS_LINK_FULL_ON (1<<26)
+#define EDP_PSR_STATUS_LINK_STANDBY (2<<26)
+#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
+#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f
+#define EDP_PSR_STATUS_COUNT_SHIFT 16
+#define EDP_PSR_STATUS_COUNT_MASK 0xf
+#define EDP_PSR_STATUS_AUX_ERROR (1<<15)
+#define EDP_PSR_STATUS_AUX_SENDING (1<<12)
+#define EDP_PSR_STATUS_SENDING_IDLE (1<<9)
+#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8)
+#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
+#define EDP_PSR_STATUS_IDLE_MASK 0xf
+
+#define EDP_PSR_PERF_CNT(dev) (EDP_PSR_BASE(dev) + 0x44)
+#define EDP_PSR_PERF_CNT_MASK 0xffffff
+
+#define EDP_PSR_DEBUG_CTL(dev) (EDP_PSR_BASE(dev) + 0x60)
+#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
+#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
+#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
/* VGA port control */
#define ADPA 0x61100
+#define PCH_ADPA 0xe1100
+#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA)
+
#define ADPA_DAC_ENABLE (1<<31)
#define ADPA_DAC_DISABLE 0
#define ADPA_PIPE_SELECT_MASK (1<<30)
#define ADPA_PIPE_A_SELECT 0
#define ADPA_PIPE_B_SELECT (1<<30)
+#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
+/* CPT uses bits 29:30 for pch transcoder select */
+#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bits 25:16 */
+#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
+#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
+#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
+#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
+#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
+#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
+#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
+#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
+#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
+#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
+#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0
-#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
+#define ADPA_VSYNC_CNTL_DISABLE (1<<10)
#define ADPA_VSYNC_CNTL_ENABLE 0
-#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
+#define ADPA_HSYNC_CNTL_DISABLE (1<<11)
#define ADPA_HSYNC_CNTL_ENABLE 0
#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
#define ADPA_VSYNC_ACTIVE_LOW 0
@@ -850,18 +2485,22 @@
#define ADPA_DPMS_STANDBY (2<<10)
#define ADPA_DPMS_OFF (3<<10)
+
/* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN 0x61110
-#define HDMIB_HOTPLUG_INT_EN (1 << 29)
-#define DPB_HOTPLUG_INT_EN (1 << 29)
-#define HDMIC_HOTPLUG_INT_EN (1 << 28)
-#define DPC_HOTPLUG_INT_EN (1 << 28)
-#define HDMID_HOTPLUG_INT_EN (1 << 27)
-#define DPD_HOTPLUG_INT_EN (1 << 27)
+#define PORT_HOTPLUG_EN (dev_priv->info.display_mmio_offset + 0x61110)
+#define PORTB_HOTPLUG_INT_EN (1 << 29)
+#define PORTC_HOTPLUG_INT_EN (1 << 28)
+#define PORTD_HOTPLUG_INT_EN (1 << 27)
#define SDVOB_HOTPLUG_INT_EN (1 << 26)
#define SDVOC_HOTPLUG_INT_EN (1 << 25)
#define TV_HOTPLUG_INT_EN (1 << 18)
#define CRT_HOTPLUG_INT_EN (1 << 9)
+#define HOTPLUG_INT_EN_MASK (PORTB_HOTPLUG_INT_EN | \
+ PORTC_HOTPLUG_INT_EN | \
+ PORTD_HOTPLUG_INT_EN | \
+ SDVOC_HOTPLUG_INT_EN | \
+ SDVOB_HOTPLUG_INT_EN | \
+ CRT_HOTPLUG_INT_EN)
#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
/* must use period 64 on GM45 according to docs */
@@ -877,60 +2516,135 @@
#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
-#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
-#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
-
-#define PORT_HOTPLUG_STAT 0x61114
-#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
-#define DPB_HOTPLUG_INT_STATUS (1 << 29)
-#define HDMIC_HOTPLUG_INT_STATUS (1 << 28)
-#define DPC_HOTPLUG_INT_STATUS (1 << 28)
-#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
-#define DPD_HOTPLUG_INT_STATUS (1 << 27)
+
+#define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114)
+/*
+ * HDMI/DP bits are gen4+
+ *
+ * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
+ * Please check the detailed lore in the commit message for for experimental
+ * evidence.
+ */
+#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
+#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
+/* VLV DP/HDMI bits again match Bspec */
+#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
+#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
+#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
+#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
+#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
+/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
-#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
-#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
-
-/* SDVO port control */
-#define SDVOB 0x61140
-#define SDVOC 0x61160
-#define SDVO_ENABLE (1 << 31)
-#define SDVO_PIPE_B_SELECT (1 << 30)
-#define SDVO_STALL_SELECT (1 << 29)
-#define SDVO_INTERRUPT_ENABLE (1 << 26)
-/**
+#define DP_AUX_CHANNEL_D_INT_STATUS_G4X (1 << 6)
+#define DP_AUX_CHANNEL_C_INT_STATUS_G4X (1 << 5)
+#define DP_AUX_CHANNEL_B_INT_STATUS_G4X (1 << 4)
+#define DP_AUX_CHANNEL_MASK_INT_STATUS_G4X (7 << 4)
+
+/* SDVO is different across gen3/4 */
+#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
+#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
+/*
+ * Bspec seems to be seriously misled about the SDVO hpd bits on i965g/gm,
+ * since reality corroborates that they're the same as on gen3. But keep these
+ * bits here (and the comment!) to help any other lost wanderers back onto the
+ * right tracks.
+ */
+#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
+#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
+#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
+#define HOTPLUG_INT_STATUS_G4X (CRT_HOTPLUG_INT_STATUS | \
+ SDVOB_HOTPLUG_INT_STATUS_G4X | \
+ SDVOC_HOTPLUG_INT_STATUS_G4X | \
+ PORTB_HOTPLUG_INT_STATUS | \
+ PORTC_HOTPLUG_INT_STATUS | \
+ PORTD_HOTPLUG_INT_STATUS)
+
+#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \
+ SDVOB_HOTPLUG_INT_STATUS_I915 | \
+ SDVOC_HOTPLUG_INT_STATUS_I915 | \
+ PORTB_HOTPLUG_INT_STATUS | \
+ PORTC_HOTPLUG_INT_STATUS | \
+ PORTD_HOTPLUG_INT_STATUS)
+
+/* SDVO and HDMI port control.
+ * The same register may be used for SDVO or HDMI */
+#define GEN3_SDVOB 0x61140
+#define GEN3_SDVOC 0x61160
+#define GEN4_HDMIB GEN3_SDVOB
+#define GEN4_HDMIC GEN3_SDVOC
+#define CHV_HDMID 0x6116C
+#define PCH_SDVOB 0xe1140
+#define PCH_HDMIB PCH_SDVOB
+#define PCH_HDMIC 0xe1150
+#define PCH_HDMID 0xe1160
+
+#define PORT_DFT_I9XX 0x61150
+#define DC_BALANCE_RESET (1 << 25)
+#define PORT_DFT2_G4X 0x61154
+#define DC_BALANCE_RESET_VLV (1 << 31)
+#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
+#define PIPE_B_SCRAMBLE_RESET (1 << 1)
+#define PIPE_A_SCRAMBLE_RESET (1 << 0)
+
+/* Gen 3 SDVO bits: */
+#define SDVO_ENABLE (1 << 31)
+#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
+#define SDVO_PIPE_SEL_MASK (1 << 30)
+#define SDVO_PIPE_B_SELECT (1 << 30)
+#define SDVO_STALL_SELECT (1 << 29)
+#define SDVO_INTERRUPT_ENABLE (1 << 26)
+/*
* 915G/GM SDVO pixel multiplier.
- *
* Programmed value is multiplier - 1, up to 5x.
- *
* \sa DPLL_MD_UDI_MULTIPLIER_MASK
*/
-#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
#define SDVO_PORT_MULTIPLY_SHIFT 23
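For illustration, the "multiplier - 1" encoding above can be packed with a
small helper; this is a hedged sketch, not driver code, and the function name
is ours:

/* Sketch: pack a 1x-5x pixel multiplier into the SDVO port field,
 * following the "programmed value is multiplier - 1" rule above. */
static u32 sdvo_port_multiply(unsigned int multiplier)
{
        return ((multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT) &
                SDVO_PORT_MULTIPLY_MASK;
}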
-#define SDVO_PHASE_SELECT_MASK (15 << 19)
-#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
-#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
-#define SDVOC_GANG_MODE (1 << 16)
-#define SDVO_ENCODING_SDVO (0x0 << 10)
-#define SDVO_ENCODING_HDMI (0x2 << 10)
-/** Requird for HDMI operation */
-#define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9)
-#define SDVO_BORDER_ENABLE (1 << 7)
-#define SDVO_AUDIO_ENABLE (1 << 6)
-/** New with 965, default is to be set */
-#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
-/** New with 965, default is to be set */
-#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
-#define SDVOB_PCIE_CONCURRENCY (1 << 3)
-#define SDVO_DETECTED (1 << 2)
+#define SDVO_PHASE_SELECT_MASK (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
+#define SDVOC_GANG_MODE (1 << 16) /* Port C only */
+#define SDVO_BORDER_ENABLE (1 << 7) /* SDVO only */
+#define SDVOB_PCIE_CONCURRENCY (1 << 3) /* Port B only */
+#define SDVO_DETECTED (1 << 2)
/* Bits to be preserved when writing */
-#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26))
-#define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26))
+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \
+ SDVO_INTERRUPT_ENABLE)
+#define SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE)
+
+/* Gen 4 SDVO/HDMI bits: */
+#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
+#define SDVO_COLOR_FORMAT_MASK (7 << 26)
+#define SDVO_ENCODING_SDVO (0 << 10)
+#define SDVO_ENCODING_HDMI (2 << 10)
+#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
+#define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */
+#define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */
+#define SDVO_AUDIO_ENABLE (1 << 6)
+/* VSYNC/HSYNC bits new with 965, default is to be set */
+#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
+#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
+
+/* Gen 5 (IBX) SDVO/HDMI bits: */
+#define HDMI_COLOR_FORMAT_12bpc (3 << 26) /* HDMI only */
+#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
+
+/* Gen 6 (CPT) SDVO/HDMI bits: */
+#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
+#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
+
+/* CHV SDVO/HDMI bits: */
+#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
+#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
+
/* DVO port control */
#define DVOA 0x61120
@@ -975,8 +2689,14 @@
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
#define LVDS_PIPEB_SELECT (1 << 30)
+#define LVDS_PIPE_MASK (1 << 30)
+#define LVDS_PIPE(pipe) ((pipe) << 30)
/* LVDS dithering flag on 965/g4x platform */
#define LVDS_ENABLE_DITHER (1 << 25)
+/* LVDS sync polarity flags. Set to invert (i.e. negative) */
+#define LVDS_VSYNC_POLARITY (1 << 21)
+#define LVDS_HSYNC_POLARITY (1 << 20)
+
/* Enable border for unscaled (or aspect-scaled) display */
#define LVDS_BORDER_ENABLE (1 << 15)
/*
@@ -1010,6 +2730,39 @@
#define LVDS_B0B3_POWER_DOWN (0 << 2)
#define LVDS_B0B3_POWER_UP (3 << 2)
+/* Video Data Island Packet control */
+#define VIDEO_DIP_DATA 0x61178
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
+ * of the infoframe structure specified by CEA-861. */
+#define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_VSC_DATA_SIZE 36
+#define VIDEO_DIP_CTL 0x61170
+/* Pre HSW: */
+#define VIDEO_DIP_ENABLE (1 << 31)
+#define VIDEO_DIP_PORT(port) ((port) << 29)
+#define VIDEO_DIP_PORT_MASK (3 << 29)
+#define VIDEO_DIP_ENABLE_GCP (1 << 25)
+#define VIDEO_DIP_ENABLE_AVI (1 << 21)
+#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
+#define VIDEO_DIP_ENABLE_GAMUT (4 << 21)
+#define VIDEO_DIP_ENABLE_SPD (8 << 21)
+#define VIDEO_DIP_SELECT_AVI (0 << 19)
+#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
+#define VIDEO_DIP_SELECT_SPD (3 << 19)
+#define VIDEO_DIP_SELECT_MASK (3 << 19)
+#define VIDEO_DIP_FREQ_ONCE (0 << 16)
+#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
+#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
+#define VIDEO_DIP_FREQ_MASK (3 << 16)
+/* HSW and later: */
+#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
+#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
+#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
+#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
+#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
+#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
+
/* Panel power sequencing */
#define PP_STATUS 0x61200
#define PP_ON (1 << 31)
@@ -1022,9 +2775,21 @@
*/
#define PP_READY (1 << 30)
#define PP_SEQUENCE_NONE (0 << 28)
-#define PP_SEQUENCE_ON (1 << 28)
-#define PP_SEQUENCE_OFF (2 << 28)
-#define PP_SEQUENCE_MASK 0x30000000
+#define PP_SEQUENCE_POWER_UP (1 << 28)
+#define PP_SEQUENCE_POWER_DOWN (2 << 28)
+#define PP_SEQUENCE_MASK (3 << 28)
+#define PP_SEQUENCE_SHIFT 28
+#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
+#define PP_SEQUENCE_STATE_MASK 0x0000000f
+#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
+#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
+#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
+#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
+#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
+#define PP_SEQUENCE_STATE_RESET (0xf << 0)
#define PP_CONTROL 0x61204
#define POWER_TARGET_ON (1 << 0)
#define PP_ON_DELAYS 0x61208
@@ -1032,7 +2797,7 @@
#define PP_DIVISOR 0x61210
/* Panel fitting */
-#define PFIT_CONTROL 0x61230
+#define PFIT_CONTROL (dev_priv->info.display_mmio_offset + 0x61230)
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
#define PFIT_PIPE_SHIFT 29
@@ -1050,9 +2815,7 @@
#define PFIT_SCALING_PROGRAMMED (1 << 26)
#define PFIT_SCALING_PILLAR (2 << 26)
#define PFIT_SCALING_LETTER (3 << 26)
-#define PFIT_PGM_RATIOS 0x61234
-#define PFIT_VERT_SCALE_MASK 0xfff00000
-#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+#define PFIT_PGM_RATIOS (dev_priv->info.display_mmio_offset + 0x61234)
/* Pre-965 */
#define PFIT_VERT_SCALE_SHIFT 20
#define PFIT_VERT_SCALE_MASK 0xfff00000
@@ -1064,21 +2827,57 @@
#define PFIT_HORIZ_SCALE_SHIFT_965 0
#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
-#define PFIT_AUTO_RATIOS 0x61238
+#define PFIT_AUTO_RATIOS (dev_priv->info.display_mmio_offset + 0x61238)
+
+#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350)
+#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
+ _VLV_BLC_PWM_CTL2_B)
+
+#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354)
+#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
+ _VLV_BLC_PWM_CTL_B)
+
+#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360)
+#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
+ _VLV_BLC_HIST_CTL_B)
/* Backlight control */
-#define BLC_PWM_CTL 0x61254
-#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
-#define BLC_PWM_CTL2 0x61250 /* 965+ only */
-#define BLM_COMBINATION_MODE (1 << 30)
+#define BLC_PWM_CTL2 (dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
+#define BLM_PWM_ENABLE (1 << 31)
+#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
+#define BLM_PIPE_SELECT (1 << 29)
+#define BLM_PIPE_SELECT_IVB (3 << 29)
+#define BLM_PIPE_A (0 << 29)
+#define BLM_PIPE_B (1 << 29)
+#define BLM_PIPE_C (2 << 29) /* ivb + */
+#define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */
+#define BLM_TRANSCODER_B BLM_PIPE_B
+#define BLM_TRANSCODER_C BLM_PIPE_C
+#define BLM_TRANSCODER_EDP (3 << 29)
+#define BLM_PIPE(pipe) ((pipe) << 29)
+#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
+#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
+#define BLM_PHASE_IN_ENABLE (1 << 25)
+#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24)
+#define BLM_PHASE_IN_TIME_BASE_SHIFT (16)
+#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16)
+#define BLM_PHASE_IN_COUNT_SHIFT (8)
+#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
+#define BLM_PHASE_IN_INCR_SHIFT (0)
+#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
+#define BLC_PWM_CTL (dev_priv->info.display_mmio_offset + 0x61254)
/*
* This is the most significant 15 bits of the number of backlight cycles in a
* complete cycle of the modulated backlight control.
*
* The actual value is this field multiplied by two.
*/
-#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
-#define BLM_LEGACY_MODE (1 << 16)
+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */
/*
* This is the number of cycles out of the backlight modulation cycle for which
* the backlight is on.
@@ -1088,70 +2887,93 @@
*/
#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
+#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
+#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
+
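Taken together, the frequency and duty-cycle fields above determine the
backlight PWM on-fraction; a minimal sketch, with an illustrative helper name:

/* Sketch: derive the backlight on-fraction (in percent) from a BLC_PWM_CTL
 * value. The total PWM cycle is the frequency field multiplied by two, per
 * the comment above. */
static u32 backlight_duty_percent(u32 blc_pwm_ctl)
{
        u32 freq = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
                   BACKLIGHT_MODULATION_FREQ_SHIFT;
        u32 duty = blc_pwm_ctl & BACKLIGHT_DUTY_CYCLE_MASK;

        return freq ? duty * 100 / (freq * 2) : 0;
}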
+#define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260)
+
+/* New registers for PCH-split platforms. Save where new bits show up, the
+ * register layout matches the gen4 BLC_PWM_CTL[12]. */
+#define BLC_PWM_CPU_CTL2 0x48250
+#define BLC_PWM_CPU_CTL 0x48254
+
+#define HSW_BLC_PWM2_CTL 0x48350
-#define BLC_HIST_CTL 0x61260
+/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
+ * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
+#define BLC_PWM_PCH_CTL1 0xc8250
+#define BLM_PCH_PWM_ENABLE (1 << 31)
+#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
+#define BLM_PCH_POLARITY (1 << 29)
+#define BLC_PWM_PCH_CTL2 0xc8254
+
+#define UTIL_PIN_CTL 0x48400
+#define UTIL_PIN_ENABLE (1 << 31)
+
+#define PCH_GTC_CTL 0xe7000
+#define PCH_GTC_ENABLE (1 << 31)
/* TV port control */
#define TV_CTL 0x68000
-/** Enables the TV encoder */
+/* Enables the TV encoder */
# define TV_ENC_ENABLE (1 << 31)
-/** Sources the TV encoder input from pipe B instead of A. */
+/* Sources the TV encoder input from pipe B instead of A. */
# define TV_ENC_PIPEB_SELECT (1 << 30)
-/** Outputs composite video (DAC A only) */
+/* Outputs composite video (DAC A only) */
# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
-/** Outputs SVideo video (DAC B/C) */
+/* Outputs SVideo video (DAC B/C) */
# define TV_ENC_OUTPUT_SVIDEO (1 << 28)
-/** Outputs Component video (DAC A/B/C) */
+/* Outputs Component video (DAC A/B/C) */
# define TV_ENC_OUTPUT_COMPONENT (2 << 28)
-/** Outputs Composite and SVideo (DAC A/B/C) */
+/* Outputs Composite and SVideo (DAC A/B/C) */
# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28)
# define TV_TRILEVEL_SYNC (1 << 21)
-/** Enables slow sync generation (945GM only) */
+/* Enables slow sync generation (945GM only) */
# define TV_SLOW_SYNC (1 << 20)
-/** Selects 4x oversampling for 480i and 576p */
+/* Selects 4x oversampling for 480i and 576p */
# define TV_OVERSAMPLE_4X (0 << 18)
-/** Selects 2x oversampling for 720p and 1080i */
+/* Selects 2x oversampling for 720p and 1080i */
# define TV_OVERSAMPLE_2X (1 << 18)
-/** Selects no oversampling for 1080p */
+/* Selects no oversampling for 1080p */
# define TV_OVERSAMPLE_NONE (2 << 18)
-/** Selects 8x oversampling */
+/* Selects 8x oversampling */
# define TV_OVERSAMPLE_8X (3 << 18)
-/** Selects progressive mode rather than interlaced */
+/* Selects progressive mode rather than interlaced */
# define TV_PROGRESSIVE (1 << 17)
-/** Sets the colorburst to PAL mode. Required for non-M PAL modes. */
+/* Sets the colorburst to PAL mode. Required for non-M PAL modes. */
# define TV_PAL_BURST (1 << 16)
-/** Field for setting delay of Y compared to C */
+/* Field for setting delay of Y compared to C */
# define TV_YC_SKEW_MASK (7 << 12)
-/** Enables a fix for 480p/576p standard definition modes on the 915GM only */
+/* Enables a fix for 480p/576p standard definition modes on the 915GM only */
# define TV_ENC_SDP_FIX (1 << 11)
-/**
+/*
* Enables a fix for the 915GM only.
*
* Not sure what it does.
*/
# define TV_ENC_C0_FIX (1 << 10)
-/** Bits that must be preserved by software */
+/* Bits that must be preserved by software */
# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
# define TV_FUSE_STATE_MASK (3 << 4)
-/** Read-only state that reports all features enabled */
+/* Read-only state that reports all features enabled */
# define TV_FUSE_STATE_ENABLED (0 << 4)
-/** Read-only state that reports that Macrovision is disabled in hardware*/
+/* Read-only state that reports that Macrovision is disabled in hardware */
# define TV_FUSE_STATE_NO_MACROVISION (1 << 4)
-/** Read-only state that reports that TV-out is disabled in hardware. */
+/* Read-only state that reports that TV-out is disabled in hardware. */
# define TV_FUSE_STATE_DISABLED (2 << 4)
-/** Normal operation */
+/* Normal operation */
# define TV_TEST_MODE_NORMAL (0 << 0)
-/** Encoder test pattern 1 - combo pattern */
+/* Encoder test pattern 1 - combo pattern */
# define TV_TEST_MODE_PATTERN_1 (1 << 0)
-/** Encoder test pattern 2 - full screen vertical 75% color bars */
+/* Encoder test pattern 2 - full screen vertical 75% color bars */
# define TV_TEST_MODE_PATTERN_2 (2 << 0)
-/** Encoder test pattern 3 - full screen horizontal 75% color bars */
+/* Encoder test pattern 3 - full screen horizontal 75% color bars */
# define TV_TEST_MODE_PATTERN_3 (3 << 0)
-/** Encoder test pattern 4 - random noise */
+/* Encoder test pattern 4 - random noise */
# define TV_TEST_MODE_PATTERN_4 (4 << 0)
-/** Encoder test pattern 5 - linear color ramps */
+/* Encoder test pattern 5 - linear color ramps */
# define TV_TEST_MODE_PATTERN_5 (5 << 0)
-/**
+/*
* This test mode forces the DACs to 50% of full output.
*
* This is used for load detection in combination with TVDAC_SENSE_MASK
@@ -1160,35 +2982,36 @@
# define TV_TEST_MODE_MASK (7 << 0)
#define TV_DAC 0x68004
-/**
+# define TV_DAC_SAVE 0x00ffff00
+/*
* Reports that DAC state change logic has reported change (RO).
*
* This gets cleared when TV_DAC_STATE_EN is cleared
*/
# define TVDAC_STATE_CHG (1 << 31)
# define TVDAC_SENSE_MASK (7 << 28)
-/** Reports that DAC A voltage is above the detect threshold */
+/* Reports that DAC A voltage is above the detect threshold */
# define TVDAC_A_SENSE (1 << 30)
-/** Reports that DAC B voltage is above the detect threshold */
+/* Reports that DAC B voltage is above the detect threshold */
# define TVDAC_B_SENSE (1 << 29)
-/** Reports that DAC C voltage is above the detect threshold */
+/* Reports that DAC C voltage is above the detect threshold */
# define TVDAC_C_SENSE (1 << 28)
-/**
+/*
* Enables DAC state detection logic, for load-based TV detection.
*
* The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
* to off, for load detection to work.
*/
# define TVDAC_STATE_CHG_EN (1 << 27)
-/** Sets the DAC A sense value to high */
+/* Sets the DAC A sense value to high */
# define TVDAC_A_SENSE_CTL (1 << 26)
-/** Sets the DAC B sense value to high */
+/* Sets the DAC B sense value to high */
# define TVDAC_B_SENSE_CTL (1 << 25)
-/** Sets the DAC C sense value to high */
+/* Sets the DAC C sense value to high */
# define TVDAC_C_SENSE_CTL (1 << 24)
-/** Overrides the ENC_ENABLE and DAC voltage levels */
+/* Overrides the ENC_ENABLE and DAC voltage levels */
# define DAC_CTL_OVERRIDE (1 << 7)
-/** Sets the slew rate. Must be preserved in software */
+/* Sets the slew rate. Must be preserved in software */
# define ENC_TVDAC_SLEW_FAST (1 << 6)
# define DAC_A_1_3_V (0 << 4)
# define DAC_A_1_1_V (1 << 4)
@@ -1203,7 +3026,7 @@
# define DAC_C_0_7_V (2 << 0)
# define DAC_C_MASK (3 << 0)
-/**
+/*
* CSC coefficients are stored in a floating point format with 9 bits of
* mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n,
* where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
@@ -1218,7 +3041,7 @@
#define TV_CSC_Y2 0x68014
# define TV_BY_MASK 0x07ff0000
# define TV_BY_SHIFT 16
-/**
+/*
* Y attenuation for component video.
*
* Stored in 1.9 fixed point.
@@ -1235,7 +3058,7 @@
#define TV_CSC_U2 0x6801c
# define TV_BU_MASK 0x07ff0000
# define TV_BU_SHIFT 16
-/**
+/*
* U attenuation for component video.
*
* Stored in 1.9 fixed point.
@@ -1252,7 +3075,7 @@
#define TV_CSC_V2 0x68024
# define TV_BV_MASK 0x07ff0000
# define TV_BV_SHIFT 16
-/**
+/*
* V attenuation for component video.
*
* Stored in 1.9 fixed point.
@@ -1261,74 +3084,74 @@
# define TV_AV_SHIFT 0
#define TV_CLR_KNOBS 0x68028
-/** 2s-complement brightness adjustment */
+/* 2s-complement brightness adjustment */
# define TV_BRIGHTNESS_MASK 0xff000000
# define TV_BRIGHTNESS_SHIFT 24
-/** Contrast adjustment, as a 2.6 unsigned floating point number */
+/* Contrast adjustment, as a 2.6 unsigned floating point number */
# define TV_CONTRAST_MASK 0x00ff0000
# define TV_CONTRAST_SHIFT 16
-/** Saturation adjustment, as a 2.6 unsigned floating point number */
+/* Saturation adjustment, as a 2.6 unsigned floating point number */
# define TV_SATURATION_MASK 0x0000ff00
# define TV_SATURATION_SHIFT 8
-/** Hue adjustment, as an integer phase angle in degrees */
+/* Hue adjustment, as an integer phase angle in degrees */
# define TV_HUE_MASK 0x000000ff
# define TV_HUE_SHIFT 0
#define TV_CLR_LEVEL 0x6802c
-/** Controls the DAC level for black */
+/* Controls the DAC level for black */
# define TV_BLACK_LEVEL_MASK 0x01ff0000
# define TV_BLACK_LEVEL_SHIFT 16
-/** Controls the DAC level for blanking */
+/* Controls the DAC level for blanking */
# define TV_BLANK_LEVEL_MASK 0x000001ff
# define TV_BLANK_LEVEL_SHIFT 0
#define TV_H_CTL_1 0x68030
-/** Number of pixels in the hsync. */
+/* Number of pixels in the hsync. */
# define TV_HSYNC_END_MASK 0x1fff0000
# define TV_HSYNC_END_SHIFT 16
-/** Total number of pixels minus one in the line (display and blanking). */
+/* Total number of pixels minus one in the line (display and blanking). */
# define TV_HTOTAL_MASK 0x00001fff
# define TV_HTOTAL_SHIFT 0
#define TV_H_CTL_2 0x68034
-/** Enables the colorburst (needed for non-component color) */
+/* Enables the colorburst (needed for non-component color) */
# define TV_BURST_ENA (1 << 31)
-/** Offset of the colorburst from the start of hsync, in pixels minus one. */
+/* Offset of the colorburst from the start of hsync, in pixels minus one. */
# define TV_HBURST_START_SHIFT 16
# define TV_HBURST_START_MASK 0x1fff0000
-/** Length of the colorburst */
+/* Length of the colorburst */
# define TV_HBURST_LEN_SHIFT 0
# define TV_HBURST_LEN_MASK 0x0001fff
#define TV_H_CTL_3 0x68038
-/** End of hblank, measured in pixels minus one from start of hsync */
+/* End of hblank, measured in pixels minus one from start of hsync */
# define TV_HBLANK_END_SHIFT 16
# define TV_HBLANK_END_MASK 0x1fff0000
-/** Start of hblank, measured in pixels minus one from start of hsync */
+/* Start of hblank, measured in pixels minus one from start of hsync */
# define TV_HBLANK_START_SHIFT 0
# define TV_HBLANK_START_MASK 0x0001fff
#define TV_V_CTL_1 0x6803c
-/** XXX */
+/* XXX */
# define TV_NBR_END_SHIFT 16
# define TV_NBR_END_MASK 0x07ff0000
-/** XXX */
+/* XXX */
# define TV_VI_END_F1_SHIFT 8
# define TV_VI_END_F1_MASK 0x00003f00
-/** XXX */
+/* XXX */
# define TV_VI_END_F2_SHIFT 0
# define TV_VI_END_F2_MASK 0x0000003f
#define TV_V_CTL_2 0x68040
-/** Length of vsync, in half lines */
+/* Length of vsync, in half lines */
# define TV_VSYNC_LEN_MASK 0x07ff0000
# define TV_VSYNC_LEN_SHIFT 16
-/** Offset of the start of vsync in field 1, measured in one less than the
+/* Offset of the start of vsync in field 1, measured in one less than the
* number of half lines.
*/
# define TV_VSYNC_START_F1_MASK 0x00007f00
# define TV_VSYNC_START_F1_SHIFT 8
-/**
+/*
* Offset of the start of vsync in field 2, measured in one less than the
* number of half lines.
*/
@@ -1336,17 +3159,17 @@
# define TV_VSYNC_START_F2_SHIFT 0
#define TV_V_CTL_3 0x68044
-/** Enables generation of the equalization signal */
+/* Enables generation of the equalization signal */
# define TV_EQUAL_ENA (1 << 31)
-/** Length of vsync, in half lines */
+/* Length of vsync, in half lines */
# define TV_VEQ_LEN_MASK 0x007f0000
# define TV_VEQ_LEN_SHIFT 16
-/** Offset of the start of equalization in field 1, measured in one less than
+/* Offset of the start of equalization in field 1, measured in one less than
* the number of half lines.
*/
# define TV_VEQ_START_F1_MASK 0x0007f00
# define TV_VEQ_START_F1_SHIFT 8
-/**
+/*
* Offset of the start of equalization in field 2, measured in one less than
* the number of half lines.
*/
@@ -1354,13 +3177,13 @@
# define TV_VEQ_START_F2_SHIFT 0
#define TV_V_CTL_4 0x68048
-/**
+/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
*/
# define TV_VBURST_START_F1_MASK 0x003f0000
# define TV_VBURST_START_F1_SHIFT 16
-/**
+/*
* Offset to the end of vertical colorburst, measured in one less than the
* number of lines from the start of NBR.
*/
@@ -1368,13 +3191,13 @@
# define TV_VBURST_END_F1_SHIFT 0
#define TV_V_CTL_5 0x6804c
-/**
+/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
*/
# define TV_VBURST_START_F2_MASK 0x003f0000
# define TV_VBURST_START_F2_SHIFT 16
-/**
+/*
* Offset to the end of vertical colorburst, measured in one less than the
* number of lines from the start of NBR.
*/
@@ -1382,13 +3205,13 @@
# define TV_VBURST_END_F2_SHIFT 0
#define TV_V_CTL_6 0x68050
-/**
+/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
*/
# define TV_VBURST_START_F3_MASK 0x003f0000
# define TV_VBURST_START_F3_SHIFT 16
-/**
+/*
* Offset to the end of vertical colorburst, measured in one less than the
* number of lines from the start of NBR.
*/
@@ -1396,13 +3219,13 @@
# define TV_VBURST_END_F3_SHIFT 0
#define TV_V_CTL_7 0x68054
-/**
+/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
*/
# define TV_VBURST_START_F4_MASK 0x003f0000
# define TV_VBURST_START_F4_SHIFT 16
-/**
+/*
* Offset to the end of vertical colorburst, measured in one less than the
* number of lines from the start of NBR.
*/
@@ -1410,56 +3233,56 @@
# define TV_VBURST_END_F4_SHIFT 0
#define TV_SC_CTL_1 0x68060
-/** Turns on the first subcarrier phase generation DDA */
+/* Turns on the first subcarrier phase generation DDA */
# define TV_SC_DDA1_EN (1 << 31)
-/** Turns on the first subcarrier phase generation DDA */
+/* Turns on the second subcarrier phase generation DDA */
# define TV_SC_DDA2_EN (1 << 30)
-/** Turns on the first subcarrier phase generation DDA */
+/* Turns on the third subcarrier phase generation DDA */
# define TV_SC_DDA3_EN (1 << 29)
-/** Sets the subcarrier DDA to reset frequency every other field */
+/* Sets the subcarrier DDA to reset frequency every other field */
# define TV_SC_RESET_EVERY_2 (0 << 24)
-/** Sets the subcarrier DDA to reset frequency every fourth field */
+/* Sets the subcarrier DDA to reset frequency every fourth field */
# define TV_SC_RESET_EVERY_4 (1 << 24)
-/** Sets the subcarrier DDA to reset frequency every eighth field */
+/* Sets the subcarrier DDA to reset frequency every eighth field */
# define TV_SC_RESET_EVERY_8 (2 << 24)
-/** Sets the subcarrier DDA to never reset the frequency */
+/* Sets the subcarrier DDA to never reset the frequency */
# define TV_SC_RESET_NEVER (3 << 24)
-/** Sets the peak amplitude of the colorburst.*/
+/* Sets the peak amplitude of the colorburst. */
# define TV_BURST_LEVEL_MASK 0x00ff0000
# define TV_BURST_LEVEL_SHIFT 16
-/** Sets the increment of the first subcarrier phase generation DDA */
+/* Sets the increment of the first subcarrier phase generation DDA */
# define TV_SCDDA1_INC_MASK 0x00000fff
# define TV_SCDDA1_INC_SHIFT 0
#define TV_SC_CTL_2 0x68064
-/** Sets the rollover for the second subcarrier phase generation DDA */
+/* Sets the rollover for the second subcarrier phase generation DDA */
# define TV_SCDDA2_SIZE_MASK 0x7fff0000
# define TV_SCDDA2_SIZE_SHIFT 16
+/* Sets the increment of the second subcarrier phase generation DDA */
+/* Sets the increent of the second subcarrier phase generation DDA */
# define TV_SCDDA2_INC_MASK 0x00007fff
# define TV_SCDDA2_INC_SHIFT 0
#define TV_SC_CTL_3 0x68068
-/** Sets the rollover for the third subcarrier phase generation DDA */
+/* Sets the rollover for the third subcarrier phase generation DDA */
# define TV_SCDDA3_SIZE_MASK 0x7fff0000
# define TV_SCDDA3_SIZE_SHIFT 16
-/** Sets the increent of the third subcarrier phase generation DDA */
+/* Sets the increment of the third subcarrier phase generation DDA */
# define TV_SCDDA3_INC_MASK 0x00007fff
# define TV_SCDDA3_INC_SHIFT 0
#define TV_WIN_POS 0x68070
-/** X coordinate of the display from the start of horizontal active */
+/* X coordinate of the display from the start of horizontal active */
# define TV_XPOS_MASK 0x1fff0000
# define TV_XPOS_SHIFT 16
-/** Y coordinate of the display from the start of vertical active (NBR) */
+/* Y coordinate of the display from the start of vertical active (NBR) */
# define TV_YPOS_MASK 0x00000fff
# define TV_YPOS_SHIFT 0
#define TV_WIN_SIZE 0x68074
-/** Horizontal size of the display window, measured in pixels*/
+/* Horizontal size of the display window, measured in pixels */
# define TV_XSIZE_MASK 0x1fff0000
# define TV_XSIZE_SHIFT 16
-/**
+/*
* Vertical size of the display window, measured in pixels.
*
* Must be even for interlaced modes.
@@ -1468,28 +3291,28 @@
# define TV_YSIZE_SHIFT 0
#define TV_FILTER_CTL_1 0x68080
-/**
+/*
* Enables automatic scaling calculation.
*
* If set, the rest of the registers are ignored, and the calculated values can
* be read back from the register.
*/
# define TV_AUTO_SCALE (1 << 31)
-/**
+/*
* Disables the vertical filter.
*
* This is required on modes more than 1024 pixels wide */
# define TV_V_FILTER_BYPASS (1 << 29)
-/** Enables adaptive vertical filtering */
+/* Enables adaptive vertical filtering */
# define TV_VADAPT (1 << 28)
# define TV_VADAPT_MODE_MASK (3 << 26)
-/** Selects the least adaptive vertical filtering mode */
+/* Selects the least adaptive vertical filtering mode */
# define TV_VADAPT_MODE_LEAST (0 << 26)
-/** Selects the moderately adaptive vertical filtering mode */
+/* Selects the moderately adaptive vertical filtering mode */
# define TV_VADAPT_MODE_MODERATE (1 << 26)
-/** Selects the most adaptive vertical filtering mode */
+/* Selects the most adaptive vertical filtering mode */
# define TV_VADAPT_MODE_MOST (3 << 26)
-/**
+/*
* Sets the horizontal scaling factor.
*
* This should be the fractional part of the horizontal scaling factor divided
@@ -1501,14 +3324,14 @@
# define TV_HSCALE_FRAC_SHIFT 0
#define TV_FILTER_CTL_2 0x68084
-/**
+/*
* Sets the integer part of the 3.15 fixed-point vertical scaling factor.
*
* TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
*/
# define TV_VSCALE_INT_MASK 0x00038000
# define TV_VSCALE_INT_SHIFT 15
-/**
+/*
* Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
*
* \sa TV_VSCALE_INT_MASK
@@ -1517,7 +3340,7 @@
# define TV_VSCALE_FRAC_SHIFT 0
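The formula in the comment above translates directly into a 3.15 fixed-point
computation; a minimal sketch with a hypothetical helper, assuming the 15
fractional bits sit below TV_VSCALE_INT_SHIFT:

/* Sketch: build the 3.15 fixed-point vertical scale factor,
 * TV_VSCALE = (src height - 1) / ((interlace * dest height) - 1). */
static u32 tv_vscale(u32 src_h, u32 dest_h, u32 interlace)
{
        u32 scale = ((src_h - 1) << TV_VSCALE_INT_SHIFT) /
                    (interlace * dest_h - 1);

        return scale & (TV_VSCALE_INT_MASK |
                        ((1 << TV_VSCALE_INT_SHIFT) - 1));
}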
#define TV_FILTER_CTL_3 0x68088
-/**
+/*
* Sets the integer part of the 3.15 fixed-point vertical scaling factor.
*
* TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
@@ -1526,7 +3349,7 @@
*/
# define TV_VSCALE_IP_INT_MASK 0x00038000
# define TV_VSCALE_IP_INT_SHIFT 15
-/**
+/*
* Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
*
* For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
@@ -1538,26 +3361,26 @@
#define TV_CC_CONTROL 0x68090
# define TV_CC_ENABLE (1 << 31)
-/**
+/*
* Specifies which field to send the CC data in.
*
* CC data is usually sent in field 0.
*/
# define TV_CC_FID_MASK (1 << 27)
# define TV_CC_FID_SHIFT 27
-/** Sets the horizontal position of the CC data. Usually 135. */
+/* Sets the horizontal position of the CC data. Usually 135. */
# define TV_CC_HOFF_MASK 0x03ff0000
# define TV_CC_HOFF_SHIFT 16
-/** Sets the vertical position of the CC data. Usually 21 */
+/* Sets the vertical position of the CC data. Usually 21 */
# define TV_CC_LINE_MASK 0x0000003f
# define TV_CC_LINE_SHIFT 0
#define TV_CC_DATA 0x68094
# define TV_CC_RDY (1 << 31)
-/** Second word of CC data to be transmitted. */
+/* Second word of CC data to be transmitted. */
# define TV_CC_DATA_2_MASK 0x007f0000
# define TV_CC_DATA_2_SHIFT 16
-/** First word of CC data to be transmitted. */
+/* First word of CC data to be transmitted. */
# define TV_CC_DATA_1_MASK 0x0000007f
# define TV_CC_DATA_1_SHIFT 0
@@ -1578,6 +3401,9 @@
#define DP_PORT_EN (1 << 31)
#define DP_PIPEB_SELECT (1 << 30)
+#define DP_PIPE_MASK (1 << 30)
+#define DP_PIPE_SELECT_CHV(pipe) ((pipe) << 16)
+#define DP_PIPE_MASK_CHV (3 << 16)
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
@@ -1587,6 +3413,14 @@
#define DP_LINK_TRAIN_MASK (3 << 28)
#define DP_LINK_TRAIN_SHIFT 28
+/* CPT Link training mode */
+#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
+#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8)
+#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8)
+#define DP_LINK_TRAIN_OFF_CPT (3 << 8)
+#define DP_LINK_TRAIN_MASK_CPT (7 << 8)
+#define DP_LINK_TRAIN_SHIFT_CPT 8
+
/* Signal voltages. These are mostly controlled by the other end */
#define DP_VOLTAGE_0_4 (0 << 25)
#define DP_VOLTAGE_0_6 (1 << 25)
@@ -1606,9 +3440,7 @@
#define DP_PRE_EMPHASIS_SHIFT 22
/* How many wires to use. I guess 3 was too hard */
-#define DP_PORT_WIDTH_1 (0 << 19)
-#define DP_PORT_WIDTH_2 (1 << 19)
-#define DP_PORT_WIDTH_4 (3 << 19)
+#define DP_PORT_WIDTH(width) (((width) - 1) << 19)
#define DP_PORT_WIDTH_MASK (7 << 19)
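The parameterized macro reproduces the fixed defines it replaces:

/* DP_PORT_WIDTH(1) == (0 << 19), DP_PORT_WIDTH(2) == (1 << 19),
 * DP_PORT_WIDTH(4) == (3 << 19) -- the same values as the removed
 * DP_PORT_WIDTH_1/_2/_4 defines. */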
/* Mystic DPCD version 1.1 special mode */
@@ -1619,32 +3451,32 @@
#define DP_PLL_FREQ_160MHZ (1 << 16)
#define DP_PLL_FREQ_MASK (3 << 16)
-/** locked once port is enabled */
+/* locked once port is enabled */
#define DP_PORT_REVERSAL (1 << 15)
/* eDP */
#define DP_PLL_ENABLE (1 << 14)
-/** sends the clock on lane 15 of the PEG for debug */
+/* sends the clock on lane 15 of the PEG for debug */
#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
#define DP_SCRAMBLING_DISABLE (1 << 12)
#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
-/** limit RGB values to avoid confusing TVs */
+/* limit RGB values to avoid confusing TVs */
#define DP_COLOR_RANGE_16_235 (1 << 8)
-/** Turn on the audio link */
+/* Turn on the audio link */
#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
-/** vs and hs sync polarity */
+/* vs and hs sync polarity */
#define DP_SYNC_VS_HIGH (1 << 4)
#define DP_SYNC_HS_HIGH (1 << 3)
-/** A fantasy */
+/* A fantasy */
#define DP_DETECTED (1 << 2)
-/** The aux channel provides a way to talk to the
+/* The aux channel provides a way to talk to the
* signal sink for DDC etc. Max packet size supported
* is 20 bytes in each direction, hence the 5 fixed
* data registers
@@ -1712,17 +3544,19 @@
* which is after the LUTs, so we want the bytes for our color format.
* For our current usage, this is always 3, one byte for R, G and B.
*/
-#define PIPEA_GMCH_DATA_M 0x70050
-#define PIPEB_GMCH_DATA_M 0x71050
+#define _PIPEA_DATA_M_G4X 0x70050
+#define _PIPEB_DATA_M_G4X 0x71050
/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
-#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25)
-#define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25
+#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
+#define TU_SIZE_SHIFT 25
+#define TU_SIZE_MASK (0x3f << 25)
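The default noted in the comment checks out:

/* TU_SIZE(64) == ((64 - 1) << 25) == (0x3f << 25), i.e. the default
 * transfer unit of 64 fills TU_SIZE_MASK exactly. */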
-#define PIPE_GMCH_DATA_M_MASK (0xffffff)
+#define DATA_LINK_M_N_MASK (0xffffff)
+#define DATA_LINK_N_MAX (0x800000)
-#define PIPEA_GMCH_DATA_N 0x70054
-#define PIPEB_GMCH_DATA_N 0x71054
+#define _PIPEA_DATA_N_G4X 0x70054
+#define _PIPEB_DATA_N_G4X 0x71054
#define PIPE_GMCH_DATA_N_MASK (0xffffff)
/*
@@ -1736,70 +3570,205 @@
* Attributes and VB-ID.
*/
-#define PIPEA_DP_LINK_M 0x70060
-#define PIPEB_DP_LINK_M 0x71060
+#define _PIPEA_LINK_M_G4X 0x70060
+#define _PIPEB_LINK_M_G4X 0x71060
#define PIPEA_DP_LINK_M_MASK (0xffffff)
-#define PIPEA_DP_LINK_N 0x70064
-#define PIPEB_DP_LINK_N 0x71064
+#define _PIPEA_LINK_N_G4X 0x70064
+#define _PIPEB_LINK_N_G4X 0x71064
#define PIPEA_DP_LINK_N_MASK (0xffffff)
+#define PIPE_DATA_M_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
+#define PIPE_DATA_N_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
+#define PIPE_LINK_M_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
+#define PIPE_LINK_N_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
+
/* Display & cursor control */
-/* dithering flag on Ironlake */
-#define PIPE_ENABLE_DITHER (1 << 4)
/* Pipe A */
-#define PIPEADSL 0x70000
-#define PIPEACONF 0x70008
-#define PIPEACONF_ENABLE (1<<31)
-#define PIPEACONF_DISABLE 0
-#define PIPEACONF_DOUBLE_WIDE (1<<30)
+#define _PIPEADSL 0x70000
+#define DSL_LINEMASK_GEN2 0x00000fff
+#define DSL_LINEMASK_GEN3 0x00001fff
+#define _PIPEACONF 0x70008
+#define PIPECONF_ENABLE (1<<31)
+#define PIPECONF_DISABLE 0
+#define PIPECONF_DOUBLE_WIDE (1<<30)
#define I965_PIPECONF_ACTIVE (1<<30)
-#define PIPEACONF_SINGLE_WIDE 0
-#define PIPEACONF_PIPE_UNLOCKED 0
-#define PIPEACONF_PIPE_LOCKED (1<<25)
-#define PIPEACONF_PALETTE 0
-#define PIPEACONF_GAMMA (1<<24)
+#define PIPECONF_DSI_PLL_LOCKED (1<<29) /* vlv & pipe A only */
+#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
+#define PIPECONF_SINGLE_WIDE 0
+#define PIPECONF_PIPE_UNLOCKED 0
+#define PIPECONF_PIPE_LOCKED (1<<25)
+#define PIPECONF_PALETTE 0
+#define PIPECONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
-#define PIPECONF_PROGRESSIVE (0 << 21)
+#define PIPECONF_INTERLACE_MASK (7 << 21)
+#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
+/* Note that pre-gen3 does not support interlaced display directly. Panel
+ * fitting must be disabled on pre-ilk for interlaced. */
+#define PIPECONF_PROGRESSIVE (0 << 21)
+#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */
+#define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */
#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
-#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) /* gen3 only */
+/* Ironlake and later have a completely new set of values for interlaced. PFIT
+ * means panel fitter required, PF means progressive fetch, DBL means power
+ * saving pixel doubling. */
+#define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21)
+#define PIPECONF_INTERLACED_ILK (3 << 21)
+#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
+#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
+#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
+#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
-#define PIPEASTAT 0x70024
+#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
+#define PIPECONF_BPC_MASK (0x7 << 5)
+#define PIPECONF_8BPC (0<<5)
+#define PIPECONF_10BPC (1<<5)
+#define PIPECONF_6BPC (2<<5)
+#define PIPECONF_12BPC (3<<5)
+#define PIPECONF_DITHER_EN (1<<4)
+#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
+#define PIPECONF_DITHER_TYPE_SP (0<<2)
+#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
+#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
+#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
+#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
+#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
+#define PERF_COUNTER2_INTERRUPT_EN (1UL<<27)
#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
+#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22)
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
+#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19)
+#define PERF_COUNTER_INTERRUPT_EN (1UL<<19)
#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL<<17)
#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
+#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
+#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL<<15)
+#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
+#define PERF_COUNTER2_INTERRUPT_STATUS (1UL<<11)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
+#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10)
#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
#define PIPE_DPST_EVENT_STATUS (1UL<<7)
#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
+#define PIPE_A_PSR_STATUS_VLV (1UL<<6)
#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
+#define PIPE_B_PSR_STATUS_VLV (1UL<<3)
+#define PERF_COUNTER_INTERRUPT_STATUS (1UL<<3)
#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL<<1)
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
+#define PIPE_HBLANK_INT_STATUS (1UL<<0)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
-#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
-#define PIPE_8BPC (0 << 5)
-#define PIPE_10BPC (1 << 5)
-#define PIPE_6BPC (2 << 5)
-#define PIPE_12BPC (3 << 5)
+
+#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
+#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
+
+#define PIPE_A_OFFSET 0x70000
+#define PIPE_B_OFFSET 0x71000
+#define PIPE_C_OFFSET 0x72000
+#define CHV_PIPE_C_OFFSET 0x74000
+/*
+ * There's actually no pipe EDP. Some pipe registers have
+ * simply shifted from the pipe to the transcoder, while
+ * keeping their original offset. Thus we need PIPE_EDP_OFFSET
+ * to access such registers in transcoder EDP.
+ */
+#define PIPE_EDP_OFFSET 0x7f000
+
+#define _PIPE2(pipe, reg) (dev_priv->info.pipe_offsets[pipe] - \
+ dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
+ dev_priv->info.display_mmio_offset)
+
+#define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF)
+#define PIPEDSL(pipe) _PIPE2(pipe, _PIPEADSL)
+#define PIPEFRAME(pipe) _PIPE2(pipe, _PIPEAFRAMEHIGH)
+#define PIPEFRAMEPIXEL(pipe) _PIPE2(pipe, _PIPEAFRAMEPIXEL)
+#define PIPESTAT(pipe) _PIPE2(pipe, _PIPEASTAT)
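A worked expansion, assuming a device whose pipe_offsets[] table holds the
PIPE_A_OFFSET/PIPE_B_OFFSET values above and whose display_mmio_offset is zero:

/* PIPECONF(PIPE_B)
 *   = pipe_offsets[PIPE_B] - pipe_offsets[PIPE_A] + _PIPEACONF + 0
 *   = 0x71000 - 0x70000 + 0x70008
 *   = 0x71008, the traditional pipe B configuration address. */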
+
+#define _PIPE_MISC_A 0x70030
+#define _PIPE_MISC_B 0x71030
+#define PIPEMISC_DITHER_BPC_MASK (7<<5)
+#define PIPEMISC_DITHER_8_BPC (0<<5)
+#define PIPEMISC_DITHER_10_BPC (1<<5)
+#define PIPEMISC_DITHER_6_BPC (2<<5)
+#define PIPEMISC_DITHER_12_BPC (3<<5)
+#define PIPEMISC_DITHER_ENABLE (1<<4)
+#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
+#define PIPEMISC_DITHER_TYPE_SP (0<<2)
+#define PIPEMISC(pipe) _PIPE2(pipe, _PIPE_MISC_A)
+
+#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
+#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
+#define PIPEB_HLINE_INT_EN (1<<28)
+#define PIPEB_VBLANK_INT_EN (1<<27)
+#define SPRITED_FLIP_DONE_INT_EN (1<<26)
+#define SPRITEC_FLIP_DONE_INT_EN (1<<25)
+#define PLANEB_FLIP_DONE_INT_EN (1<<24)
+#define PIPE_PSR_INT_EN (1<<22)
+#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
+#define PIPEA_HLINE_INT_EN (1<<20)
+#define PIPEA_VBLANK_INT_EN (1<<19)
+#define SPRITEB_FLIP_DONE_INT_EN (1<<18)
+#define SPRITEA_FLIP_DONE_INT_EN (1<<17)
+#define PLANEA_FLIPDONE_INT_EN (1<<16)
+#define PIPEC_LINE_COMPARE_INT_EN (1<<13)
+#define PIPEC_HLINE_INT_EN (1<<12)
+#define PIPEC_VBLANK_INT_EN (1<<11)
+#define SPRITEF_FLIPDONE_INT_EN (1<<10)
+#define SPRITEE_FLIPDONE_INT_EN (1<<9)
+#define PLANEC_FLIPDONE_INT_EN (1<<8)
+
+#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
+#define SPRITEF_INVALID_GTT_INT_EN (1<<27)
+#define SPRITEE_INVALID_GTT_INT_EN (1<<26)
+#define PLANEC_INVALID_GTT_INT_EN (1<<25)
+#define CURSORC_INVALID_GTT_INT_EN (1<<24)
+#define CURSORB_INVALID_GTT_INT_EN (1<<23)
+#define CURSORA_INVALID_GTT_INT_EN (1<<22)
+#define SPRITED_INVALID_GTT_INT_EN (1<<21)
+#define SPRITEC_INVALID_GTT_INT_EN (1<<20)
+#define PLANEB_INVALID_GTT_INT_EN (1<<19)
+#define SPRITEB_INVALID_GTT_INT_EN (1<<18)
+#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
+#define PLANEA_INVALID_GTT_INT_EN (1<<16)
+#define DPINVGTT_EN_MASK 0xff0000
+#define DPINVGTT_EN_MASK_CHV 0xfff0000
+#define SPRITEF_INVALID_GTT_STATUS (1<<11)
+#define SPRITEE_INVALID_GTT_STATUS (1<<10)
+#define PLANEC_INVALID_GTT_STATUS (1<<9)
+#define CURSORC_INVALID_GTT_STATUS (1<<8)
+#define CURSORB_INVALID_GTT_STATUS (1<<7)
+#define CURSORA_INVALID_GTT_STATUS (1<<6)
+#define SPRITED_INVALID_GTT_STATUS (1<<5)
+#define SPRITEC_INVALID_GTT_STATUS (1<<4)
+#define PLANEB_INVALID_GTT_STATUS (1<<3)
+#define SPRITEB_INVALID_GTT_STATUS (1<<2)
+#define SPRITEA_INVALID_GTT_STATUS (1<<1)
+#define PLANEA_INVALID_GTT_STATUS (1<<0)
+#define DPINVGTT_STATUS_MASK 0xff
+#define DPINVGTT_STATUS_MASK_CHV 0xfff
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -1809,29 +3778,88 @@
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
-#define DSPFW1 0x70034
+#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
+#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
+#define DSPFW_CURSORB_MASK (0x3f<<16)
#define DSPFW_PLANEB_SHIFT 8
-#define DSPFW2 0x70038
+#define DSPFW_PLANEB_MASK (0x7f<<8)
+#define DSPFW_PLANEA_MASK (0x7f)
+#define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038)
#define DSPFW_CURSORA_MASK 0x00003f00
-#define DSPFW_CURSORA_SHIFT 16
-#define DSPFW3 0x7003c
+#define DSPFW_CURSORA_SHIFT 8
+#define DSPFW_PLANEC_MASK (0x7f)
+#define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c)
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
+#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
+#define DSPFW_HPLL_CURSOR_SHIFT 16
+#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
+#define DSPFW_HPLL_SR_MASK (0x1ff)
+#define DSPFW4 (dev_priv->info.display_mmio_offset + 0x70070)
+#define DSPFW7 (dev_priv->info.display_mmio_offset + 0x7007c)
+
+/* drain latency register values */
+#define DRAIN_LATENCY_PRECISION_32 32
+#define DRAIN_LATENCY_PRECISION_16 16
+#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050)
+#define DDL_CURSORA_PRECISION_32 (1<<31)
+#define DDL_CURSORA_PRECISION_16 (0<<31)
+#define DDL_CURSORA_SHIFT 24
+#define DDL_SPRITEB_PRECISION_32 (1<<23)
+#define DDL_SPRITEB_PRECISION_16 (0<<23)
+#define DDL_SPRITEB_SHIFT 16
+#define DDL_SPRITEA_PRECISION_32 (1<<15)
+#define DDL_SPRITEA_PRECISION_16 (0<<15)
+#define DDL_SPRITEA_SHIFT 8
+#define DDL_PLANEA_PRECISION_32 (1<<7)
+#define DDL_PLANEA_PRECISION_16 (0<<7)
+#define DDL_PLANEA_SHIFT 0
+
+#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
+#define DDL_CURSORB_PRECISION_32 (1<<31)
+#define DDL_CURSORB_PRECISION_16 (0<<31)
+#define DDL_CURSORB_SHIFT 24
+#define DDL_SPRITED_PRECISION_32 (1<<23)
+#define DDL_SPRITED_PRECISION_16 (0<<23)
+#define DDL_SPRITED_SHIFT 16
+#define DDL_SPRITEC_PRECISION_32 (1<<15)
+#define DDL_SPRITEC_PRECISION_16 (0<<15)
+#define DDL_SPRITEC_SHIFT 8
+#define DDL_PLANEB_PRECISION_32 (1<<7)
+#define DDL_PLANEB_PRECISION_16 (0<<7)
+#define DDL_PLANEB_SHIFT 0
+
+#define VLV_DDL3 (VLV_DISPLAY_BASE + 0x70058)
+#define DDL_CURSORC_PRECISION_32 (1<<31)
+#define DDL_CURSORC_PRECISION_16 (0<<31)
+#define DDL_CURSORC_SHIFT 24
+#define DDL_SPRITEF_PRECISION_32 (1<<23)
+#define DDL_SPRITEF_PRECISION_16 (0<<23)
+#define DDL_SPRITEF_SHIFT 16
+#define DDL_SPRITEE_PRECISION_32 (1<<15)
+#define DDL_SPRITEE_PRECISION_16 (0<<15)
+#define DDL_SPRITEE_SHIFT 8
+#define DDL_PLANEC_PRECISION_32 (1<<7)
+#define DDL_PLANEC_PRECISION_16 (0<<7)
+#define DDL_PLANEC_SHIFT 0
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
#define I830_FIFO_LINE_SIZE 32
+#define VALLEYVIEW_FIFO_SIZE 255
#define G4X_FIFO_SIZE 127
-#define I945_FIFO_SIZE 127 /* 945 & 965 */
+#define I965_FIFO_SIZE 512
+#define I945_FIFO_SIZE 127
#define I915_FIFO_SIZE 95
#define I855GM_FIFO_SIZE 127 /* In cachelines */
#define I830_FIFO_SIZE 95
+#define VALLEYVIEW_MAX_WM 0xff
#define G4X_MAX_WM 0x3f
#define I915_MAX_WM 0x3f
@@ -1846,6 +3874,60 @@
#define PINEVIEW_CURSOR_DFT_WM 0
#define PINEVIEW_CURSOR_GUARD_WM 5
+#define VALLEYVIEW_CURSOR_MAX_WM 64
+#define I965_CURSOR_FIFO 64
+#define I965_CURSOR_MAX_WM 32
+#define I965_CURSOR_DFT_WM 8
+
+/* define the Watermark registers on Ironlake */
+#define WM0_PIPEA_ILK 0x45100
+#define WM0_PIPE_PLANE_MASK (0xffff<<16)
+#define WM0_PIPE_PLANE_SHIFT 16
+#define WM0_PIPE_SPRITE_MASK (0xff<<8)
+#define WM0_PIPE_SPRITE_SHIFT 8
+#define WM0_PIPE_CURSOR_MASK (0xff)
+
+#define WM0_PIPEB_ILK 0x45104
+#define WM0_PIPEC_IVB 0x45200
+#define WM1_LP_ILK 0x45108
+#define WM1_LP_SR_EN (1<<31)
+#define WM1_LP_LATENCY_SHIFT 24
+#define WM1_LP_LATENCY_MASK (0x7f<<24)
+#define WM1_LP_FBC_MASK (0xf<<20)
+#define WM1_LP_FBC_SHIFT 20
+#define WM1_LP_FBC_SHIFT_BDW 19
+#define WM1_LP_SR_MASK (0x7ff<<8)
+#define WM1_LP_SR_SHIFT 8
+#define WM1_LP_CURSOR_MASK (0xff)
+#define WM2_LP_ILK 0x4510c
+#define WM2_LP_EN (1<<31)
+#define WM3_LP_ILK 0x45110
+#define WM3_LP_EN (1<<31)
+#define WM1S_LP_ILK 0x45120
+#define WM2S_LP_IVB 0x45124
+#define WM3S_LP_IVB 0x45128
+#define WM1S_LP_EN (1<<31)
+
+#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
+ (WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \
+ ((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur))
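For example, with field values chosen arbitrarily:

/* HSW_WM_LP_VAL(4, 1, 0x200, 0x40) sets the enable bit plus latency 4,
 * FBC watermark 1, primary (SR) watermark 0x200 and cursor watermark
 * 0x40 in a single value. */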
+
+/* Memory latency timer register */
+#define MLTR_ILK 0x11222
+#define MLTR_WM1_SHIFT 0
+#define MLTR_WM2_SHIFT 8
+/* the unit of memory self-refresh latency time is 0.5us */
+#define ILK_SRLT_MASK 0x3f
+
+
+/* the address where we get all kinds of latency values */
+#define SSKPD 0x5d10
+#define SSKPD_WM_MASK 0x3f
+#define SSKPD_WM0_SHIFT 0
+#define SSKPD_WM1_SHIFT 8
+#define SSKPD_WM2_SHIFT 16
+#define SSKPD_WM3_SHIFT 24
+
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
@@ -1861,24 +3943,27 @@
* } while (high1 != high2);
* frame = (high1 << 8) | low1;
*/
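Filled out, the loop in the comment looks roughly like this, using the defines
just below; read_reg() stands in for the driver's MMIO read helper and is not
a real function:

/* Sketch of a stable frame-counter read, per the comment above. */
static u32 read_frame_counter(void)
{
        u32 high1, high2, low;

        do {
                high1 = read_reg(_PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK;
                low = read_reg(_PIPEAFRAMEPIXEL);
                high2 = read_reg(_PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        low = (low & PIPE_FRAME_LOW_MASK) >> PIPE_FRAME_LOW_SHIFT;
        return (high1 << 8) | low;
}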
-#define PIPEAFRAMEHIGH 0x70040
+#define _PIPEAFRAMEHIGH 0x70040
#define PIPE_FRAME_HIGH_MASK 0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT 0
-#define PIPEAFRAMEPIXEL 0x70044
+#define _PIPEAFRAMEPIXEL 0x70044
#define PIPE_FRAME_LOW_MASK 0xff000000
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
/* GM45+ just has to be different */
-#define PIPEA_FRMCOUNT_GM45 0x70040
-#define PIPEA_FLIPCOUNT_GM45 0x70044
+#define _PIPEA_FRMCOUNT_GM45 0x70040
+#define _PIPEA_FLIPCOUNT_GM45 0x70044
+#define PIPE_FRMCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_GM45)
+#define PIPE_FLIPCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_GM45)
/* Cursor A & B regs */
-#define CURACNTR 0x70080
+#define _CURACNTR 0x70080
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
#define CURSOR_STRIDE_MASK 0x30000000
+#define CURSOR_PIPE_CSC_ENABLE (1<<24)
#define CURSOR_FORMAT_SHIFT 24
#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT)
@@ -1889,41 +3974,73 @@
/* New style CUR*CNTR flags */
#define CURSOR_MODE 0x27
#define CURSOR_MODE_DISABLE 0x00
+#define CURSOR_MODE_128_32B_AX 0x02
+#define CURSOR_MODE_256_32B_AX 0x03
#define CURSOR_MODE_64_32B_AX 0x07
+#define CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX)
+#define CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX)
#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
#define MCURSOR_PIPE_SELECT (1 << 28)
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
-#define CURABASE 0x70084
-#define CURAPOS 0x70088
+#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
+#define _CURABASE 0x70084
+#define _CURAPOS 0x70088
#define CURSOR_POS_MASK 0x007FF
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
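The position fields are sign-magnitude, so a negative coordinate sets the sign
bit and stores the magnitude; a hedged sketch with an illustrative helper name:

/* Sketch: pack sign-magnitude cursor coordinates into a CUR*POS value. */
static u32 cursor_pos(int x, int y)
{
        u32 pos = 0;

        if (x < 0) {
                pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
                x = -x;
        }
        if (y < 0) {
                pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
                y = -y;
        }
        pos |= ((u32)x & CURSOR_POS_MASK) << CURSOR_X_SHIFT;
        pos |= ((u32)y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT;

        return pos;
}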
#define CURSIZE 0x700a0
-#define CURBCNTR 0x700c0
-#define CURBBASE 0x700c4
-#define CURBPOS 0x700c8
+#define _CURBCNTR 0x700c0
+#define _CURBBASE 0x700c4
+#define _CURBPOS 0x700c8
+
+#define _CURBCNTR_IVB 0x71080
+#define _CURBBASE_IVB 0x71084
+#define _CURBPOS_IVB 0x71088
+
+#define _CURSOR2(pipe, reg) (dev_priv->info.cursor_offsets[(pipe)] - \
+ dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
+ dev_priv->info.display_mmio_offset)
+
+#define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
+#define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
+#define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
+
+#define CURSOR_A_OFFSET 0x70080
+#define CURSOR_B_OFFSET 0x700c0
+#define CHV_CURSOR_C_OFFSET 0x700e0
+#define IVB_CURSOR_B_OFFSET 0x71080
+#define IVB_CURSOR_C_OFFSET 0x72080
/* Display A control */
-#define DSPACNTR 0x70180
+#define _DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1<<31)
#define DISPLAY_PLANE_DISABLE 0
#define DISPPLANE_GAMMA_ENABLE (1<<30)
#define DISPPLANE_GAMMA_DISABLE 0
#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
+#define DISPPLANE_YUV422 (0x0<<26)
#define DISPPLANE_8BPP (0x2<<26)
-#define DISPPLANE_15_16BPP (0x4<<26)
-#define DISPPLANE_16BPP (0x5<<26)
-#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
-#define DISPPLANE_32BPP (0x7<<26)
-#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
+#define DISPPLANE_BGRA555 (0x3<<26)
+#define DISPPLANE_BGRX555 (0x4<<26)
+#define DISPPLANE_BGRX565 (0x5<<26)
+#define DISPPLANE_BGRX888 (0x6<<26)
+#define DISPPLANE_BGRA888 (0x7<<26)
+#define DISPPLANE_RGBX101010 (0x8<<26)
+#define DISPPLANE_RGBA101010 (0x9<<26)
+#define DISPPLANE_BGRX101010 (0xa<<26)
+#define DISPPLANE_RGBX161616 (0xc<<26)
+#define DISPPLANE_RGBX888 (0xe<<26)
+#define DISPPLANE_RGBA888 (0xf<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
-#define DISPPLANE_SEL_PIPE_MASK (1<<24)
+#define DISPPLANE_PIPE_CSC_ENABLE (1<<24)
+#define DISPPLANE_SEL_PIPE_SHIFT 24
+#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SEL_PIPE_A 0
-#define DISPPLANE_SEL_PIPE_B (1<<24)
+#define DISPPLANE_SEL_PIPE_B (1<<DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
#define DISPPLANE_SRC_KEY_DISABLE 0
#define DISPPLANE_LINE_DOUBLE (1<<20)
@@ -1932,50 +4049,269 @@
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
-#define DSPAADDR 0x70184
-#define DSPASTRIDE 0x70188
-#define DSPAPOS 0x7018C /* reserved */
-#define DSPASIZE 0x70190
-#define DSPASURF 0x7019C /* 965+ only */
-#define DSPATILEOFF 0x701A4 /* 965+ only */
+#define _DSPAADDR 0x70184
+#define _DSPASTRIDE 0x70188
+#define _DSPAPOS 0x7018C /* reserved */
+#define _DSPASIZE 0x70190
+#define _DSPASURF 0x7019C /* 965+ only */
+#define _DSPATILEOFF 0x701A4 /* 965+ only */
+#define _DSPAOFFSET 0x701A4 /* HSW */
+#define _DSPASURFLIVE 0x701AC
+
+#define DSPCNTR(plane) _PIPE2(plane, _DSPACNTR)
+#define DSPADDR(plane) _PIPE2(plane, _DSPAADDR)
+#define DSPSTRIDE(plane) _PIPE2(plane, _DSPASTRIDE)
+#define DSPPOS(plane) _PIPE2(plane, _DSPAPOS)
+#define DSPSIZE(plane) _PIPE2(plane, _DSPASIZE)
+#define DSPSURF(plane) _PIPE2(plane, _DSPASURF)
+#define DSPTILEOFF(plane) _PIPE2(plane, _DSPATILEOFF)
+#define DSPLINOFF(plane) DSPADDR(plane)
+#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET)
+#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE)
+
+/* Display/Sprite base address macros */
+#define DISP_BASEADDR_MASK (0xfffff000)
+#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
/* VBIOS flags */
-#define SWF00 0x71410
-#define SWF01 0x71414
-#define SWF02 0x71418
-#define SWF03 0x7141c
-#define SWF04 0x71420
-#define SWF05 0x71424
-#define SWF06 0x71428
-#define SWF10 0x70410
-#define SWF11 0x70414
-#define SWF14 0x71420
-#define SWF30 0x72414
-#define SWF31 0x72418
-#define SWF32 0x7241c
+#define SWF00 (dev_priv->info.display_mmio_offset + 0x71410)
+#define SWF01 (dev_priv->info.display_mmio_offset + 0x71414)
+#define SWF02 (dev_priv->info.display_mmio_offset + 0x71418)
+#define SWF03 (dev_priv->info.display_mmio_offset + 0x7141c)
+#define SWF04 (dev_priv->info.display_mmio_offset + 0x71420)
+#define SWF05 (dev_priv->info.display_mmio_offset + 0x71424)
+#define SWF06 (dev_priv->info.display_mmio_offset + 0x71428)
+#define SWF10 (dev_priv->info.display_mmio_offset + 0x70410)
+#define SWF11 (dev_priv->info.display_mmio_offset + 0x70414)
+#define SWF14 (dev_priv->info.display_mmio_offset + 0x71420)
+#define SWF30 (dev_priv->info.display_mmio_offset + 0x72414)
+#define SWF31 (dev_priv->info.display_mmio_offset + 0x72418)
+#define SWF32 (dev_priv->info.display_mmio_offset + 0x7241c)
/* Pipe B */
-#define PIPEBDSL 0x71000
-#define PIPEBCONF 0x71008
-#define PIPEBSTAT 0x71024
-#define PIPEBFRAMEHIGH 0x71040
-#define PIPEBFRAMEPIXEL 0x71044
-#define PIPEB_FRMCOUNT_GM45 0x71040
-#define PIPEB_FLIPCOUNT_GM45 0x71044
+#define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000)
+#define _PIPEBCONF (dev_priv->info.display_mmio_offset + 0x71008)
+#define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024)
+#define _PIPEBFRAMEHIGH 0x71040
+#define _PIPEBFRAMEPIXEL 0x71044
+#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71040)
+#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71044)
/* Display B control */
-#define DSPBCNTR 0x71180
+#define _DSPBCNTR (dev_priv->info.display_mmio_offset + 0x71180)
#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
-#define DSPBADDR 0x71184
-#define DSPBSTRIDE 0x71188
-#define DSPBPOS 0x7118C
-#define DSPBSIZE 0x71190
-#define DSPBSURF 0x7119C
-#define DSPBTILEOFF 0x711A4
+#define _DSPBADDR (dev_priv->info.display_mmio_offset + 0x71184)
+#define _DSPBSTRIDE (dev_priv->info.display_mmio_offset + 0x71188)
+#define _DSPBPOS (dev_priv->info.display_mmio_offset + 0x7118C)
+#define _DSPBSIZE (dev_priv->info.display_mmio_offset + 0x71190)
+#define _DSPBSURF (dev_priv->info.display_mmio_offset + 0x7119C)
+#define _DSPBTILEOFF (dev_priv->info.display_mmio_offset + 0x711A4)
+#define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4)
+#define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC)
+
+/* Sprite A control */
+#define _DVSACNTR 0x72180
+#define DVS_ENABLE (1<<31)
+#define DVS_GAMMA_ENABLE (1<<30)
+#define DVS_PIXFORMAT_MASK (3<<25)
+#define DVS_FORMAT_YUV422 (0<<25)
+#define DVS_FORMAT_RGBX101010 (1<<25)
+#define DVS_FORMAT_RGBX888 (2<<25)
+#define DVS_FORMAT_RGBX161616 (3<<25)
+#define DVS_PIPE_CSC_ENABLE (1<<24)
+#define DVS_SOURCE_KEY (1<<22)
+#define DVS_RGB_ORDER_XBGR (1<<20)
+#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
+#define DVS_YUV_ORDER_YUYV (0<<16)
+#define DVS_YUV_ORDER_UYVY (1<<16)
+#define DVS_YUV_ORDER_YVYU (2<<16)
+#define DVS_YUV_ORDER_VYUY (3<<16)
+#define DVS_DEST_KEY (1<<2)
+#define DVS_TRICKLE_FEED_DISABLE (1<<14)
+#define DVS_TILED (1<<10)
+#define _DVSALINOFF 0x72184
+#define _DVSASTRIDE 0x72188
+#define _DVSAPOS 0x7218c
+#define _DVSASIZE 0x72190
+#define _DVSAKEYVAL 0x72194
+#define _DVSAKEYMSK 0x72198
+#define _DVSASURF 0x7219c
+#define _DVSAKEYMAXVAL 0x721a0
+#define _DVSATILEOFF 0x721a4
+#define _DVSASURFLIVE 0x721ac
+#define _DVSASCALE 0x72204
+#define DVS_SCALE_ENABLE (1<<31)
+#define DVS_FILTER_MASK (3<<29)
+#define DVS_FILTER_MEDIUM (0<<29)
+#define DVS_FILTER_ENHANCING (1<<29)
+#define DVS_FILTER_SOFTENING (2<<29)
+#define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
+#define DVS_VERTICAL_OFFSET_ENABLE (1<<27)
+#define _DVSAGAMC 0x72300
+
+#define _DVSBCNTR 0x73180
+#define _DVSBLINOFF 0x73184
+#define _DVSBSTRIDE 0x73188
+#define _DVSBPOS 0x7318c
+#define _DVSBSIZE 0x73190
+#define _DVSBKEYVAL 0x73194
+#define _DVSBKEYMSK 0x73198
+#define _DVSBSURF 0x7319c
+#define _DVSBKEYMAXVAL 0x731a0
+#define _DVSBTILEOFF 0x731a4
+#define _DVSBSURFLIVE 0x731ac
+#define _DVSBSCALE 0x73204
+#define _DVSBGAMC 0x73300
+
+#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
+#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
+#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
+#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
+#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
+#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
+#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
+#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
+#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
+#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
+#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
+
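
The DVS*(pipe) helpers above lean on the _PIPE() macro defined earlier in this header; assuming its usual form, (a) + (pipe)*((b)-(a)), pipe 0 resolves to the A instance and pipe 1 to the B instance. A small sketch with the control register:

#include <stdio.h>

#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) /* assumed definition */
#define _DVSACNTR 0x72180
#define _DVSBCNTR 0x73180
#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)

int main(void)
{
	printf("DVSCNTR(0) = 0x%x\n", DVSCNTR(0)); /* 0x72180 */
	printf("DVSCNTR(1) = 0x%x\n", DVSCNTR(1)); /* 0x73180 */
	return 0;
}
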
+#define _SPRA_CTL 0x70280
+#define SPRITE_ENABLE (1<<31)
+#define SPRITE_GAMMA_ENABLE (1<<30)
+#define SPRITE_PIXFORMAT_MASK (7<<25)
+#define SPRITE_FORMAT_YUV422 (0<<25)
+#define SPRITE_FORMAT_RGBX101010 (1<<25)
+#define SPRITE_FORMAT_RGBX888 (2<<25)
+#define SPRITE_FORMAT_RGBX161616 (3<<25)
+#define SPRITE_FORMAT_YUV444 (4<<25)
+#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
+#define SPRITE_PIPE_CSC_ENABLE (1<<24)
+#define SPRITE_SOURCE_KEY (1<<22)
+#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
+#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
+#define SPRITE_YUV_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */
+#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16)
+#define SPRITE_YUV_ORDER_YUYV (0<<16)
+#define SPRITE_YUV_ORDER_UYVY (1<<16)
+#define SPRITE_YUV_ORDER_YVYU (2<<16)
+#define SPRITE_YUV_ORDER_VYUY (3<<16)
+#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
+#define SPRITE_INT_GAMMA_ENABLE (1<<13)
+#define SPRITE_TILED (1<<10)
+#define SPRITE_DEST_KEY (1<<2)
+#define _SPRA_LINOFF 0x70284
+#define _SPRA_STRIDE 0x70288
+#define _SPRA_POS 0x7028c
+#define _SPRA_SIZE 0x70290
+#define _SPRA_KEYVAL 0x70294
+#define _SPRA_KEYMSK 0x70298
+#define _SPRA_SURF 0x7029c
+#define _SPRA_KEYMAX 0x702a0
+#define _SPRA_TILEOFF 0x702a4
+#define _SPRA_OFFSET 0x702a4
+#define _SPRA_SURFLIVE 0x702ac
+#define _SPRA_SCALE 0x70304
+#define SPRITE_SCALE_ENABLE (1<<31)
+#define SPRITE_FILTER_MASK (3<<29)
+#define SPRITE_FILTER_MEDIUM (0<<29)
+#define SPRITE_FILTER_ENHANCING (1<<29)
+#define SPRITE_FILTER_SOFTENING (2<<29)
+#define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
+#define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27)
+#define _SPRA_GAMC 0x70400
+
+#define _SPRB_CTL 0x71280
+#define _SPRB_LINOFF 0x71284
+#define _SPRB_STRIDE 0x71288
+#define _SPRB_POS 0x7128c
+#define _SPRB_SIZE 0x71290
+#define _SPRB_KEYVAL 0x71294
+#define _SPRB_KEYMSK 0x71298
+#define _SPRB_SURF 0x7129c
+#define _SPRB_KEYMAX 0x712a0
+#define _SPRB_TILEOFF 0x712a4
+#define _SPRB_OFFSET 0x712a4
+#define _SPRB_SURFLIVE 0x712ac
+#define _SPRB_SCALE 0x71304
+#define _SPRB_GAMC 0x71400
+
+#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
+#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
+#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
+#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
+#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
+#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
+#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
+#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
+#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
+#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
+#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
+#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
+
+#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
+#define SP_ENABLE (1<<31)
+#define SP_GAMMA_ENABLE (1<<30)
+#define SP_PIXFORMAT_MASK (0xf<<26)
+#define SP_FORMAT_YUV422 (0<<26)
+#define SP_FORMAT_BGR565 (5<<26)
+#define SP_FORMAT_BGRX8888 (6<<26)
+#define SP_FORMAT_BGRA8888 (7<<26)
+#define SP_FORMAT_RGBX1010102 (8<<26)
+#define SP_FORMAT_RGBA1010102 (9<<26)
+#define SP_FORMAT_RGBX8888 (0xe<<26)
+#define SP_FORMAT_RGBA8888 (0xf<<26)
+#define SP_SOURCE_KEY (1<<22)
+#define SP_YUV_BYTE_ORDER_MASK (3<<16)
+#define SP_YUV_ORDER_YUYV (0<<16)
+#define SP_YUV_ORDER_UYVY (1<<16)
+#define SP_YUV_ORDER_YVYU (2<<16)
+#define SP_YUV_ORDER_VYUY (3<<16)
+#define SP_TILED (1<<10)
+#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
+#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
+#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
+#define _SPASIZE (VLV_DISPLAY_BASE + 0x72190)
+#define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194)
+#define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198)
+#define _SPASURF (VLV_DISPLAY_BASE + 0x7219c)
+#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
+#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
+#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
+#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4)
+
+#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
+#define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284)
+#define _SPBSTRIDE (VLV_DISPLAY_BASE + 0x72288)
+#define _SPBPOS (VLV_DISPLAY_BASE + 0x7228c)
+#define _SPBSIZE (VLV_DISPLAY_BASE + 0x72290)
+#define _SPBKEYMINVAL (VLV_DISPLAY_BASE + 0x72294)
+#define _SPBKEYMSK (VLV_DISPLAY_BASE + 0x72298)
+#define _SPBSURF (VLV_DISPLAY_BASE + 0x7229c)
+#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
+#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
+#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
+#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
+
+#define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR)
+#define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF)
+#define SPSTRIDE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASTRIDE, _SPBSTRIDE)
+#define SPPOS(pipe, plane) _PIPE(pipe * 2 + plane, _SPAPOS, _SPBPOS)
+#define SPSIZE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASIZE, _SPBSIZE)
+#define SPKEYMINVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMINVAL, _SPBKEYMINVAL)
+#define SPKEYMSK(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMSK, _SPBKEYMSK)
+#define SPSURF(pipe, plane) _PIPE(pipe * 2 + plane, _SPASURF, _SPBSURF)
+#define SPKEYMAXVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMAXVAL, _SPBKEYMAXVAL)
+#define SPTILEOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPATILEOFF, _SPBTILEOFF)
+#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
+#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
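
VLV packs two sprites per pipe at a 0x100 stride, so the SP*(pipe, plane) helpers above index with pipe * 2 + plane. A sketch, again assuming the usual _PIPE() definition and taking VLV_DISPLAY_BASE as 0x180000:

#include <stdio.h>

#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) /* assumed definition */
#define VLV_DISPLAY_BASE 0x180000 /* value assumed for the sketch */
#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
#define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR)

int main(void)
{
	int pipe, plane;

	/* four sprite control registers in a row, 0x100 apart */
	for (pipe = 0; pipe < 2; pipe++)
		for (plane = 0; plane < 2; plane++)
			printf("SPCNTR(%d, %d) = 0x%x\n",
			       pipe, plane, SPCNTR(pipe, plane));
	return 0;
}
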
/* VBIOS regs */
#define VGACNTRL 0x71400
@@ -1983,6 +4319,8 @@
# define VGA_2X_MODE (1 << 30)
# define VGA_PIPE_B_SELECT (1 << 29)
+#define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400)
+
/* Ironlake */
#define CPU_VGACNTRL 0x41000
@@ -2003,79 +4341,106 @@
#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
#define FDI_PLL_BIOS_0 0x46000
+#define FDI_PLL_FB_CLOCK_MASK 0xff
#define FDI_PLL_BIOS_1 0x46004
#define FDI_PLL_BIOS_2 0x46008
#define DISPLAY_PORT_PLL_BIOS_0 0x4600c
#define DISPLAY_PORT_PLL_BIOS_1 0x46010
#define DISPLAY_PORT_PLL_BIOS_2 0x46014
+#define PCH_3DCGDIS0 0x46020
+# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
+# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
+
+#define PCH_3DCGDIS1 0x46024
+# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
+
#define FDI_PLL_FREQ_CTL 0x46030
#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
-#define PIPEA_DATA_M1 0x60030
-#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
-#define TU_SIZE_MASK 0x7e000000
-#define PIPEA_DATA_M1_OFFSET 0
-#define PIPEA_DATA_N1 0x60034
-#define PIPEA_DATA_N1_OFFSET 0
-
-#define PIPEA_DATA_M2 0x60038
-#define PIPEA_DATA_M2_OFFSET 0
-#define PIPEA_DATA_N2 0x6003c
-#define PIPEA_DATA_N2_OFFSET 0
-
-#define PIPEA_LINK_M1 0x60040
-#define PIPEA_LINK_M1_OFFSET 0
-#define PIPEA_LINK_N1 0x60044
-#define PIPEA_LINK_N1_OFFSET 0
-
-#define PIPEA_LINK_M2 0x60048
-#define PIPEA_LINK_M2_OFFSET 0
-#define PIPEA_LINK_N2 0x6004c
-#define PIPEA_LINK_N2_OFFSET 0
+#define _PIPEA_DATA_M1 0x60030
+#define PIPE_DATA_M1_OFFSET 0
+#define _PIPEA_DATA_N1 0x60034
+#define PIPE_DATA_N1_OFFSET 0
-/* PIPEB timing regs are same start from 0x61000 */
+#define _PIPEA_DATA_M2 0x60038
+#define PIPE_DATA_M2_OFFSET 0
+#define _PIPEA_DATA_N2 0x6003c
+#define PIPE_DATA_N2_OFFSET 0
-#define PIPEB_DATA_M1 0x61030
-#define PIPEB_DATA_M1_OFFSET 0
-#define PIPEB_DATA_N1 0x61034
-#define PIPEB_DATA_N1_OFFSET 0
+#define _PIPEA_LINK_M1 0x60040
+#define PIPE_LINK_M1_OFFSET 0
+#define _PIPEA_LINK_N1 0x60044
+#define PIPE_LINK_N1_OFFSET 0
-#define PIPEB_DATA_M2 0x61038
-#define PIPEB_DATA_M2_OFFSET 0
-#define PIPEB_DATA_N2 0x6103c
-#define PIPEB_DATA_N2_OFFSET 0
+#define _PIPEA_LINK_M2 0x60048
+#define PIPE_LINK_M2_OFFSET 0
+#define _PIPEA_LINK_N2 0x6004c
+#define PIPE_LINK_N2_OFFSET 0
-#define PIPEB_LINK_M1 0x61040
-#define PIPEB_LINK_M1_OFFSET 0
-#define PIPEB_LINK_N1 0x61044
-#define PIPEB_LINK_N1_OFFSET 0
+/* PIPEB timing regs are the same; they start at 0x61000 */
-#define PIPEB_LINK_M2 0x61048
-#define PIPEB_LINK_M2_OFFSET 0
-#define PIPEB_LINK_N2 0x6104c
-#define PIPEB_LINK_N2_OFFSET 0
+#define _PIPEB_DATA_M1 0x61030
+#define _PIPEB_DATA_N1 0x61034
+#define _PIPEB_DATA_M2 0x61038
+#define _PIPEB_DATA_N2 0x6103c
+#define _PIPEB_LINK_M1 0x61040
+#define _PIPEB_LINK_N1 0x61044
+#define _PIPEB_LINK_M2 0x61048
+#define _PIPEB_LINK_N2 0x6104c
+
+#define PIPE_DATA_M1(tran) _TRANSCODER2(tran, _PIPEA_DATA_M1)
+#define PIPE_DATA_N1(tran) _TRANSCODER2(tran, _PIPEA_DATA_N1)
+#define PIPE_DATA_M2(tran) _TRANSCODER2(tran, _PIPEA_DATA_M2)
+#define PIPE_DATA_N2(tran) _TRANSCODER2(tran, _PIPEA_DATA_N2)
+#define PIPE_LINK_M1(tran) _TRANSCODER2(tran, _PIPEA_LINK_M1)
+#define PIPE_LINK_N1(tran) _TRANSCODER2(tran, _PIPEA_LINK_N1)
+#define PIPE_LINK_M2(tran) _TRANSCODER2(tran, _PIPEA_LINK_M2)
+#define PIPE_LINK_N2(tran) _TRANSCODER2(tran, _PIPEA_LINK_N2)
/* CPU panel fitter */
-#define PFA_CTL_1 0x68080
-#define PFB_CTL_1 0x68880
+/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
+#define _PFA_CTL_1 0x68080
+#define _PFB_CTL_1 0x68880
#define PF_ENABLE (1<<31)
+#define PF_PIPE_SEL_MASK_IVB (3<<29)
+#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
#define PF_FILTER_MASK (3<<23)
#define PF_FILTER_PROGRAMMED (0<<23)
#define PF_FILTER_MED_3x3 (1<<23)
#define PF_FILTER_EDGE_ENHANCE (2<<23)
#define PF_FILTER_EDGE_SOFTEN (3<<23)
-#define PFA_WIN_SZ 0x68074
-#define PFB_WIN_SZ 0x68874
-#define PFA_WIN_POS 0x68070
-#define PFB_WIN_POS 0x68870
+#define _PFA_WIN_SZ 0x68074
+#define _PFB_WIN_SZ 0x68874
+#define _PFA_WIN_POS 0x68070
+#define _PFB_WIN_POS 0x68870
+#define _PFA_VSCALE 0x68084
+#define _PFB_VSCALE 0x68884
+#define _PFA_HSCALE 0x68090
+#define _PFB_HSCALE 0x68890
+
+#define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
+#define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
+#define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
+#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
+#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
/* legacy palette */
-#define LGC_PALETTE_A 0x4a000
-#define LGC_PALETTE_B 0x4a800
+#define _LGC_PALETTE_A 0x4a000
+#define _LGC_PALETTE_B 0x4a800
+#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
+
+#define _GAMMA_MODE_A 0x4a480
+#define _GAMMA_MODE_B 0x4ac80
+#define GAMMA_MODE(pipe) _PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
+#define GAMMA_MODE_MODE_MASK (3 << 0)
+#define GAMMA_MODE_MODE_8BIT (0 << 0)
+#define GAMMA_MODE_MODE_10BIT (1 << 0)
+#define GAMMA_MODE_MODE_12BIT (2 << 0)
+#define GAMMA_MODE_MODE_SPLIT (3 << 0)
/* interrupts */
#define DE_MASTER_IRQ_CONTROL (1 << 31)
@@ -2083,6 +4448,7 @@
#define DE_SPRITEA_FLIP_DONE (1 << 28)
#define DE_PLANEB_FLIP_DONE (1 << 27)
#define DE_PLANEA_FLIP_DONE (1 << 26)
+#define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane)))
#define DE_PCU_EVENT (1 << 25)
#define DE_GTT_FAULT (1 << 24)
#define DE_POISON (1 << 23)
@@ -2096,72 +4462,320 @@
#define DE_PIPEB_ODD_FIELD (1 << 13)
#define DE_PIPEB_LINE_COMPARE (1 << 12)
#define DE_PIPEB_VSYNC (1 << 11)
+#define DE_PIPEB_CRC_DONE (1 << 10)
#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
#define DE_PIPEA_VBLANK (1 << 7)
+#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8*(pipe)))
#define DE_PIPEA_EVEN_FIELD (1 << 6)
#define DE_PIPEA_ODD_FIELD (1 << 5)
#define DE_PIPEA_LINE_COMPARE (1 << 4)
#define DE_PIPEA_VSYNC (1 << 3)
+#define DE_PIPEA_CRC_DONE (1 << 2)
+#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe)))
#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
+#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8*(pipe)))
+
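The parameterized DE_PIPE_*() forms above capture the layout of the Ironlake display interrupt register: the per-pipe bits repeat every 8 bit positions. A sketch that prints the pipe A and pipe B bits:

#include <stdio.h>

#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8*(pipe)))
#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe)))
#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8*(pipe)))

int main(void)
{
	int pipe;

	for (pipe = 0; pipe < 2; pipe++)
		printf("pipe %d: vblank 0x%08x crc 0x%08x underrun 0x%08x\n",
		       pipe, DE_PIPE_VBLANK(pipe), DE_PIPE_CRC_DONE(pipe),
		       DE_PIPE_FIFO_UNDERRUN(pipe));
	return 0;
}
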
+/* More Ivybridge lolz */
+#define DE_ERR_INT_IVB (1<<30)
+#define DE_GSE_IVB (1<<29)
+#define DE_PCH_EVENT_IVB (1<<28)
+#define DE_DP_A_HOTPLUG_IVB (1<<27)
+#define DE_AUX_CHANNEL_A_IVB (1<<26)
+#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
+#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
+#define DE_PIPEC_VBLANK_IVB (1<<10)
+#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
+#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
+#define DE_PIPEB_VBLANK_IVB (1<<5)
+#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
+#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
+#define DE_PLANE_FLIP_DONE_IVB(plane) (1 << (3 + 5*(plane)))
+#define DE_PIPEA_VBLANK_IVB (1<<0)
+#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5))
+
+#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
+#define MASTER_INTERRUPT_ENABLE (1<<31)
#define DEISR 0x44000
#define DEIMR 0x44004
#define DEIIR 0x44008
#define DEIER 0x4400c
-/* GT interrupt */
-#define GT_SYNC_STATUS (1 << 2)
-#define GT_USER_INTERRUPT (1 << 0)
-
#define GTISR 0x44010
#define GTIMR 0x44014
#define GTIIR 0x44018
#define GTIER 0x4401c
+#define GEN8_MASTER_IRQ 0x44200
+#define GEN8_MASTER_IRQ_CONTROL (1<<31)
+#define GEN8_PCU_IRQ (1<<30)
+#define GEN8_DE_PCH_IRQ (1<<23)
+#define GEN8_DE_MISC_IRQ (1<<22)
+#define GEN8_DE_PORT_IRQ (1<<20)
+#define GEN8_DE_PIPE_C_IRQ (1<<18)
+#define GEN8_DE_PIPE_B_IRQ (1<<17)
+#define GEN8_DE_PIPE_A_IRQ (1<<16)
+#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe))
+#define GEN8_GT_VECS_IRQ (1<<6)
+#define GEN8_GT_PM_IRQ (1<<4)
+#define GEN8_GT_VCS2_IRQ (1<<3)
+#define GEN8_GT_VCS1_IRQ (1<<2)
+#define GEN8_GT_BCS_IRQ (1<<1)
+#define GEN8_GT_RCS_IRQ (1<<0)
+
+#define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which)))
+#define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which)))
+#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
+#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))
+
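GEN8 splits the GT interrupt registers into four banks at a 0x10 stride, each holding an ISR/IMR/IIR/IER quartet; a sketch of the addressing (the bank count of 4 is implied by the *_IRQ_SHIFT defines below):

#include <stdio.h>

#define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which)))
#define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which)))
#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))

int main(void)
{
	int bank;

	for (bank = 0; bank < 4; bank++)
		printf("bank %d: ISR 0x%x IMR 0x%x IIR 0x%x IER 0x%x\n",
		       bank, GEN8_GT_ISR(bank), GEN8_GT_IMR(bank),
		       GEN8_GT_IIR(bank), GEN8_GT_IER(bank));
	return 0;
}
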
+#define GEN8_BCS_IRQ_SHIFT 16
+#define GEN8_RCS_IRQ_SHIFT 0
+#define GEN8_VCS2_IRQ_SHIFT 16
+#define GEN8_VCS1_IRQ_SHIFT 0
+#define GEN8_VECS_IRQ_SHIFT 0
+
+#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IIR(pipe) (0x44408 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IER(pipe) (0x4440c + (0x10 * (pipe)))
+#define GEN8_PIPE_FIFO_UNDERRUN (1 << 31)
+#define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29)
+#define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28)
+#define GEN8_PIPE_CURSOR_FAULT (1 << 10)
+#define GEN8_PIPE_SPRITE_FAULT (1 << 9)
+#define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
+#define GEN8_PIPE_SPRITE_FLIP_DONE (1 << 5)
+#define GEN8_PIPE_PRIMARY_FLIP_DONE (1 << 4)
+#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
+#define GEN8_PIPE_VSYNC (1 << 1)
+#define GEN8_PIPE_VBLANK (1 << 0)
+#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
+ (GEN8_PIPE_CURSOR_FAULT | \
+ GEN8_PIPE_SPRITE_FAULT | \
+ GEN8_PIPE_PRIMARY_FAULT)
+
+#define GEN8_DE_PORT_ISR 0x44440
+#define GEN8_DE_PORT_IMR 0x44444
+#define GEN8_DE_PORT_IIR 0x44448
+#define GEN8_DE_PORT_IER 0x4444c
+#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
+#define GEN8_AUX_CHANNEL_A (1 << 0)
+
+#define GEN8_DE_MISC_ISR 0x44460
+#define GEN8_DE_MISC_IMR 0x44464
+#define GEN8_DE_MISC_IIR 0x44468
+#define GEN8_DE_MISC_IER 0x4446c
+#define GEN8_DE_MISC_GSE (1 << 27)
+
+#define GEN8_PCU_ISR 0x444e0
+#define GEN8_PCU_IMR 0x444e4
+#define GEN8_PCU_IIR 0x444e8
+#define GEN8_PCU_IER 0x444ec
+
+#define ILK_DISPLAY_CHICKEN2 0x42004
+/* Required on all Ironlake and Sandybridge according to the B-Spec. */
+#define ILK_ELPIN_409_SELECT (1 << 25)
+#define ILK_DPARB_GATE (1<<22)
+#define ILK_VSDPFD_FULL (1<<21)
+#define FUSE_STRAP 0x42014
+#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
+#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30)
+#define ILK_DISPLAY_DEBUG_DISABLE (1 << 29)
+#define ILK_HDCP_DISABLE (1 << 25)
+#define ILK_eDP_A_DISABLE (1 << 24)
+#define HSW_CDCLK_LIMIT (1 << 24)
+#define ILK_DESKTOP (1 << 23)
+
+#define ILK_DSPCLK_GATE_D 0x42020
+#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
+#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
+#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
+#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
+
+#define IVB_CHICKEN3 0x4200c
+# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
+# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
+
+#define CHICKEN_PAR1_1 0x42080
+#define DPA_MASK_VBLANK_SRD (1 << 15)
+#define FORCE_ARB_IDLE_PLANES (1 << 14)
+
+#define _CHICKEN_PIPESL_1_A 0x420b0
+#define _CHICKEN_PIPESL_1_B 0x420b4
+#define HSW_FBCQ_DIS (1 << 22)
+#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
+#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
+
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
+#define DISP_FBC_WM_DIS (1<<15)
+#define DISP_ARB_CTL2 0x45004
+#define DISP_DATA_PARTITION_5_6 (1<<6)
+#define GEN7_MSG_CTL 0x45010
+#define WAIT_FOR_PCH_RESET_ACK (1<<1)
+#define WAIT_FOR_PCH_FLR_ACK (1<<0)
+#define HSW_NDE_RSTWRN_OPT 0x46408
+#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
+
+/* GEN7 chicken */
+#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
+# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
+#define COMMON_SLICE_CHICKEN2 0x7014
+# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
+
+#define GEN7_L3SQCREG1 0xB010
+#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
+
+#define GEN7_L3CNTLREG1 0xB01C
+#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
+#define GEN7_L3AGDIS (1<<19)
+
+#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
+#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
+
+#define GEN7_L3SQCREG4 0xb034
+#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+
+/* GEN8 chicken */
+#define HDC_CHICKEN0 0x7300
+#define HDC_FORCE_NON_COHERENT (1<<4)
+
+/* WaCatErrorRejectionIssue */
+#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
+#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
+
+#define HSW_SCRATCH1 0xb038
+#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
/* PCH */
-/* south display engine interrupt */
+/* south display engine interrupt: IBX */
+#define SDE_AUDIO_POWER_D (1 << 27)
+#define SDE_AUDIO_POWER_C (1 << 26)
+#define SDE_AUDIO_POWER_B (1 << 25)
+#define SDE_AUDIO_POWER_SHIFT (25)
+#define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT)
+#define SDE_GMBUS (1 << 24)
+#define SDE_AUDIO_HDCP_TRANSB (1 << 23)
+#define SDE_AUDIO_HDCP_TRANSA (1 << 22)
+#define SDE_AUDIO_HDCP_MASK (3 << 22)
+#define SDE_AUDIO_TRANSB (1 << 21)
+#define SDE_AUDIO_TRANSA (1 << 20)
+#define SDE_AUDIO_TRANS_MASK (3 << 20)
+#define SDE_POISON (1 << 19)
+/* 18 reserved */
+#define SDE_FDI_RXB (1 << 17)
+#define SDE_FDI_RXA (1 << 16)
+#define SDE_FDI_MASK (3 << 16)
+#define SDE_AUXD (1 << 15)
+#define SDE_AUXC (1 << 14)
+#define SDE_AUXB (1 << 13)
+#define SDE_AUX_MASK (7 << 13)
+/* 12 reserved */
#define SDE_CRT_HOTPLUG (1 << 11)
#define SDE_PORTD_HOTPLUG (1 << 10)
#define SDE_PORTC_HOTPLUG (1 << 9)
#define SDE_PORTB_HOTPLUG (1 << 8)
#define SDE_SDVOB_HOTPLUG (1 << 6)
-#define SDE_HOTPLUG_MASK (0xf << 8)
+#define SDE_HOTPLUG_MASK (SDE_CRT_HOTPLUG | \
+ SDE_SDVOB_HOTPLUG | \
+ SDE_PORTB_HOTPLUG | \
+ SDE_PORTC_HOTPLUG | \
+ SDE_PORTD_HOTPLUG)
+#define SDE_TRANSB_CRC_DONE (1 << 5)
+#define SDE_TRANSB_CRC_ERR (1 << 4)
+#define SDE_TRANSB_FIFO_UNDER (1 << 3)
+#define SDE_TRANSA_CRC_DONE (1 << 2)
+#define SDE_TRANSA_CRC_ERR (1 << 1)
+#define SDE_TRANSA_FIFO_UNDER (1 << 0)
+#define SDE_TRANS_MASK (0x3f)
+
+/* south display engine interrupt: CPT/PPT */
+#define SDE_AUDIO_POWER_D_CPT (1 << 31)
+#define SDE_AUDIO_POWER_C_CPT (1 << 30)
+#define SDE_AUDIO_POWER_B_CPT (1 << 29)
+#define SDE_AUDIO_POWER_SHIFT_CPT 29
+#define SDE_AUDIO_POWER_MASK_CPT (7 << 29)
+#define SDE_AUXD_CPT (1 << 27)
+#define SDE_AUXC_CPT (1 << 26)
+#define SDE_AUXB_CPT (1 << 25)
+#define SDE_AUX_MASK_CPT (7 << 25)
+#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
+#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+#define SDE_SDVOB_HOTPLUG_CPT (1 << 18)
+#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
+ SDE_SDVOB_HOTPLUG_CPT | \
+ SDE_PORTD_HOTPLUG_CPT | \
+ SDE_PORTC_HOTPLUG_CPT | \
+ SDE_PORTB_HOTPLUG_CPT)
+#define SDE_GMBUS_CPT (1 << 17)
+#define SDE_ERROR_CPT (1 << 16)
+#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
+#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
+#define SDE_FDI_RXC_CPT (1 << 8)
+#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
+#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
+#define SDE_FDI_RXB_CPT (1 << 4)
+#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
+#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
+#define SDE_FDI_RXA_CPT (1 << 0)
+#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
+ SDE_AUDIO_CP_REQ_B_CPT | \
+ SDE_AUDIO_CP_REQ_A_CPT)
+#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
+ SDE_AUDIO_CP_CHG_B_CPT | \
+ SDE_AUDIO_CP_CHG_A_CPT)
+#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
+ SDE_FDI_RXB_CPT | \
+ SDE_FDI_RXA_CPT)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
#define SDEIIR 0xc4008
#define SDEIER 0xc400c
+#define SERR_INT 0xc4040
+#define SERR_INT_POISON (1<<31)
+#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
+#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
+#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
+#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
+
/* digital port hotplug */
-#define PCH_PORT_HOTPLUG 0xc4030
+#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
#define PORTD_HOTPLUG_ENABLE (1 << 20)
#define PORTD_PULSE_DURATION_2ms (0)
#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
#define PORTD_PULSE_DURATION_6ms (2 << 18)
#define PORTD_PULSE_DURATION_100ms (3 << 18)
-#define PORTD_HOTPLUG_NO_DETECT (0)
-#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
-#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
+#define PORTD_PULSE_DURATION_MASK (3 << 18)
+#define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16)
+#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
+#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
+#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
#define PORTC_HOTPLUG_ENABLE (1 << 12)
#define PORTC_PULSE_DURATION_2ms (0)
#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
#define PORTC_PULSE_DURATION_6ms (2 << 10)
#define PORTC_PULSE_DURATION_100ms (3 << 10)
-#define PORTC_HOTPLUG_NO_DETECT (0)
-#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
-#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
+#define PORTC_PULSE_DURATION_MASK (3 << 10)
+#define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8)
+#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
+#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
+#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
#define PORTB_HOTPLUG_ENABLE (1 << 4)
#define PORTB_PULSE_DURATION_2ms (0)
#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
#define PORTB_PULSE_DURATION_6ms (2 << 2)
#define PORTB_PULSE_DURATION_100ms (3 << 2)
-#define PORTB_HOTPLUG_NO_DETECT (0)
-#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
-#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
+#define PORTB_PULSE_DURATION_MASK (3 << 2)
+#define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0)
+#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
+#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
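
The hotplug status moved from one flag bit per state to a 2-bit field per port (0 = nothing, 1 = short pulse, 2 = long pulse), which is why the *_NO/SHORT/LONG_DETECT values changed above. A sketch of decoding port B from a made-up SHOTPLUG_CTL reading:

#include <stdio.h>
#include <stdint.h>

#define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0)
#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)

int main(void)
{
	uint32_t shotplug_ctl = PORTB_HOTPLUG_LONG_DETECT; /* made-up reading */

	switch (shotplug_ctl & PORTB_HOTPLUG_STATUS_MASK) {
	case PORTB_HOTPLUG_SHORT_DETECT:
		printf("port B: short pulse\n");
		break;
	case PORTB_HOTPLUG_LONG_DETECT:
		printf("port B: long pulse\n");
		break;
	default:
		printf("port B: no detection\n");
		break;
	}
	return 0;
}
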
#define PCH_GPIOA 0xc5010
#define PCH_GPIOB 0xc5014
@@ -2177,13 +4791,17 @@
#define PCH_GMBUS4 0xc5110
#define PCH_GMBUS5 0xc5120
-#define PCH_DPLL_A 0xc6014
-#define PCH_DPLL_B 0xc6018
+#define _PCH_DPLL_A 0xc6014
+#define _PCH_DPLL_B 0xc6018
+#define PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
-#define PCH_FPA0 0xc6040
-#define PCH_FPA1 0xc6044
-#define PCH_FPB0 0xc6048
-#define PCH_FPB1 0xc604c
+#define _PCH_FPA0 0xc6040
+#define FP_CB_TUNE (0x3<<22)
+#define _PCH_FPA1 0xc6044
+#define _PCH_FPB0 0xc6048
+#define _PCH_FPB1 0xc604c
+#define PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
@@ -2202,6 +4820,7 @@
#define DREF_NONSPREAD_SOURCE_MASK (3<<9)
#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
+#define DREF_SUPERSPREAD_SOURCE_MASK (3<<7)
#define DREF_SSC4_DOWNSPREAD (0<<6)
#define DREF_SSC4_CENTERSPREAD (1<<6)
#define DREF_SSC1_DISABLE (0<<1)
@@ -2221,54 +4840,157 @@
#define PCH_SSC4_PARMS 0xc6210
#define PCH_SSC4_AUX_PARMS 0xc6214
+#define PCH_DPLL_SEL 0xc7000
+#define TRANS_DPLLB_SEL(pipe) (1 << (pipe * 4))
+#define TRANS_DPLLA_SEL(pipe) 0
+#define TRANS_DPLL_ENABLE(pipe) (1 << (pipe * 4 + 3))
+
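PCH_DPLL_SEL gives each transcoder a 4-bit field: the low bit of the field selects DPLL B over DPLL A, and bit 3 enables the selection. A sketch of the per-pipe bit positions:

#include <stdio.h>

#define TRANS_DPLLB_SEL(pipe) (1 << (pipe * 4))
#define TRANS_DPLLA_SEL(pipe) 0
#define TRANS_DPLL_ENABLE(pipe) (1 << (pipe * 4 + 3))

int main(void)
{
	int pipe;

	for (pipe = 0; pipe < 3; pipe++)
		printf("pipe %d: DPLLB sel 0x%x, enable 0x%x\n",
		       pipe, TRANS_DPLLB_SEL(pipe), TRANS_DPLL_ENABLE(pipe));
	return 0;
}
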
/* transcoder */
-#define TRANS_HTOTAL_A 0xe0000
-#define TRANS_HTOTAL_SHIFT 16
-#define TRANS_HACTIVE_SHIFT 0
-#define TRANS_HBLANK_A 0xe0004
-#define TRANS_HBLANK_END_SHIFT 16
-#define TRANS_HBLANK_START_SHIFT 0
-#define TRANS_HSYNC_A 0xe0008
-#define TRANS_HSYNC_END_SHIFT 16
-#define TRANS_HSYNC_START_SHIFT 0
-#define TRANS_VTOTAL_A 0xe000c
-#define TRANS_VTOTAL_SHIFT 16
-#define TRANS_VACTIVE_SHIFT 0
-#define TRANS_VBLANK_A 0xe0010
-#define TRANS_VBLANK_END_SHIFT 16
-#define TRANS_VBLANK_START_SHIFT 0
-#define TRANS_VSYNC_A 0xe0014
-#define TRANS_VSYNC_END_SHIFT 16
-#define TRANS_VSYNC_START_SHIFT 0
-
-#define TRANSA_DATA_M1 0xe0030
-#define TRANSA_DATA_N1 0xe0034
-#define TRANSA_DATA_M2 0xe0038
-#define TRANSA_DATA_N2 0xe003c
-#define TRANSA_DP_LINK_M1 0xe0040
-#define TRANSA_DP_LINK_N1 0xe0044
-#define TRANSA_DP_LINK_M2 0xe0048
-#define TRANSA_DP_LINK_N2 0xe004c
-
-#define TRANS_HTOTAL_B 0xe1000
-#define TRANS_HBLANK_B 0xe1004
-#define TRANS_HSYNC_B 0xe1008
-#define TRANS_VTOTAL_B 0xe100c
-#define TRANS_VBLANK_B 0xe1010
-#define TRANS_VSYNC_B 0xe1014
-
-#define TRANSB_DATA_M1 0xe1030
-#define TRANSB_DATA_N1 0xe1034
-#define TRANSB_DATA_M2 0xe1038
-#define TRANSB_DATA_N2 0xe103c
-#define TRANSB_DP_LINK_M1 0xe1040
-#define TRANSB_DP_LINK_N1 0xe1044
-#define TRANSB_DP_LINK_M2 0xe1048
-#define TRANSB_DP_LINK_N2 0xe104c
-
-#define TRANSACONF 0xf0008
-#define TRANSBCONF 0xf1008
+#define _PCH_TRANS_HTOTAL_A 0xe0000
+#define TRANS_HTOTAL_SHIFT 16
+#define TRANS_HACTIVE_SHIFT 0
+#define _PCH_TRANS_HBLANK_A 0xe0004
+#define TRANS_HBLANK_END_SHIFT 16
+#define TRANS_HBLANK_START_SHIFT 0
+#define _PCH_TRANS_HSYNC_A 0xe0008
+#define TRANS_HSYNC_END_SHIFT 16
+#define TRANS_HSYNC_START_SHIFT 0
+#define _PCH_TRANS_VTOTAL_A 0xe000c
+#define TRANS_VTOTAL_SHIFT 16
+#define TRANS_VACTIVE_SHIFT 0
+#define _PCH_TRANS_VBLANK_A 0xe0010
+#define TRANS_VBLANK_END_SHIFT 16
+#define TRANS_VBLANK_START_SHIFT 0
+#define _PCH_TRANS_VSYNC_A 0xe0014
+#define TRANS_VSYNC_END_SHIFT 16
+#define TRANS_VSYNC_START_SHIFT 0
+#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
+
+#define _PCH_TRANSA_DATA_M1 0xe0030
+#define _PCH_TRANSA_DATA_N1 0xe0034
+#define _PCH_TRANSA_DATA_M2 0xe0038
+#define _PCH_TRANSA_DATA_N2 0xe003c
+#define _PCH_TRANSA_LINK_M1 0xe0040
+#define _PCH_TRANSA_LINK_N1 0xe0044
+#define _PCH_TRANSA_LINK_M2 0xe0048
+#define _PCH_TRANSA_LINK_N2 0xe004c
+
+/* Per-transcoder DIP controls */
+
+#define _VIDEO_DIP_CTL_A 0xe0200
+#define _VIDEO_DIP_DATA_A 0xe0208
+#define _VIDEO_DIP_GCP_A 0xe0210
+
+#define _VIDEO_DIP_CTL_B 0xe1200
+#define _VIDEO_DIP_DATA_B 0xe1208
+#define _VIDEO_DIP_GCP_B 0xe1210
+
+#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
+#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
+#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+
+#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
+#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
+
+#define VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170)
+#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
+
+#define VLV_TVIDEO_DIP_CTL(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
+#define VLV_TVIDEO_DIP_DATA(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
+#define VLV_TVIDEO_DIP_GCP(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
+
+/* Haswell DIP controls */
+#define HSW_VIDEO_DIP_CTL_A 0x60200
+#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220
+#define HSW_VIDEO_DIP_VS_DATA_A 0x60260
+#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
+#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
+#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240
+#define HSW_VIDEO_DIP_VS_ECC_A 0x60280
+#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
+#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300
+#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344
+#define HSW_VIDEO_DIP_GCP_A 0x60210
+
+#define HSW_VIDEO_DIP_CTL_B 0x61200
+#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220
+#define HSW_VIDEO_DIP_VS_DATA_B 0x61260
+#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
+#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
+#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define HSW_VIDEO_DIP_BVI_ECC_B 0x61240
+#define HSW_VIDEO_DIP_VS_ECC_B 0x61280
+#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
+#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300
+#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
+#define HSW_VIDEO_DIP_GCP_B 0x61210
+
+#define HSW_TVIDEO_DIP_CTL(trans) \
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A)
+#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A)
+#define HSW_TVIDEO_DIP_VS_DATA(trans) \
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A)
+#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A)
+#define HSW_TVIDEO_DIP_GCP(trans) \
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A)
+#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
+ _TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A)
+
+#define HSW_STEREO_3D_CTL_A 0x70020
+#define S3D_ENABLE (1<<31)
+#define HSW_STEREO_3D_CTL_B 0x71020
+
+#define HSW_STEREO_3D_CTL(trans) \
+ _PIPE2(trans, HSW_STEREO_3D_CTL_A)
+
+#define _PCH_TRANS_HTOTAL_B 0xe1000
+#define _PCH_TRANS_HBLANK_B 0xe1004
+#define _PCH_TRANS_HSYNC_B 0xe1008
+#define _PCH_TRANS_VTOTAL_B 0xe100c
+#define _PCH_TRANS_VBLANK_B 0xe1010
+#define _PCH_TRANS_VSYNC_B 0xe1014
+#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
+
+#define PCH_TRANS_HTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
+#define PCH_TRANS_HBLANK(pipe) _PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
+#define PCH_TRANS_HSYNC(pipe) _PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
+#define PCH_TRANS_VTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
+#define PCH_TRANS_VBLANK(pipe) _PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
+#define PCH_TRANS_VSYNC(pipe) _PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
+#define PCH_TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, \
+ _PCH_TRANS_VSYNCSHIFT_B)
+
+#define _PCH_TRANSB_DATA_M1 0xe1030
+#define _PCH_TRANSB_DATA_N1 0xe1034
+#define _PCH_TRANSB_DATA_M2 0xe1038
+#define _PCH_TRANSB_DATA_N2 0xe103c
+#define _PCH_TRANSB_LINK_M1 0xe1040
+#define _PCH_TRANSB_LINK_N1 0xe1044
+#define _PCH_TRANSB_LINK_M2 0xe1048
+#define _PCH_TRANSB_LINK_N2 0xe104c
+
+#define PCH_TRANS_DATA_M1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
+#define PCH_TRANS_DATA_N1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
+#define PCH_TRANS_DATA_M2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
+#define PCH_TRANS_DATA_N2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
+#define PCH_TRANS_LINK_M1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
+#define PCH_TRANS_LINK_N1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
+#define PCH_TRANS_LINK_M2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
+#define PCH_TRANS_LINK_N2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
+
+#define _PCH_TRANSACONF 0xf0008
+#define _PCH_TRANSBCONF 0xf1008
+#define PCH_TRANSCONF(pipe) _PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
+#define LPT_TRANSCONF _PCH_TRANSACONF /* lpt has only one transcoder */
#define TRANS_DISABLE (0<<31)
#define TRANS_ENABLE (1<<31)
#define TRANS_STATE_MASK (1<<30)
@@ -2278,21 +5000,55 @@
#define TRANS_FSYNC_DELAY_HB2 (1<<27)
#define TRANS_FSYNC_DELAY_HB3 (2<<27)
#define TRANS_FSYNC_DELAY_HB4 (3<<27)
-#define TRANS_DP_AUDIO_ONLY (1<<26)
-#define TRANS_DP_VIDEO_AUDIO (0<<26)
+#define TRANS_INTERLACE_MASK (7<<21)
#define TRANS_PROGRESSIVE (0<<21)
+#define TRANS_INTERLACED (3<<21)
+#define TRANS_LEGACY_INTERLACED_ILK (2<<21)
#define TRANS_8BPC (0<<5)
#define TRANS_10BPC (1<<5)
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
-#define FDI_RXA_CHICKEN 0xc200c
-#define FDI_RXB_CHICKEN 0xc2010
-#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1)
+#define _TRANSA_CHICKEN1 0xf0060
+#define _TRANSB_CHICKEN1 0xf1060
+#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
+#define _TRANSA_CHICKEN2 0xf0064
+#define _TRANSB_CHICKEN2 0xf1064
+#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
+#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
+#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29)
+#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3<<27)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1<<26)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1<<25)
+
+#define SOUTH_CHICKEN1 0xc2000
+#define FDIA_PHASE_SYNC_SHIFT_OVR 19
+#define FDIA_PHASE_SYNC_SHIFT_EN 18
+#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_BC_BIFURCATION_SELECT (1 << 12)
+#define SOUTH_CHICKEN2 0xc2004
+#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
+#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
+#define DPLS_EDP_PPS_FIX_DIS (1<<0)
+
+#define _FDI_RXA_CHICKEN 0xc200c
+#define _FDI_RXB_CHICKEN 0xc2010
+#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
+#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0)
+#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
+
+#define SOUTH_DSPCLK_GATE_D 0xc2020
+#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
+#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
+#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
/* CPU: FDI_TX */
-#define FDI_TXA_CTL 0x60100
-#define FDI_TXB_CTL 0x61100
+#define _FDI_TXA_CTL 0x60100
+#define _FDI_TXB_CTL 0x61100
+#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
#define FDI_TX_DISABLE (0<<31)
#define FDI_TX_ENABLE (1<<31)
#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
@@ -2307,29 +5063,52 @@
#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
-#define FDI_DP_PORT_WIDTH_X1 (0<<19)
-#define FDI_DP_PORT_WIDTH_X2 (1<<19)
-#define FDI_DP_PORT_WIDTH_X3 (2<<19)
-#define FDI_DP_PORT_WIDTH_X4 (3<<19)
+/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
+ SNB has different settings. */
+/* SNB A-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+/* SNB B-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
+#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
+#define FDI_DP_PORT_WIDTH_SHIFT 19
+#define FDI_DP_PORT_WIDTH_MASK (7 << FDI_DP_PORT_WIDTH_SHIFT)
+#define FDI_DP_PORT_WIDTH(width) (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)
#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
/* Ironlake: hardwired to 1 */
#define FDI_TX_PLL_ENABLE (1<<14)
+
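FDI_DP_PORT_WIDTH() above encodes the lane count minus one into bits 21:19, replacing the fixed X1..X4 values; a sketch of the encoding and the reverse decode:

#include <stdio.h>

#define FDI_DP_PORT_WIDTH_SHIFT 19
#define FDI_DP_PORT_WIDTH_MASK (7 << FDI_DP_PORT_WIDTH_SHIFT)
#define FDI_DP_PORT_WIDTH(width) (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)

int main(void)
{
	int lanes;

	for (lanes = 1; lanes <= 4; lanes++)
		printf("%d lane(s) -> 0x%08x\n", lanes, FDI_DP_PORT_WIDTH(lanes));

	/* decoding goes the other way: field value plus one */
	printf("decoded: %d lanes\n",
	       ((FDI_DP_PORT_WIDTH(4) & FDI_DP_PORT_WIDTH_MASK)
	        >> FDI_DP_PORT_WIDTH_SHIFT) + 1);
	return 0;
}
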
+/* Ivybridge has different bits for lolz */
+#define FDI_LINK_TRAIN_PATTERN_1_IVB (0<<8)
+#define FDI_LINK_TRAIN_PATTERN_2_IVB (1<<8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2<<8)
+#define FDI_LINK_TRAIN_NONE_IVB (3<<8)
+
/* both Tx and Rx */
+#define FDI_COMPOSITE_SYNC (1<<11)
+#define FDI_LINK_TRAIN_AUTO (1<<10)
#define FDI_SCRAMBLING_ENABLE (0<<7)
#define FDI_SCRAMBLING_DISABLE (1<<7)
/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
-#define FDI_RXA_CTL 0xf000c
-#define FDI_RXB_CTL 0xf100c
+#define _FDI_RXA_CTL 0xf000c
+#define _FDI_RXB_CTL 0xf100c
+#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
#define FDI_RX_ENABLE (1<<31)
-#define FDI_RX_DISABLE (0<<31)
/* train pattern and DP port width bits are the same as in FDI_TX */
-#define FDI_DP_PORT_WIDTH_X8 (7<<19)
+#define FDI_FS_ERRC_ENABLE (1<<27)
+#define FDI_FE_ERRC_ENABLE (1<<26)
+#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
#define FDI_8BPC (0<<16)
#define FDI_10BPC (1<<16)
#define FDI_6BPC (2<<16)
#define FDI_12BPC (3<<16)
-#define FDI_LINK_REVERSE_OVERWRITE (1<<15)
+#define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15)
#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
#define FDI_RX_PLL_ENABLE (1<<13)
#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
@@ -2337,15 +5116,32 @@
#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
-#define FDI_SEL_RAWCLK (0<<4)
-#define FDI_SEL_PCDCLK (1<<4)
-
-#define FDI_RXA_MISC 0xf0010
-#define FDI_RXB_MISC 0xf1010
-#define FDI_RXA_TUSIZE1 0xf0030
-#define FDI_RXA_TUSIZE2 0xf0038
-#define FDI_RXB_TUSIZE1 0xf1030
-#define FDI_RXB_TUSIZE2 0xf1038
+#define FDI_PCDCLK (1<<4)
+/* CPT */
+#define FDI_AUTO_TRAINING (1<<10)
+#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
+#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
+#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
+#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
+
+#define _FDI_RXA_MISC 0xf0010
+#define _FDI_RXB_MISC 0xf1010
+#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
+#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
+#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
+#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
+#define FDI_RX_TP1_TO_TP2_48 (2<<20)
+#define FDI_RX_TP1_TO_TP2_64 (3<<20)
+#define FDI_RX_FDI_DELAY_90 (0x90<<0)
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+
+#define _FDI_RXA_TUSIZE1 0xf0030
+#define _FDI_RXA_TUSIZE2 0xf0038
+#define _FDI_RXB_TUSIZE1 0xf1030
+#define _FDI_RXB_TUSIZE2 0xf1038
+#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
+#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
/* FDI_RX interrupt register format */
#define FDI_RX_INTER_LANE_ALIGN (1<<10)
@@ -2360,88 +5156,74 @@
#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
-#define FDI_RXA_IIR 0xf0014
-#define FDI_RXA_IMR 0xf0018
-#define FDI_RXB_IIR 0xf1014
-#define FDI_RXB_IMR 0xf1018
+#define _FDI_RXA_IIR 0xf0014
+#define _FDI_RXA_IMR 0xf0018
+#define _FDI_RXB_IIR 0xf1014
+#define _FDI_RXB_IMR 0xf1018
+#define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
+#define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
#define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004
-/* CRT */
-#define PCH_ADPA 0xe1100
-#define ADPA_TRANS_SELECT_MASK (1<<30)
-#define ADPA_TRANS_A_SELECT 0
-#define ADPA_TRANS_B_SELECT (1<<30)
-#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
-#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
-#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
-#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
-#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
-#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
-#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
-#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
-#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
-#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
-#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
-#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-
-/* or SDVOB */
-#define HDMIB 0xe1140
-#define PORT_ENABLE (1 << 31)
-#define TRANSCODER_A (0)
-#define TRANSCODER_B (1 << 30)
-#define COLOR_FORMAT_8bpc (0)
-#define COLOR_FORMAT_12bpc (3 << 26)
-#define SDVOB_HOTPLUG_ENABLE (1 << 23)
-#define SDVO_ENCODING (0)
-#define TMDS_ENCODING (2 << 10)
-#define NULL_PACKET_VSYNC_ENABLE (1 << 9)
-#define SDVOB_BORDER_ENABLE (1 << 7)
-#define AUDIO_ENABLE (1 << 6)
-#define VSYNC_ACTIVE_HIGH (1 << 4)
-#define HSYNC_ACTIVE_HIGH (1 << 3)
-#define PORT_DETECTED (1 << 2)
-
-#define HDMIC 0xe1150
-#define HDMID 0xe1160
-
#define PCH_LVDS 0xe1180
#define LVDS_DETECTED (1 << 1)
-#define BLC_PWM_CPU_CTL2 0x48250
-#define PWM_ENABLE (1 << 31)
-#define PWM_PIPE_A (0 << 29)
-#define PWM_PIPE_B (1 << 29)
-#define BLC_PWM_CPU_CTL 0x48254
-
-#define BLC_PWM_PCH_CTL1 0xc8250
-#define PWM_PCH_ENABLE (1 << 31)
-#define PWM_POLARITY_ACTIVE_LOW (1 << 29)
-#define PWM_POLARITY_ACTIVE_HIGH (0 << 29)
-#define PWM_POLARITY_ACTIVE_LOW2 (1 << 28)
-#define PWM_POLARITY_ACTIVE_HIGH2 (0 << 28)
-
-#define BLC_PWM_PCH_CTL2 0xc8254
+/* vlv has 2 sets of panel control regs. */
+#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
+#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
+#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
+#define PANEL_PORT_SELECT_DPB_VLV (1 << 30)
+#define PANEL_PORT_SELECT_DPC_VLV (2 << 30)
+#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
+#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
+
+#define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300)
+#define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304)
+#define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308)
+#define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c)
+#define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310)
+
+#define VLV_PIPE_PP_STATUS(pipe) _PIPE(pipe, PIPEA_PP_STATUS, PIPEB_PP_STATUS)
+#define VLV_PIPE_PP_CONTROL(pipe) _PIPE(pipe, PIPEA_PP_CONTROL, PIPEB_PP_CONTROL)
+#define VLV_PIPE_PP_ON_DELAYS(pipe) \
+ _PIPE(pipe, PIPEA_PP_ON_DELAYS, PIPEB_PP_ON_DELAYS)
+#define VLV_PIPE_PP_OFF_DELAYS(pipe) \
+ _PIPE(pipe, PIPEA_PP_OFF_DELAYS, PIPEB_PP_OFF_DELAYS)
+#define VLV_PIPE_PP_DIVISOR(pipe) \
+ _PIPE(pipe, PIPEA_PP_DIVISOR, PIPEB_PP_DIVISOR)
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
+#define PANEL_UNLOCK_REGS (0xabcd << 16)
+#define PANEL_UNLOCK_MASK (0xffff << 16)
#define EDP_FORCE_VDD (1 << 3)
#define EDP_BLC_ENABLE (1 << 2)
#define PANEL_POWER_RESET (1 << 1)
#define PANEL_POWER_OFF (0 << 0)
#define PANEL_POWER_ON (1 << 0)
#define PCH_PP_ON_DELAYS 0xc7208
-#define EDP_PANEL (1 << 30)
+#define PANEL_PORT_SELECT_MASK (3 << 30)
+#define PANEL_PORT_SELECT_LVDS (0 << 30)
+#define PANEL_PORT_SELECT_DPA (1 << 30)
+#define PANEL_PORT_SELECT_DPC (2 << 30)
+#define PANEL_PORT_SELECT_DPD (3 << 30)
+#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
+#define PANEL_POWER_UP_DELAY_SHIFT 16
+#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff)
+#define PANEL_LIGHT_ON_DELAY_SHIFT 0
+
#define PCH_PP_OFF_DELAYS 0xc720c
+#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
+#define PANEL_POWER_DOWN_DELAY_SHIFT 16
+#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
+#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
+
#define PCH_PP_DIVISOR 0xc7210
+#define PP_REFERENCE_DIVIDER_MASK (0xffffff00)
+#define PP_REFERENCE_DIVIDER_SHIFT 8
+#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
+#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
#define PCH_DP_B 0xe4100
#define PCH_DPB_AUX_CH_CTL 0xe4110
@@ -2467,4 +5249,1174 @@
#define PCH_DPD_AUX_CH_DATA4 0xe4320
#define PCH_DPD_AUX_CH_DATA5 0xe4324
+/* CPT */
+#define PORT_TRANS_A_SEL_CPT 0
+#define PORT_TRANS_B_SEL_CPT (1<<29)
+#define PORT_TRANS_C_SEL_CPT (2<<29)
+#define PORT_TRANS_SEL_MASK (3<<29)
+#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
+#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
+#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
+#define SDVO_PORT_TO_PIPE_CHV(val) (((val) & (3<<24)) >> 24)
+#define DP_PORT_TO_PIPE_CHV(val) (((val) & (3<<16)) >> 16)
+
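PORT_TO_PIPE_CPT() simply undoes PORT_TRANS_SEL_CPT(); a round-trip sketch with a made-up value:

#include <stdio.h>
#include <stdint.h>

#define PORT_TRANS_SEL_MASK (3<<29)
#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)

int main(void)
{
	uint32_t val = PORT_TRANS_SEL_CPT(2); /* made-up: transcoder C selected */

	printf("pipe = %u\n", (unsigned)PORT_TO_PIPE_CPT(val)); /* prints 2 */
	return 0;
}
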
+#define TRANS_DP_CTL_A 0xe0300
+#define TRANS_DP_CTL_B 0xe1300
+#define TRANS_DP_CTL_C 0xe2300
+#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
+#define TRANS_DP_OUTPUT_ENABLE (1<<31)
+#define TRANS_DP_PORT_SEL_B (0<<29)
+#define TRANS_DP_PORT_SEL_C (1<<29)
+#define TRANS_DP_PORT_SEL_D (2<<29)
+#define TRANS_DP_PORT_SEL_NONE (3<<29)
+#define TRANS_DP_PORT_SEL_MASK (3<<29)
+#define TRANS_DP_AUDIO_ONLY (1<<26)
+#define TRANS_DP_ENH_FRAMING (1<<18)
+#define TRANS_DP_8BPC (0<<9)
+#define TRANS_DP_10BPC (1<<9)
+#define TRANS_DP_6BPC (2<<9)
+#define TRANS_DP_12BPC (3<<9)
+#define TRANS_DP_BPC_MASK (3<<9)
+#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
+#define TRANS_DP_VSYNC_ACTIVE_LOW 0
+#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
+#define TRANS_DP_HSYNC_ACTIVE_LOW 0
+#define TRANS_DP_SYNC_MASK (3<<3)
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+/* SNB B-stepping */
+#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22)
+#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22)
+#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22)
+#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24<<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f<<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38<<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e<<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00<<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20<<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02<<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22<<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23<<22)
+
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
+
+#define VLV_PMWGICZ 0x1300a4
+
+#define FORCEWAKE 0xA18C
+#define FORCEWAKE_VLV 0x1300b0
+#define FORCEWAKE_ACK_VLV 0x1300b4
+#define FORCEWAKE_MEDIA_VLV 0x1300b8
+#define FORCEWAKE_ACK_MEDIA_VLV 0x1300bc
+#define FORCEWAKE_ACK_HSW 0x130044
+#define FORCEWAKE_ACK 0x130090
+#define VLV_GTLC_WAKE_CTRL 0x130090
+#define VLV_GTLC_RENDER_CTX_EXISTS (1 << 25)
+#define VLV_GTLC_MEDIA_CTX_EXISTS (1 << 24)
+#define VLV_GTLC_ALLOWWAKEREQ (1 << 0)
+
+#define VLV_GTLC_PW_STATUS 0x130094
+#define VLV_GTLC_ALLOWWAKEACK (1 << 0)
+#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
+#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
+#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
+#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_KERNEL 0x1
+#define FORCEWAKE_USER 0x2
+#define FORCEWAKE_MT_ACK 0x130040
+#define ECOBUS 0xa180
+#define FORCEWAKE_MT_ENABLE (1<<5)
+#define VLV_SPAREG2H 0xA194
+
+#define GTFIFODBG 0x120000
+#define GT_FIFO_SBDROPERR (1<<6)
+#define GT_FIFO_BLOBDROPERR (1<<5)
+#define GT_FIFO_SB_READ_ABORTERR (1<<4)
+#define GT_FIFO_DROPERR (1<<3)
+#define GT_FIFO_OVFERR (1<<2)
+#define GT_FIFO_IAWRERR (1<<1)
+#define GT_FIFO_IARDERR (1<<0)
+
+#define GTFIFOCTL 0x120008
+#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
+#define GT_FIFO_NUM_RESERVED_ENTRIES 20
+
+#define HSW_IDICR 0x9008
+#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
+#define HSW_EDRAM_PRESENT 0x120010
+
+#define GEN6_UCGCTL1 0x9400
+# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
+# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
+# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
+
+#define GEN6_UCGCTL2 0x9404
+# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30)
+# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22)
+# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
+# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
+# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
+
+#define GEN6_UCGCTL3 0x9408
+
+#define GEN7_UCGCTL4 0x940c
+#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
+
+#define GEN6_RCGCTL1 0x9410
+#define GEN6_RCGCTL2 0x9414
+#define GEN6_RSTCTL 0x9420
+
+#define GEN8_UCGCTL6 0x9430
+#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
+
+#define GEN6_GFXPAUSE 0xA000
+#define GEN6_RPNSWREQ 0xA008
+#define GEN6_TURBO_DISABLE (1<<31)
+#define GEN6_FREQUENCY(x) ((x)<<25)
+#define HSW_FREQUENCY(x) ((x)<<24)
+#define GEN6_OFFSET(x) ((x)<<19)
+#define GEN6_AGGRESSIVE_TURBO (0<<15)
+#define GEN6_RC_VIDEO_FREQ 0xA00C
+#define GEN6_RC_CONTROL 0xA090
+#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16)
+#define GEN6_RC_CTL_RC6p_ENABLE (1<<17)
+#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
+#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
+#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
+#define VLV_RC_CTL_CTX_RST_PARALLEL (1<<24)
+#define GEN7_RC_CTL_TO_MODE (1<<28)
+#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
+#define GEN6_RC_CTL_HW_ENABLE (1<<31)
+#define GEN6_RP_DOWN_TIMEOUT 0xA010
+#define GEN6_RP_INTERRUPT_LIMITS 0xA014
+#define GEN6_RPSTAT1 0xA01C
+#define GEN6_CAGF_SHIFT 8
+#define HSW_CAGF_SHIFT 7
+#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
+#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
+#define GEN6_RP_CONTROL 0xA024
+#define GEN6_RP_MEDIA_TURBO (1<<11)
+#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
+#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
+#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9)
+#define GEN6_RP_MEDIA_HW_MODE (1<<9)
+#define GEN6_RP_MEDIA_SW_MODE (0<<9)
+#define GEN6_RP_MEDIA_IS_GFX (1<<8)
+#define GEN6_RP_ENABLE (1<<7)
+#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
+#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
+#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
+#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0)
+#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
+#define GEN6_RP_UP_THRESHOLD 0xA02C
+#define GEN6_RP_DOWN_THRESHOLD 0xA030
+#define GEN6_RP_CUR_UP_EI 0xA050
+#define GEN6_CURICONT_MASK 0xffffff
+#define GEN6_RP_CUR_UP 0xA054
+#define GEN6_CURBSYTAVG_MASK 0xffffff
+#define GEN6_RP_PREV_UP 0xA058
+#define GEN6_RP_CUR_DOWN_EI 0xA05C
+#define GEN6_CURIAVG_MASK 0xffffff
+#define GEN6_RP_CUR_DOWN 0xA060
+#define GEN6_RP_PREV_DOWN 0xA064
+#define GEN6_RP_UP_EI 0xA068
+#define GEN6_RP_DOWN_EI 0xA06C
+#define GEN6_RP_IDLE_HYSTERSIS 0xA070
+#define GEN6_RPDEUHWTC 0xA080
+#define GEN6_RPDEUC 0xA084
+#define GEN6_RPDEUCSW 0xA088
+#define GEN6_RC_STATE 0xA094
+#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098
+#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C
+#define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0
+#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8
+#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC
+#define GEN6_RC_SLEEP 0xA0B0
+#define GEN6_RCUBMABDTMR 0xA0B0
+#define GEN6_RC1e_THRESHOLD 0xA0B4
+#define GEN6_RC6_THRESHOLD 0xA0B8
+#define GEN6_RC6p_THRESHOLD 0xA0BC
+#define VLV_RCEDATA 0xA0BC
+#define GEN6_RC6pp_THRESHOLD 0xA0C0
+#define GEN6_PMINTRMSK 0xA168
+#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
+#define VLV_PWRDWNUPCTL 0xA294
+
+#define GEN6_PMISR 0x44020
+#define GEN6_PMIMR 0x44024 /* rps_lock */
+#define GEN6_PMIIR 0x44028
+#define GEN6_PMIER 0x4402C
+#define GEN6_PM_MBOX_EVENT (1<<25)
+#define GEN6_PM_THERMAL_EVENT (1<<24)
+#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6)
+#define GEN6_PM_RP_UP_THRESHOLD (1<<5)
+#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
+#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
+#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
+#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \
+ GEN6_PM_RP_DOWN_THRESHOLD | \
+ GEN6_PM_RP_DOWN_TIMEOUT)
+
+#define GEN7_GT_SCRATCH_BASE 0x4F100
+#define GEN7_GT_SCRATCH_REG_NUM 8
+
+#define VLV_GTLC_SURVIVABILITY_REG 0x130098
+#define VLV_GFX_CLK_STATUS_BIT (1<<3)
+#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2)
+
+#define GEN6_GT_GFX_RC6_LOCKED 0x138104
+#define VLV_COUNTER_CONTROL 0x138104
+#define VLV_COUNT_RANGE_HIGH (1<<15)
+#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
+#define VLV_RENDER_RC6_COUNT_EN (1<<0)
+#define GEN6_GT_GFX_RC6 0x138108
+#define VLV_GT_RENDER_RC6 0x138108
+#define VLV_GT_MEDIA_RC6 0x13810C
+
+#define GEN6_GT_GFX_RC6p 0x13810C
+#define GEN6_GT_GFX_RC6pp 0x138110
+
+#define GEN6_PCODE_MAILBOX 0x138124
+#define GEN6_PCODE_READY (1<<31)
+#define GEN6_READ_OC_PARAMS 0xc
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
+#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_WRITE_RC6VIDS 0x4
+#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_PCODE_READ_D_COMP 0x10
+#define GEN6_PCODE_WRITE_D_COMP 0x11
+#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
+#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
+#define DISPLAY_IPS_CONTROL 0x19
+#define GEN6_PCODE_DATA 0x138128
+#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
+#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
+
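The RC6 VID encode/decode pair above maps a voltage in millivolts to 5 mV steps starting at 245 mV; a sketch of the round trip (450 mV is a made-up input):

#include <stdio.h>

#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)

int main(void)
{
	int mv = 450; /* hypothetical RC6 voltage in millivolts */
	int vid = GEN6_ENCODE_RC6_VID(mv);

	printf("%d mV -> vid %d -> %d mV\n",
	       mv, vid, GEN6_DECODE_RC6_VID(vid)); /* 450 -> 41 -> 450 */
	return 0;
}
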
+#define GEN6_GT_CORE_STATUS 0x138060
+#define GEN6_CORE_CPD_STATE_MASK (7<<4)
+#define GEN6_RCn_MASK 7
+#define GEN6_RC0 0
+#define GEN6_RC3 2
+#define GEN6_RC6 3
+#define GEN6_RC7 4
+
+#define GEN7_MISCCPCTL (0x9424)
+#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
+
+/* IVYBRIDGE DPF */
+#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
+#define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */
+#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
+#define GEN7_PARITY_ERROR_VALID (1<<13)
+#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
+#define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8)
+#define GEN7_PARITY_ERROR_ROW(reg) \
+ ((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14)
+#define GEN7_PARITY_ERROR_BANK(reg) \
+ ((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11)
+#define GEN7_PARITY_ERROR_SUBBANK(reg) \
+ ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
+#define GEN7_L3CDERRST1_ENABLE (1<<7)
+
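The GEN7_PARITY_ERROR_*() extractors above pull the row, bank, and subbank fields out of an L3CD error status word; a sketch against a made-up status value:

#include <stdio.h>
#include <stdint.h>

#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
#define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8)
#define GEN7_PARITY_ERROR_ROW(reg) \
		((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14)
#define GEN7_PARITY_ERROR_BANK(reg) \
		((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11)
#define GEN7_PARITY_ERROR_SUBBANK(reg) \
		((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)

int main(void)
{
	uint32_t reg = (0x123 << 14) | (2 << 11) | (5 << 8); /* made-up status */

	printf("row 0x%x bank %u subbank %u\n",
	       (unsigned)GEN7_PARITY_ERROR_ROW(reg),
	       (unsigned)GEN7_PARITY_ERROR_BANK(reg),
	       (unsigned)GEN7_PARITY_ERROR_SUBBANK(reg));
	return 0;
}
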
+#define GEN7_L3LOG_BASE 0xB070
+#define HSW_L3LOG_BASE_SLICE1 0xB270
+#define GEN7_L3LOG_SIZE 0x80
+
+#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
+#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
+#define GEN7_MAX_PS_THREAD_DEP (8<<12)
+#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
+#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+
+#define GEN8_ROW_CHICKEN 0xe4f0
+#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
+#define STALL_DOP_GATING_DISABLE (1<<5)
+
+#define GEN7_ROW_CHICKEN2 0xe4f4
+#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
+#define DOP_CLOCK_GATING_DISABLE (1<<0)
+
+#define HSW_ROW_CHICKEN3 0xe49c
+#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
+
+#define HALF_SLICE_CHICKEN3 0xe184
+#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
+#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
+
+#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020)
+#define INTEL_AUDIO_DEVCL 0x808629FB
+#define INTEL_AUDIO_DEVBLC 0x80862801
+#define INTEL_AUDIO_DEVCTG 0x80862802
+
+#define G4X_AUD_CNTL_ST 0x620B4
+#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
+#define G4X_ELDV_DEVCTG (1 << 14)
+#define G4X_ELD_ADDR (0xf << 5)
+#define G4X_ELD_ACK (1 << 4)
+#define G4X_HDMIW_HDMIEDID 0x6210C
+
+#define IBX_HDMIW_HDMIEDID_A 0xE2050
+#define IBX_HDMIW_HDMIEDID_B 0xE2150
+#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+ IBX_HDMIW_HDMIEDID_A, \
+ IBX_HDMIW_HDMIEDID_B)
+#define IBX_AUD_CNTL_ST_A 0xE20B4
+#define IBX_AUD_CNTL_ST_B 0xE21B4
+#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+ IBX_AUD_CNTL_ST_A, \
+ IBX_AUD_CNTL_ST_B)
+#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
+#define IBX_ELD_ADDRESS (0x1f << 5)
+#define IBX_ELD_ACK (1 << 4)
+#define IBX_AUD_CNTL_ST2 0xE20C0
+#define IBX_ELD_VALIDB (1 << 0)
+#define IBX_CP_READYB (1 << 1)
+
+#define CPT_HDMIW_HDMIEDID_A 0xE5050
+#define CPT_HDMIW_HDMIEDID_B 0xE5150
+#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+ CPT_HDMIW_HDMIEDID_A, \
+ CPT_HDMIW_HDMIEDID_B)
+#define CPT_AUD_CNTL_ST_A 0xE50B4
+#define CPT_AUD_CNTL_ST_B 0xE51B4
+#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+ CPT_AUD_CNTL_ST_A, \
+ CPT_AUD_CNTL_ST_B)
+#define CPT_AUD_CNTRL_ST2 0xE50C0
+
+#define VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
+#define VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
+#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+ VLV_HDMIW_HDMIEDID_A, \
+ VLV_HDMIW_HDMIEDID_B)
+#define VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
+#define VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
+#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+ VLV_AUD_CNTL_ST_A, \
+ VLV_AUD_CNTL_ST_B)
+#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0)
+
+/* These are the four 32-bit write offset registers, one per stream
+ * output buffer. Each determines the offset from the corresponding
+ * 3DSTATE_SO_BUFFER that the next streamed vertex output goes to.
+ */
+#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
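
As a concrete illustration of the comment above, a hedged sketch that snapshots all four per-buffer write offsets (the loop bound of 4 matches the four 3DSTATE_SO_BUFFER slots; the function name is hypothetical):

static void read_so_write_offsets(struct drm_i915_private *dev_priv,
				  u32 offsets[4])
{
	int n;

	/* One write-offset register per streamout buffer slot */
	for (n = 0; n < 4; n++)
		offsets[n] = I915_READ(GEN7_SO_WRITE_OFFSET(n));
}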
+
+#define IBX_AUD_CONFIG_A 0xe2000
+#define IBX_AUD_CONFIG_B 0xe2100
+#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
+ IBX_AUD_CONFIG_A, \
+ IBX_AUD_CONFIG_B)
+#define CPT_AUD_CONFIG_A 0xe5000
+#define CPT_AUD_CONFIG_B 0xe5100
+#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
+ CPT_AUD_CONFIG_A, \
+ CPT_AUD_CONFIG_B)
+#define VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
+#define VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
+#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
+ VLV_AUD_CONFIG_A, \
+ VLV_AUD_CONFIG_B)
+
+#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
+#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
+#define AUD_CONFIG_UPPER_N_SHIFT 20
+#define AUD_CONFIG_UPPER_N_VALUE (0xff << 20)
+#define AUD_CONFIG_LOWER_N_SHIFT 4
+#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
+#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+
+/* HSW Audio */
+#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */
+#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */
+#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
+ HSW_AUD_CONFIG_A, \
+ HSW_AUD_CONFIG_B)
+
+#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */
+#define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */
+#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
+ HSW_AUD_MISC_CTRL_A, \
+ HSW_AUD_MISC_CTRL_B)
+
+#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */
+#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */
+#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
+ HSW_AUD_DIP_ELD_CTRL_ST_A, \
+ HSW_AUD_DIP_ELD_CTRL_ST_B)
+
+/* Audio Digital Converter */
+#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */
+#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 2 */
+#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
+ HSW_AUD_DIG_CNVT_1, \
+ HSW_AUD_DIG_CNVT_2)
+#define DIP_PORT_SEL_MASK 0x3
+
+#define HSW_AUD_EDID_DATA_A 0x65050
+#define HSW_AUD_EDID_DATA_B 0x65150
+#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
+ HSW_AUD_EDID_DATA_A, \
+ HSW_AUD_EDID_DATA_B)
+
+#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */
+#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */
+#define AUDIO_INACTIVE_C (1<<11)
+#define AUDIO_INACTIVE_B (1<<7)
+#define AUDIO_INACTIVE_A (1<<3)
+#define AUDIO_OUTPUT_ENABLE_A (1<<2)
+#define AUDIO_OUTPUT_ENABLE_B (1<<6)
+#define AUDIO_OUTPUT_ENABLE_C (1<<10)
+#define AUDIO_ELD_VALID_A (1<<0)
+#define AUDIO_ELD_VALID_B (1<<4)
+#define AUDIO_ELD_VALID_C (1<<8)
+#define AUDIO_CP_READY_A (1<<1)
+#define AUDIO_CP_READY_B (1<<5)
+#define AUDIO_CP_READY_C (1<<9)
+
+/* HSW Power Wells */
+#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */
+#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */
+#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */
+#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */
+#define HSW_PWR_WELL_ENABLE_REQUEST (1<<31)
+#define HSW_PWR_WELL_STATE_ENABLED (1<<30)
+#define HSW_PWR_WELL_CTL5 0x45410
+#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
+#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
+#define HSW_PWR_WELL_FORCE_ON (1<<19)
+#define HSW_PWR_WELL_CTL6 0x45414
+
+/* Per-pipe DDI Function Control */
+#define TRANS_DDI_FUNC_CTL_A 0x60400
+#define TRANS_DDI_FUNC_CTL_B 0x61400
+#define TRANS_DDI_FUNC_CTL_C 0x62400
+#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
+#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER2(tran, TRANS_DDI_FUNC_CTL_A)
+
+#define TRANS_DDI_FUNC_ENABLE (1<<31)
+/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
+#define TRANS_DDI_PORT_MASK (7<<28)
+#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
+#define TRANS_DDI_PORT_NONE (0<<28)
+#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
+#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
+#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
+#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
+#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
+#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
+#define TRANS_DDI_BPC_MASK (7<<20)
+#define TRANS_DDI_BPC_8 (0<<20)
+#define TRANS_DDI_BPC_10 (1<<20)
+#define TRANS_DDI_BPC_6 (2<<20)
+#define TRANS_DDI_BPC_12 (3<<20)
+#define TRANS_DDI_PVSYNC (1<<17)
+#define TRANS_DDI_PHSYNC (1<<16)
+#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
+#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
+#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
+#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
+#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
+#define TRANS_DDI_BFI_ENABLE (1<<4)
+
+/* DisplayPort Transport Control */
+#define DP_TP_CTL_A 0x64040
+#define DP_TP_CTL_B 0x64140
+#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
+#define DP_TP_CTL_ENABLE (1<<31)
+#define DP_TP_CTL_MODE_SST (0<<27)
+#define DP_TP_CTL_MODE_MST (1<<27)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
+#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
+#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
+#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
+#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
+
+/* DisplayPort Transport Status */
+#define DP_TP_STATUS_A 0x64044
+#define DP_TP_STATUS_B 0x64144
+#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
+#define DP_TP_STATUS_IDLE_DONE (1<<25)
+#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+
+/* DDI Buffer Control */
+#define DDI_BUF_CTL_A 0x64000
+#define DDI_BUF_CTL_B 0x64100
+#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
+#define DDI_BUF_CTL_ENABLE (1<<31)
+/* Haswell */
+#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
+#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
+#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
+#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
+#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
+#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
+#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
+#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
+#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
+/* Broadwell */
+#define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */
+#define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */
+#define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */
+#define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */
+#define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */
+#define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */
+#define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */
+#define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */
+#define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */
+#define DDI_BUF_EMP_MASK (0xf<<24)
+#define DDI_BUF_PORT_REVERSAL (1<<16)
+#define DDI_BUF_IS_IDLE (1<<7)
+#define DDI_A_4_LANES (1<<4)
+#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
+#define DDI_INIT_DISPLAY_DETECTED (1<<0)
+
+/* DDI Buffer Translations */
+#define DDI_BUF_TRANS_A 0x64E00
+#define DDI_BUF_TRANS_B 0x64E60
+#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
+
+/* Sideband Interface (SBI) is programmed indirectly, via
+ * SBI_ADDR, which contains the register offset; and SBI_DATA,
+ * which contains the payload */
+#define SBI_ADDR 0xC6000
+#define SBI_DATA 0xC6004
+#define SBI_CTL_STAT 0xC6008
+#define SBI_CTL_DEST_ICLK (0x0<<16)
+#define SBI_CTL_DEST_MPHY (0x1<<16)
+#define SBI_CTL_OP_IORD (0x2<<8)
+#define SBI_CTL_OP_IOWR (0x3<<8)
+#define SBI_CTL_OP_CRRD (0x6<<8)
+#define SBI_CTL_OP_CRWR (0x7<<8)
+#define SBI_RESPONSE_FAIL (0x1<<1)
+#define SBI_RESPONSE_SUCCESS (0x0<<1)
+#define SBI_BUSY (0x1<<0)
+#define SBI_READY (0x0<<0)
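
To make the indirect-programming sequence described above concrete, here is a minimal sketch of a cross-request (CR) read over the sideband: poll until !SBI_BUSY, write the target offset into the high half of SBI_ADDR, kick a CRRD operation, then poll again and fetch SBI_DATA. Not the driver's actual helper; wait_for() is assumed to be the driver's polling macro and error handling is trimmed.

static u32 sbi_read_sketch(struct drm_i915_private *dev_priv, u16 reg)
{
	/* Wait for any previous transaction to finish */
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		return 0;
	}

	/* Target offset lives in the upper 16 bits of SBI_ADDR */
	I915_WRITE(SBI_ADDR, (u32)reg << 16);
	I915_WRITE(SBI_CTL_STAT,
		   SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD | SBI_BUSY);

	/* Wait for completion and check the response status */
	if (wait_for((I915_READ(SBI_CTL_STAT) &
		      (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 100)) {
		DRM_ERROR("timeout waiting for SBI read to complete\n");
		return 0;
	}

	return I915_READ(SBI_DATA);
}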
+
+/* SBI offsets */
+#define SBI_SSCDIVINTPHASE6 0x0600
+#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
+#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
+#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
+#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
+#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
+#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
+#define SBI_SSCCTL 0x020c
+#define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_PATHALT (1<<3)
+#define SBI_SSCCTL_DISABLE (1<<0)
+#define SBI_SSCAUXDIV6 0x0610
+#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
+#define SBI_DBUFF0 0x2a00
+#define SBI_GEN0 0x1f00
+#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0)
+
+/* LPT PIXCLK_GATE */
+#define PIXCLK_GATE 0xC6020
+#define PIXCLK_GATE_UNGATE (1<<0)
+#define PIXCLK_GATE_GATE (0<<0)
+
+/* SPLL */
+#define SPLL_CTL 0x46020
+#define SPLL_PLL_ENABLE (1<<31)
+#define SPLL_PLL_SSC (1<<28)
+#define SPLL_PLL_NON_SSC (2<<28)
+#define SPLL_PLL_LCPLL (3<<28)
+#define SPLL_PLL_REF_MASK (3<<28)
+#define SPLL_PLL_FREQ_810MHz (0<<26)
+#define SPLL_PLL_FREQ_1350MHz (1<<26)
+#define SPLL_PLL_FREQ_2700MHz (2<<26)
+#define SPLL_PLL_FREQ_MASK (3<<26)
+
+/* WRPLL */
+#define WRPLL_CTL1 0x46040
+#define WRPLL_CTL2 0x46060
+#define WRPLL_PLL_ENABLE (1<<31)
+#define WRPLL_PLL_SELECT_SSC (0x01<<28)
+#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
+#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
+/* WRPLL divider programming */
+#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
+#define WRPLL_DIVIDER_REF_MASK (0xff)
+#define WRPLL_DIVIDER_POST(x) ((x)<<8)
+#define WRPLL_DIVIDER_POST_MASK (0x3f<<8)
+#define WRPLL_DIVIDER_POST_SHIFT 8
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
+#define WRPLL_DIVIDER_FB_SHIFT 16
+#define WRPLL_DIVIDER_FB_MASK (0xff<<16)
+
+/* Port clock selection */
+#define PORT_CLK_SEL_A 0x46100
+#define PORT_CLK_SEL_B 0x46104
+#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
+#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
+#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
+#define PORT_CLK_SEL_LCPLL_810 (2<<29)
+#define PORT_CLK_SEL_SPLL (3<<29)
+#define PORT_CLK_SEL_WRPLL1 (4<<29)
+#define PORT_CLK_SEL_WRPLL2 (5<<29)
+#define PORT_CLK_SEL_NONE (7<<29)
+#define PORT_CLK_SEL_MASK (7<<29)
+
+/* Transcoder clock selection */
+#define TRANS_CLK_SEL_A 0x46140
+#define TRANS_CLK_SEL_B 0x46144
+#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
+/* For each transcoder, we need to select the corresponding port clock */
+#define TRANS_CLK_SEL_DISABLED (0x0<<29)
+#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
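
Note the +1 skew in TRANS_CLK_SEL_PORT(): field value 0 is reserved for "disabled", so port n maps to n + 1. A hypothetical fragment, assuming the driver's enum transcoder/enum port values:

static void select_trans_clock_sketch(struct drm_i915_private *dev_priv)
{
	/* Route transcoder B's clock from port C (port n -> field n + 1)... */
	I915_WRITE(TRANS_CLK_SEL(TRANSCODER_B), TRANS_CLK_SEL_PORT(PORT_C));
	/* ...and gate it again when the pipe is shut down */
	I915_WRITE(TRANS_CLK_SEL(TRANSCODER_B), TRANS_CLK_SEL_DISABLED);
}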
+
+#define TRANSA_MSA_MISC 0x60410
+#define TRANSB_MSA_MISC 0x61410
+#define TRANSC_MSA_MISC 0x62410
+#define TRANS_EDP_MSA_MISC 0x6f410
+#define TRANS_MSA_MISC(tran) _TRANSCODER2(tran, TRANSA_MSA_MISC)
+
+#define TRANS_MSA_SYNC_CLK (1<<0)
+#define TRANS_MSA_6_BPC (0<<5)
+#define TRANS_MSA_8_BPC (1<<5)
+#define TRANS_MSA_10_BPC (2<<5)
+#define TRANS_MSA_12_BPC (3<<5)
+#define TRANS_MSA_16_BPC (4<<5)
+
+/* LCPLL Control */
+#define LCPLL_CTL 0x130040
+#define LCPLL_PLL_DISABLE (1<<31)
+#define LCPLL_PLL_LOCK (1<<30)
+#define LCPLL_CLK_FREQ_MASK (3<<26)
+#define LCPLL_CLK_FREQ_450 (0<<26)
+#define LCPLL_CLK_FREQ_540_BDW (1<<26)
+#define LCPLL_CLK_FREQ_337_5_BDW (2<<26)
+#define LCPLL_CLK_FREQ_675_BDW (3<<26)
+#define LCPLL_CD_CLOCK_DISABLE (1<<25)
+#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
+#define LCPLL_POWER_DOWN_ALLOW (1<<22)
+#define LCPLL_CD_SOURCE_FCLK (1<<21)
+#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
+
+#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
+#define D_COMP_COMP_FORCE (1<<8)
+#define D_COMP_COMP_DISABLE (1<<0)
+
+/* Pipe WM_LINETIME - watermark line time */
+#define PIPE_WM_LINETIME_A 0x45270
+#define PIPE_WM_LINETIME_B 0x45274
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
+ PIPE_WM_LINETIME_B)
+#define PIPE_WM_LINETIME_MASK (0x1ff)
+#define PIPE_WM_LINETIME_TIME(x) ((x))
+#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
+#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
+
+/* SFUSE_STRAP */
+#define SFUSE_STRAP 0xc2014
+#define SFUSE_STRAP_FUSE_LOCK (1<<13)
+#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7)
+#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
+#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
+#define SFUSE_STRAP_DDID_DETECTED (1<<0)
+
+#define WM_MISC 0x45260
+#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
+
+#define WM_DBG 0x45280
+#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0)
+#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
+#define WM_DBG_DISALLOW_SPRITE (1<<2)
+
+/* pipe CSC */
+#define _PIPE_A_CSC_COEFF_RY_GY 0x49010
+#define _PIPE_A_CSC_COEFF_BY 0x49014
+#define _PIPE_A_CSC_COEFF_RU_GU 0x49018
+#define _PIPE_A_CSC_COEFF_BU 0x4901c
+#define _PIPE_A_CSC_COEFF_RV_GV 0x49020
+#define _PIPE_A_CSC_COEFF_BV 0x49024
+#define _PIPE_A_CSC_MODE 0x49028
+#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
+#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
+#define CSC_MODE_YUV_TO_RGB (1 << 0)
+#define _PIPE_A_CSC_PREOFF_HI 0x49030
+#define _PIPE_A_CSC_PREOFF_ME 0x49034
+#define _PIPE_A_CSC_PREOFF_LO 0x49038
+#define _PIPE_A_CSC_POSTOFF_HI 0x49040
+#define _PIPE_A_CSC_POSTOFF_ME 0x49044
+#define _PIPE_A_CSC_POSTOFF_LO 0x49048
+
+#define _PIPE_B_CSC_COEFF_RY_GY 0x49110
+#define _PIPE_B_CSC_COEFF_BY 0x49114
+#define _PIPE_B_CSC_COEFF_RU_GU 0x49118
+#define _PIPE_B_CSC_COEFF_BU 0x4911c
+#define _PIPE_B_CSC_COEFF_RV_GV 0x49120
+#define _PIPE_B_CSC_COEFF_BV 0x49124
+#define _PIPE_B_CSC_MODE 0x49128
+#define _PIPE_B_CSC_PREOFF_HI 0x49130
+#define _PIPE_B_CSC_PREOFF_ME 0x49134
+#define _PIPE_B_CSC_PREOFF_LO 0x49138
+#define _PIPE_B_CSC_POSTOFF_HI 0x49140
+#define _PIPE_B_CSC_POSTOFF_ME 0x49144
+#define _PIPE_B_CSC_POSTOFF_LO 0x49148
+
+#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
+#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
+#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
+#define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU)
+#define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV)
+#define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV)
+#define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE)
+#define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI)
+#define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME)
+#define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO)
+#define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI)
+#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
+#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
+
+/* VLV MIPI registers */
+
+#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
+#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
+#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
+#define DPI_ENABLE (1 << 31) /* A + B */
+#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
+#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
+#define DUAL_LINK_MODE_MASK (1 << 26)
+#define DUAL_LINK_MODE_FRONT_BACK (0 << 26)
+#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26)
+#define DITHERING_ENABLE (1 << 25) /* A + B */
+#define FLOPPED_HSTX (1 << 23)
+#define DE_INVERT (1 << 19) /* XXX */
+#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18
+#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18)
+#define AFE_LATCHOUT (1 << 17)
+#define LP_OUTPUT_HOLD (1 << 16)
+#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
+#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
+#define MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT 11
+#define MIPIB_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
+#define CSB_SHIFT 9
+#define CSB_MASK (3 << 9)
+#define CSB_20MHZ (0 << 9)
+#define CSB_10MHZ (1 << 9)
+#define CSB_40MHZ (2 << 9)
+#define BANDGAP_MASK (1 << 8)
+#define BANDGAP_PNW_CIRCUIT (0 << 8)
+#define BANDGAP_LNC_CIRCUIT (1 << 8)
+#define MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
+#define MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
+#define TEARING_EFFECT_DELAY (1 << 4) /* A + B */
+#define TEARING_EFFECT_SHIFT 2 /* A + B */
+#define TEARING_EFFECT_MASK (3 << 2)
+#define TEARING_EFFECT_OFF (0 << 2)
+#define TEARING_EFFECT_DSI (1 << 2)
+#define TEARING_EFFECT_GPIO (2 << 2)
+#define LANE_CONFIGURATION_SHIFT 0
+#define LANE_CONFIGURATION_MASK (3 << 0)
+#define LANE_CONFIGURATION_4LANE (0 << 0)
+#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0)
+#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
+
+#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
+#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
+#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define TEARING_EFFECT_DELAY_SHIFT 0
+#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
+
+/* XXX: all bits reserved */
+#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
+
+/* MIPI DSI Controller and D-PHY registers */
+
+#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000)
+#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800)
+#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
+#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
+#define ULPS_STATE_MASK (3 << 1)
+#define ULPS_STATE_ENTER (2 << 1)
+#define ULPS_STATE_EXIT (1 << 1)
+#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
+#define DEVICE_READY (1 << 0)
+
+#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004)
+#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804)
+#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
+#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008)
+#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808)
+#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
+#define TEARING_EFFECT (1 << 31)
+#define SPL_PKT_SENT_INTERRUPT (1 << 30)
+#define GEN_READ_DATA_AVAIL (1 << 29)
+#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
+#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
+#define RX_PROT_VIOLATION (1 << 26)
+#define RX_INVALID_TX_LENGTH (1 << 25)
+#define ACK_WITH_NO_ERROR (1 << 24)
+#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
+#define LP_RX_TIMEOUT (1 << 22)
+#define HS_TX_TIMEOUT (1 << 21)
+#define DPI_FIFO_UNDERRUN (1 << 20)
+#define LOW_CONTENTION (1 << 19)
+#define HIGH_CONTENTION (1 << 18)
+#define TXDSI_VC_ID_INVALID (1 << 17)
+#define TXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 16)
+#define TXCHECKSUM_ERROR (1 << 15)
+#define TXECC_MULTIBIT_ERROR (1 << 14)
+#define TXECC_SINGLE_BIT_ERROR (1 << 13)
+#define TXFALSE_CONTROL_ERROR (1 << 12)
+#define RXDSI_VC_ID_INVALID (1 << 11)
+#define RXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 10)
+#define RXCHECKSUM_ERROR (1 << 9)
+#define RXECC_MULTIBIT_ERROR (1 << 8)
+#define RXECC_SINGLE_BIT_ERROR (1 << 7)
+#define RXFALSE_CONTROL_ERROR (1 << 6)
+#define RXHS_RECEIVE_TIMEOUT_ERROR (1 << 5)
+#define RX_LP_TX_SYNC_ERROR (1 << 4)
+#define RXESCAPE_MODE_ENTRY_ERROR (1 << 3)
+#define RXEOT_SYNC_ERROR (1 << 2)
+#define RXSOT_SYNC_ERROR (1 << 1)
+#define RXSOT_ERROR (1 << 0)
+
+#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c)
+#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
+#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
+#define CMD_MODE_NOT_SUPPORTED (0 << 13)
+#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
+#define CMD_MODE_DATA_WIDTH_9_BIT (2 << 13)
+#define CMD_MODE_DATA_WIDTH_8_BIT (3 << 13)
+#define CMD_MODE_DATA_WIDTH_OPTION1 (4 << 13)
+#define CMD_MODE_DATA_WIDTH_OPTION2 (5 << 13)
+#define VID_MODE_FORMAT_MASK (0xf << 7)
+#define VID_MODE_NOT_SUPPORTED (0 << 7)
+#define VID_MODE_FORMAT_RGB565 (1 << 7)
+#define VID_MODE_FORMAT_RGB666 (2 << 7)
+#define VID_MODE_FORMAT_RGB666_LOOSE (3 << 7)
+#define VID_MODE_FORMAT_RGB888 (4 << 7)
+#define CMD_MODE_CHANNEL_NUMBER_SHIFT 5
+#define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5)
+#define VID_MODE_CHANNEL_NUMBER_SHIFT 3
+#define VID_MODE_CHANNEL_NUMBER_MASK (3 << 3)
+#define DATA_LANES_PRG_REG_SHIFT 0
+#define DATA_LANES_PRG_REG_MASK (7 << 0)
+
+#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010)
+#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
+#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
+
+#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014)
+#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
+#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
+
+#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018)
+#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define TURN_AROUND_TIMEOUT_MASK 0x3f
+
+#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c)
+#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define DEVICE_RESET_TIMER_MASK 0xffff
+
+#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020)
+#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820)
+#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
+#define VERTICAL_ADDRESS_SHIFT 16
+#define VERTICAL_ADDRESS_MASK (0xffff << 16)
+#define HORIZONTAL_ADDRESS_SHIFT 0
+#define HORIZONTAL_ADDRESS_MASK 0xffff
+
+#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024)
+#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define DBI_FIFO_EMPTY_HALF (0 << 0)
+#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
+#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
+
+/* regs below are bits 15:0 */
+#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028)
+#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+
+#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c)
+#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c)
+#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
+
+#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030)
+#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830)
+#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
+
+#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034)
+#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+
+#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038)
+#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+
+#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c)
+#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c)
+#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
+
+#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040)
+#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840)
+#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
+
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044)
+#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
+/* regs above are bits 15:0 */
+
+#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048)
+#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848)
+#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
+#define DPI_LP_MODE (1 << 6)
+#define BACKLIGHT_OFF (1 << 5)
+#define BACKLIGHT_ON (1 << 4)
+#define COLOR_MODE_OFF (1 << 3)
+#define COLOR_MODE_ON (1 << 2)
+#define TURN_ON (1 << 1)
+#define SHUTDOWN (1 << 0)
+
+#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c)
+#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c)
+#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
+#define COMMAND_BYTE_SHIFT 0
+#define COMMAND_BYTE_MASK (0x3f << 0)
+
+#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050)
+#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850)
+#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
+#define MASTER_INIT_TIMER_SHIFT 0
+#define MASTER_INIT_TIMER_MASK (0xffff << 0)
+
+#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054)
+#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define MAX_RETURN_PKT_SIZE_SHIFT 0
+#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
+
+#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058)
+#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
+#define DISABLE_VIDEO_BTA (1 << 3)
+#define IP_TG_CONFIG (1 << 2)
+#define VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE (1 << 0)
+#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
+#define VIDEO_MODE_BURST (3 << 0)
+
+#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c)
+#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c)
+#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
+#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
+#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
+#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
+#define HIGH_CONTENTION_RECOVERY_DISABLE (1 << 4)
+#define TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3)
+#define TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE (1 << 2)
+#define CLOCKSTOP (1 << 1)
+#define EOT_DISABLE (1 << 0)
+
+#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060)
+#define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860)
+#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
+#define LP_BYTECLK_SHIFT 0
+#define LP_BYTECLK_MASK (0xffff << 0)
+
+/* bits 31:0 */
+#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064)
+#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864)
+#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
+
+/* bits 31:0 */
+#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068)
+#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868)
+#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c)
+#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c)
+#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070)
+#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870)
+#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
+#define LONG_PACKET_WORD_COUNT_SHIFT 8
+#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
+#define SHORT_PACKET_PARAM_SHIFT 8
+#define SHORT_PACKET_PARAM_MASK (0xffff << 8)
+#define VIRTUAL_CHANNEL_SHIFT 6
+#define VIRTUAL_CHANNEL_MASK (3 << 6)
+#define DATA_TYPE_SHIFT 0
+#define DATA_TYPE_MASK (0x3f << 0)
+/* data type values, see include/video/mipi_display.h */
+
+#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074)
+#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874)
+#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
+#define DPI_FIFO_EMPTY (1 << 28)
+#define DBI_FIFO_EMPTY (1 << 27)
+#define LP_CTRL_FIFO_EMPTY (1 << 26)
+#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
+#define LP_CTRL_FIFO_FULL (1 << 24)
+#define HS_CTRL_FIFO_EMPTY (1 << 18)
+#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
+#define HS_CTRL_FIFO_FULL (1 << 16)
+#define LP_DATA_FIFO_EMPTY (1 << 10)
+#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
+#define LP_DATA_FIFO_FULL (1 << 8)
+#define HS_DATA_FIFO_EMPTY (1 << 2)
+#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
+#define HS_DATA_FIFO_FULL (1 << 0)
+
+#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078)
+#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define DBI_HS_LP_MODE_MASK (1 << 0)
+#define DBI_LP_MODE (1 << 0)
+#define DBI_HS_MODE (0 << 0)
+
+#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080)
+#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880)
+#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
+#define EXIT_ZERO_COUNT_SHIFT 24
+#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
+#define TRAIL_COUNT_SHIFT 16
+#define TRAIL_COUNT_MASK (0x1f << 16)
+#define CLK_ZERO_COUNT_SHIFT 8
+#define CLK_ZERO_COUNT_MASK (0xff << 8)
+#define PREPARE_COUNT_SHIFT 0
+#define PREPARE_COUNT_MASK (0x3f << 0)
+
+/* bits 31:0 */
+#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084)
+#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884)
+#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
+
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088)
+#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define LP_HS_SSW_CNT_SHIFT 16
+#define LP_HS_SSW_CNT_MASK (0xffff << 16)
+#define HS_LP_PWR_SW_CNT_SHIFT 0
+#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
+
+#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c)
+#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c)
+#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define STOP_STATE_STALL_COUNTER_SHIFT 0
+#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
+
+#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090)
+#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890)
+#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094)
+#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894)
+#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
+#define RX_CONTENTION_DETECTED (1 << 0)
+
+/* XXX: only pipe A ?!? */
+#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100)
+#define DBI_TYPEC_ENABLE (1 << 31)
+#define DBI_TYPEC_WIP (1 << 30)
+#define DBI_TYPEC_OPTION_SHIFT 28
+#define DBI_TYPEC_OPTION_MASK (3 << 28)
+#define DBI_TYPEC_FREQ_SHIFT 24
+#define DBI_TYPEC_FREQ_MASK (0xf << 24)
+#define DBI_TYPEC_OVERRIDE (1 << 8)
+#define DBI_TYPEC_OVERRIDE_COUNTER_SHIFT 0
+#define DBI_TYPEC_OVERRIDE_COUNTER_MASK (0xff << 0)
+
+
+/* MIPI adapter registers */
+
+#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104)
+#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904)
+#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
+#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
+#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
+#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
+#define ESCAPE_CLOCK_DIVIDER_2 (1 << 5)
+#define ESCAPE_CLOCK_DIVIDER_4 (2 << 5)
+#define READ_REQUEST_PRIORITY_SHIFT 3
+#define READ_REQUEST_PRIORITY_MASK (3 << 3)
+#define READ_REQUEST_PRIORITY_LOW (0 << 3)
+#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
+#define RGB_FLIP_TO_BGR (1 << 2)
+
+#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108)
+#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908)
+#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
+#define DATA_MEM_ADDRESS_SHIFT 5
+#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
+#define DATA_VALID (1 << 0)
+
+#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c)
+#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c)
+#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
+#define DATA_LENGTH_SHIFT 0
+#define DATA_LENGTH_MASK (0xfffff << 0)
+
+#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110)
+#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910)
+#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define COMMAND_MEM_ADDRESS_SHIFT 5
+#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
+#define AUTO_PWG_ENABLE (1 << 2)
+#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
+#define COMMAND_VALID (1 << 0)
+
+#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114)
+#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914)
+#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
+#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
+#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
+
+#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118)
+#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918)
+#define MIPI_READ_DATA_RETURN(pipe, n) \
+ (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+
+#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138)
+#define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938)
+#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define READ_DATA_VALID(n) (1 << (n))
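
A hedged sketch tying the two register blocks above together: after issuing a DSI read, harvest whichever of the eight return words the hardware flagged valid (function name and calling convention are illustrative; 'pipe' selects the A/B register block):

static int mipi_collect_read_data(struct drm_i915_private *dev_priv,
				  enum pipe pipe, u32 *buf)
{
	u32 valid = I915_READ(MIPI_READ_DATA_VALID(pipe));
	int n, count = 0;

	/* Eight return slots; copy out only the ones marked valid */
	for (n = 0; n < 8; n++)
		if (valid & READ_DATA_VALID(n))
			buf[count++] = I915_READ(MIPI_READ_DATA_RETURN(pipe, n));

	return count;
}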
+
+/* For UMS only (deprecated): */
+#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
+#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
+#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
+#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
+#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
+#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a3b90c9561d..043123c77a1 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -24,68 +24,10 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
#include "intel_drv.h"
-
-static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpll_reg;
-
- if (IS_IRONLAKE(dev)) {
- dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
- } else {
- dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
- }
-
- return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
-}
-
-static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (IS_IRONLAKE(dev))
- reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
-
- if (pipe == PIPE_A)
- array = dev_priv->save_palette_a;
- else
- array = dev_priv->save_palette_b;
-
- for(i = 0; i < 256; i++)
- array[i] = I915_READ(reg + (i << 2));
-}
-
-static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (IS_IRONLAKE(dev))
- reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
-
- if (pipe == PIPE_A)
- array = dev_priv->save_palette_a;
- else
- array = dev_priv->save_palette_b;
-
- for(i = 0; i < 256; i++)
- I915_WRITE(reg + (i << 2), array[i]);
-}
+#include "i915_reg.h"
static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
{
@@ -127,12 +69,18 @@ static void i915_save_vga(struct drm_device *dev)
int i;
u16 cr_index, cr_data, st01;
+ /* VGA state */
+ dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
+ dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
+ dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
+ dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev));
+
/* VGA color palette registers */
- dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
+ dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
/* MSR bits */
- dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
+ if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
@@ -147,35 +95,35 @@ static void i915_save_vga(struct drm_device *dev)
i915_read_indexed(dev, cr_index, cr_data, 0x11) &
(~0x80));
for (i = 0; i <= 0x24; i++)
- dev_priv->saveCR[i] =
+ dev_priv->regfile.saveCR[i] =
i915_read_indexed(dev, cr_index, cr_data, i);
/* Make sure we don't turn off CR group 0 writes */
- dev_priv->saveCR[0x11] &= ~0x80;
+ dev_priv->regfile.saveCR[0x11] &= ~0x80;
/* Attribute controller registers */
I915_READ8(st01);
- dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
+ dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
for (i = 0; i <= 0x14; i++)
- dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
+ dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
I915_READ8(st01);
/* Graphics controller registers */
for (i = 0; i < 9; i++)
- dev_priv->saveGR[i] =
+ dev_priv->regfile.saveGR[i] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
- dev_priv->saveGR[0x10] =
+ dev_priv->regfile.saveGR[0x10] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
- dev_priv->saveGR[0x11] =
+ dev_priv->regfile.saveGR[0x11] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
- dev_priv->saveGR[0x18] =
+ dev_priv->regfile.saveGR[0x18] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
/* Sequencer registers */
for (i = 0; i < 8; i++)
- dev_priv->saveSR[i] =
+ dev_priv->regfile.saveSR[i] =
i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
}
@@ -185,9 +133,18 @@ static void i915_restore_vga(struct drm_device *dev)
int i;
u16 cr_index, cr_data, st01;
+ /* VGA state */
+ I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL);
+
+ I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
+ I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
+ I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
+ POSTING_READ(VGA_PD);
+ udelay(150);
+
/* MSR bits */
- I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
+ if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
@@ -200,529 +157,148 @@ static void i915_restore_vga(struct drm_device *dev)
/* Sequencer registers, don't write SR07 */
for (i = 0; i < 7; i++)
i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
- dev_priv->saveSR[i]);
+ dev_priv->regfile.saveSR[i]);
/* CRT controller regs */
/* Enable CR group 0 writes */
- i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
+ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
for (i = 0; i <= 0x24; i++)
- i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
+ i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
/* Graphics controller regs */
for (i = 0; i < 9; i++)
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
- dev_priv->saveGR[i]);
+ dev_priv->regfile.saveGR[i]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
- dev_priv->saveGR[0x10]);
+ dev_priv->regfile.saveGR[0x10]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
- dev_priv->saveGR[0x11]);
+ dev_priv->regfile.saveGR[0x11]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
- dev_priv->saveGR[0x18]);
+ dev_priv->regfile.saveGR[0x18]);
/* Attribute controller registers */
I915_READ8(st01); /* switch back to index mode */
for (i = 0; i <= 0x14; i++)
- i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
+ i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
I915_READ8(st01); /* switch back to index mode */
- I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
I915_READ8(st01);
/* VGA color palette registers */
- I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
+ I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
}
-static void i915_save_modeset_reg(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- if (IS_IRONLAKE(dev)) {
- dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
- dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
- }
-
- /* Pipe & plane A info */
- dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
- dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
- if (IS_IRONLAKE(dev)) {
- dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
- dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
- dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
- } else {
- dev_priv->saveFPA0 = I915_READ(FPA0);
- dev_priv->saveFPA1 = I915_READ(FPA1);
- dev_priv->saveDPLL_A = I915_READ(DPLL_A);
- }
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
- dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
- dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
- dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
- dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
- dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
- dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
- dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
- if (!IS_IRONLAKE(dev))
- dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
-
- if (IS_IRONLAKE(dev)) {
- dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
- dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
- dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
- dev_priv->savePIPEA_LINK_N1 = I915_READ(PIPEA_LINK_N1);
-
- dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL);
- dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL);
-
- dev_priv->savePFA_CTL_1 = I915_READ(PFA_CTL_1);
- dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ);
- dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS);
-
- dev_priv->saveTRANSACONF = I915_READ(TRANSACONF);
- dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A);
- dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A);
- dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A);
- dev_priv->saveTRANS_VTOTAL_A = I915_READ(TRANS_VTOTAL_A);
- dev_priv->saveTRANS_VBLANK_A = I915_READ(TRANS_VBLANK_A);
- dev_priv->saveTRANS_VSYNC_A = I915_READ(TRANS_VSYNC_A);
- }
-
- dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
- dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
- dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
- dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
- dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
- if (IS_I965G(dev)) {
- dev_priv->saveDSPASURF = I915_READ(DSPASURF);
- dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
- }
- i915_save_palette(dev, PIPE_A);
- dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
-
- /* Pipe & plane B info */
- dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
- dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
- if (IS_IRONLAKE(dev)) {
- dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
- dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
- dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
- } else {
- dev_priv->saveFPB0 = I915_READ(FPB0);
- dev_priv->saveFPB1 = I915_READ(FPB1);
- dev_priv->saveDPLL_B = I915_READ(DPLL_B);
- }
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
- dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
- dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
- dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
- dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
- dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
- dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
- dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
- if (!IS_IRONLAKE(dev))
- dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
-
- if (IS_IRONLAKE(dev)) {
- dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
- dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
- dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
- dev_priv->savePIPEB_LINK_N1 = I915_READ(PIPEB_LINK_N1);
-
- dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL);
- dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL);
-
- dev_priv->savePFB_CTL_1 = I915_READ(PFB_CTL_1);
- dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ);
- dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS);
-
- dev_priv->saveTRANSBCONF = I915_READ(TRANSBCONF);
- dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B);
- dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B);
- dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B);
- dev_priv->saveTRANS_VTOTAL_B = I915_READ(TRANS_VTOTAL_B);
- dev_priv->saveTRANS_VBLANK_B = I915_READ(TRANS_VBLANK_B);
- dev_priv->saveTRANS_VSYNC_B = I915_READ(TRANS_VSYNC_B);
- }
-
- dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
- dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
- dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
- dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
- dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
- if (IS_I965GM(dev) || IS_GM45(dev)) {
- dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
- dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
- }
- i915_save_palette(dev, PIPE_B);
- dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
- return;
-}
-
-static void i915_restore_modeset_reg(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int dpll_a_reg, fpa0_reg, fpa1_reg;
- int dpll_b_reg, fpb0_reg, fpb1_reg;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- if (IS_IRONLAKE(dev)) {
- dpll_a_reg = PCH_DPLL_A;
- dpll_b_reg = PCH_DPLL_B;
- fpa0_reg = PCH_FPA0;
- fpb0_reg = PCH_FPB0;
- fpa1_reg = PCH_FPA1;
- fpb1_reg = PCH_FPB1;
- } else {
- dpll_a_reg = DPLL_A;
- dpll_b_reg = DPLL_B;
- fpa0_reg = FPA0;
- fpb0_reg = FPB0;
- fpa1_reg = FPA1;
- fpb1_reg = FPB1;
- }
-
- if (IS_IRONLAKE(dev)) {
- I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
- I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
- }
-
- /* Pipe & plane A info */
- /* Prime the clock */
- if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
- ~DPLL_VCO_ENABLE);
- DRM_UDELAY(150);
- }
- I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
- I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
- /* Actually enable it */
- I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
- DRM_UDELAY(150);
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
- I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
- DRM_UDELAY(150);
-
- /* Restore mode */
- I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
- I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
- I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
- I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
- I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
- I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
- if (!IS_IRONLAKE(dev))
- I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
-
- if (IS_IRONLAKE(dev)) {
- I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
- I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
- I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
- I915_WRITE(PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
-
- I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
- I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
-
- I915_WRITE(PFA_CTL_1, dev_priv->savePFA_CTL_1);
- I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
- I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
-
- I915_WRITE(TRANSACONF, dev_priv->saveTRANSACONF);
- I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
- I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
- I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
- I915_WRITE(TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
- I915_WRITE(TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
- I915_WRITE(TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
- }
-
- /* Restore plane info */
- I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
- I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
- I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
- I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
- I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
- if (IS_I965G(dev)) {
- I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
- I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
- }
-
- I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
-
- i915_restore_palette(dev, PIPE_A);
- /* Enable the plane */
- I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
- I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
-
- /* Pipe & plane B info */
- if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
- ~DPLL_VCO_ENABLE);
- DRM_UDELAY(150);
- }
- I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
- I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
- /* Actually enable it */
- I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
- DRM_UDELAY(150);
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
- I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
- DRM_UDELAY(150);
-
- /* Restore mode */
- I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
- I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
- I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
- I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
- I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
- I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
- if (!IS_IRONLAKE(dev))
- I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
-
- if (IS_IRONLAKE(dev)) {
- I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
- I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
- I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
- I915_WRITE(PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
-
- I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
- I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
-
- I915_WRITE(PFB_CTL_1, dev_priv->savePFB_CTL_1);
- I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
- I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
-
- I915_WRITE(TRANSBCONF, dev_priv->saveTRANSBCONF);
- I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
- I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
- I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
- I915_WRITE(TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
- I915_WRITE(TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
- I915_WRITE(TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
- }
-
- /* Restore plane info */
- I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
- I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
- I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
- I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
- I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
- if (IS_I965G(dev)) {
- I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
- I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
- }
-
- I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
-
- i915_restore_palette(dev, PIPE_B);
- /* Enable the plane */
- I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
- I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
-
- return;
-}
-
-void i915_save_display(struct drm_device *dev)
+static void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration control */
- dev_priv->saveDSPARB = I915_READ(DSPARB);
+ if (INTEL_INFO(dev)->gen <= 4)
+ dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* This is only meaningful in non-KMS mode */
- /* Don't save them in KMS mode */
- i915_save_modeset_reg(dev);
-
- /* Cursor state */
- dev_priv->saveCURACNTR = I915_READ(CURACNTR);
- dev_priv->saveCURAPOS = I915_READ(CURAPOS);
- dev_priv->saveCURABASE = I915_READ(CURABASE);
- dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
- dev_priv->saveCURBPOS = I915_READ(CURBPOS);
- dev_priv->saveCURBBASE = I915_READ(CURBBASE);
- if (!IS_I9XX(dev))
- dev_priv->saveCURSIZE = I915_READ(CURSIZE);
-
- /* CRT state */
- if (IS_IRONLAKE(dev)) {
- dev_priv->saveADPA = I915_READ(PCH_ADPA);
- } else {
- dev_priv->saveADPA = I915_READ(ADPA);
- }
+ /* Don't save them in KMS mode */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_save_display_reg(dev);
/* LVDS state */
- if (IS_IRONLAKE(dev)) {
- dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
- dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
- dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
- dev_priv->saveLVDS = I915_READ(PCH_LVDS);
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
+ dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+
+ dev_priv->regfile.saveBLC_HIST_CTL =
+ I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
+ dev_priv->regfile.saveBLC_HIST_CTL_B =
+ I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
} else {
- dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
- dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
- if (IS_I965G(dev))
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
+ dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
if (IS_MOBILE(dev) && !IS_I830(dev))
- dev_priv->saveLVDS = I915_READ(LVDS);
- }
-
- if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
- dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
-
- if (IS_IRONLAKE(dev)) {
- dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
- dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
- dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
- } else {
- dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
- dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
- dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ dev_priv->regfile.saveLVDS = I915_READ(LVDS);
}
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- dev_priv->saveDP_B = I915_READ(DP_B);
- dev_priv->saveDP_C = I915_READ(DP_C);
- dev_priv->saveDP_D = I915_READ(DP_D);
- dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M);
- dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M);
- dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N);
- dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N);
- dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M);
- dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M);
- dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N);
- dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N);
- }
- /* FIXME: save TV & SDVO state */
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
- /* FBC state */
- if (IS_GM45(dev)) {
- dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
+ dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
+ dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
} else {
- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+ dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+ dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
}
- /* VGA state */
- dev_priv->saveVGA0 = I915_READ(VGA0);
- dev_priv->saveVGA1 = I915_READ(VGA1);
- dev_priv->saveVGA_PD = I915_READ(VGA_PD);
- if (IS_IRONLAKE(dev))
- dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
- else
- dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+ /* save FBC interval */
+ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+ dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
- i915_save_vga(dev);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_save_vga(dev);
}
-void i915_restore_display(struct drm_device *dev)
+static void i915_restore_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 mask = 0xffffffff;
/* Display arbitration */
- I915_WRITE(DSPARB, dev_priv->saveDSPARB);
-
- /* Display port ratios (must be done before clock is set) */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
- I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
- I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
- I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
- I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
- I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
- I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
- I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
- }
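+ /* DSPARB carries the display plane FIFO split on these older parts */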
+ if (INTEL_INFO(dev)->gen <= 4)
+ I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
- /* This is only meaningful in non-KMS mode */
- /* Don't restore them in KMS mode */
- i915_restore_modeset_reg(dev);
-
- /* Cursor state */
- I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
- I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
- I915_WRITE(CURABASE, dev_priv->saveCURABASE);
- I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
- I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
- I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
- if (!IS_I9XX(dev))
- I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
-
- /* CRT state */
- if (IS_IRONLAKE(dev))
- I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
- else
- I915_WRITE(ADPA, dev_priv->saveADPA);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_restore_display_reg(dev);
- /* LVDS state */
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
- I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
-
- if (IS_IRONLAKE(dev)) {
- I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
- } else if (IS_MOBILE(dev) && !IS_I830(dev))
- I915_WRITE(LVDS, dev_priv->saveLVDS);
-
- if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
- I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
-
- if (IS_IRONLAKE(dev)) {
- I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
- I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
- I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
- I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
- I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
- I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
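+ /* In KMS mode the modeset code owns the LVDS port, so mask out
+ * the port-enable bit instead of switching it back on here.
+ */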
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ mask = ~LVDS_PORT_EN;
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
+ else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+ I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
+
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
+ I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+ I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+ I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+ I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
+ I915_WRITE(RSTDBYCTL,
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
+ } else if (IS_VALLEYVIEW(dev)) {
+ I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
+ dev_priv->regfile.saveBLC_HIST_CTL);
+ I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
+ dev_priv->regfile.saveBLC_HIST_CTL_B);
} else {
- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
- I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL);
- I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
- I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
- I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
- I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
+ I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+ I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+ I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+ I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+ I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
}
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(DP_B, dev_priv->saveDP_B);
- I915_WRITE(DP_C, dev_priv->saveDP_C);
- I915_WRITE(DP_D, dev_priv->saveDP_D);
- }
- /* FIXME: restore TV & SDVO state */
+ /* only restore FBC info on platforms that support FBC */
+ intel_disable_fbc(dev);
- /* FBC info */
- if (IS_GM45(dev)) {
- g4x_disable_fbc(dev);
- I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
- } else {
- i8xx_disable_fbc(dev);
- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
- }
+ /* restore FBC interval */
+ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+ I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
- /* VGA state */
- if (IS_IRONLAKE(dev))
- I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_restore_vga(dev);
else
- I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
- I915_WRITE(VGA0, dev_priv->saveVGA0);
- I915_WRITE(VGA1, dev_priv->saveVGA1);
- I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
- DRM_UDELAY(150);
-
- i915_restore_vga(dev);
+ i915_redisable_vga(dev);
}
int i915_save_state(struct drm_device *dev)
@@ -730,52 +306,44 @@ int i915_save_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
-
- /* Hardware status page */
- dev_priv->saveHWS = I915_READ(HWS_PGA);
+ mutex_lock(&dev->struct_mutex);
i915_save_display(dev);
- /* Interrupt state */
- if (IS_IRONLAKE(dev)) {
- dev_priv->saveDEIER = I915_READ(DEIER);
- dev_priv->saveDEIMR = I915_READ(DEIMR);
- dev_priv->saveGTIER = I915_READ(GTIER);
- dev_priv->saveGTIMR = I915_READ(GTIMR);
- dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
- dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
- } else {
- dev_priv->saveIER = I915_READ(IER);
- dev_priv->saveIMR = I915_READ(IMR);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveDEIER = I915_READ(DEIER);
+ dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
+ dev_priv->regfile.saveGTIER = I915_READ(GTIER);
+ dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
+ dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+ dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
+ I915_READ(RSTDBYCTL);
+ dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
+ } else {
+ dev_priv->regfile.saveIER = I915_READ(IER);
+ dev_priv->regfile.saveIMR = I915_READ(IMR);
+ }
}
/* Cache mode state */
- dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+ if (INTEL_INFO(dev)->gen < 7)
+ dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
- dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+ dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
for (i = 0; i < 16; i++) {
- dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
- dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+ dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+ dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
}
for (i = 0; i < 3; i++)
- dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+ dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));
- /* Fences */
- if (IS_I965G(dev)) {
- for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- } else {
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
-
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- }
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -785,57 +353,45 @@ int i915_restore_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
-
- /* Hardware status page */
- I915_WRITE(HWS_PGA, dev_priv->saveHWS);
-
- /* Fences */
- if (IS_I965G(dev)) {
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
- } else {
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
- }
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_fences(dev);
i915_restore_display(dev);
- /* Interrupt state */
- if (IS_IRONLAKE(dev)) {
- I915_WRITE(DEIER, dev_priv->saveDEIER);
- I915_WRITE(DEIMR, dev_priv->saveDEIMR);
- I915_WRITE(GTIER, dev_priv->saveGTIER);
- I915_WRITE(GTIMR, dev_priv->saveGTIMR);
- I915_WRITE(FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
- I915_WRITE(FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
- } else {
- I915_WRITE (IER, dev_priv->saveIER);
- I915_WRITE (IMR, dev_priv->saveIMR);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
+ I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
+ I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
+ I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
+ I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
+ I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
+ I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
+ } else {
+ I915_WRITE(IER, dev_priv->regfile.saveIER);
+ I915_WRITE(IMR, dev_priv->regfile.saveIMR);
+ }
}
- /* Clock gating state */
- intel_init_clock_gating(dev);
-
/* Cache mode state */
- I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+ if (INTEL_INFO(dev)->gen < 7)
+ I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
+ 0xffff0000);
/* Memory arbitration state */
- I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+ I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
for (i = 0; i < 16; i++) {
- I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
- I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
+ I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]);
+ I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
- I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+ I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);
+
+ mutex_unlock(&dev->struct_mutex);
- /* I2C state */
- intel_i2c_reset_gmbus(dev);
+ intel_i2c_reset(dev);
return 0;
}
-
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
new file mode 100644
index 00000000000..86ce39aad0f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -0,0 +1,609 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+#define dev_to_drm_minor(d) dev_get_drvdata((d))
+
+#ifdef CONFIG_PM
+static u32 calc_residency(struct drm_device *dev, const u32 reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u64 raw_time; /* 32b value may overflow during fixed point math */
+ u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
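+ /* 1.28us per counter tick by default; units and div carry a x100
+ * bias so the fixed-point math keeps precision, and the result
+ * comes out in milliseconds */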
+ u32 ret;
+
+ if (!intel_enable_rc6(dev))
+ return 0;
+
+ intel_runtime_pm_get(dev_priv);
+
+ /* On VLV, residency time is in CZ units rather than 1.28us */
+ if (IS_VALLEYVIEW(dev)) {
+ u32 clkctl2;
+
+ clkctl2 = I915_READ(VLV_CLK_CTL2) >>
+ CLK_CTL2_CZCOUNT_30NS_SHIFT;
+ if (!clkctl2) {
+ WARN(!clkctl2, "bogus CZ count value");
+ ret = 0;
+ goto out;
+ }
+ units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
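+ /* in high-range mode the counter ticks 256 times more coarsely */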
+ if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+ units <<= 8;
+
+ div = 1000000ULL * bias;
+ }
+
+ raw_time = I915_READ(reg) * units;
+ ret = DIV_ROUND_UP_ULL(raw_time, div);
+
+out:
+ intel_runtime_pm_put(dev_priv);
+ return ret;
+}
+
+static ssize_t
+show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = dev_to_drm_minor(kdev);
+ return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
+}
+
+static ssize_t
+show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = dev_to_drm_minor(kdev);
+ u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
+ return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
+}
+
+static ssize_t
+show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = dev_to_drm_minor(kdev);
+ u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+ if (IS_VALLEYVIEW(dminor->dev))
+ rc6p_residency = 0;
+ return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
+}
+
+static ssize_t
+show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = dev_to_drm_minor(kdev);
+ u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+ if (IS_VALLEYVIEW(dminor->dev))
+ rc6pp_residency = 0;
+ return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
+}
+
+static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
+static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
+static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
+static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
+
+static struct attribute *rc6_attrs[] = {
+ &dev_attr_rc6_enable.attr,
+ &dev_attr_rc6_residency_ms.attr,
+ &dev_attr_rc6p_residency_ms.attr,
+ &dev_attr_rc6pp_residency_ms.attr,
+ NULL
+};
+
+static struct attribute_group rc6_attr_group = {
+ .name = power_group_name,
+ .attrs = rc6_attrs
+};
+#endif
+
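+/* sanity check a userspace offset into the L3 parity remap log */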
+static int l3_access_valid(struct drm_device *dev, loff_t offset)
+{
+ if (!HAS_L3_DPF(dev))
+ return -EPERM;
+
+ if (offset % 4 != 0)
+ return -EINVAL;
+
+ if (offset >= GEN7_L3LOG_SIZE)
+ return -ENXIO;
+
+ return 0;
+}
+
+static ssize_t
+i915_l3_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct drm_minor *dminor = dev_to_drm_minor(dev);
+ struct drm_device *drm_dev = dminor->dev;
+ struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ int slice = (int)(uintptr_t)attr->private;
+ int ret;
+
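+ /* the remap log is an array of u32 entries; drop any partial word */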
+ count = round_down(count, 4);
+
+ ret = l3_access_valid(drm_dev, offset);
+ if (ret)
+ return ret;
+
+ count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
+
+ ret = i915_mutex_lock_interruptible(drm_dev);
+ if (ret)
+ return ret;
+
+ if (dev_priv->l3_parity.remap_info[slice])
+ memcpy(buf,
+ dev_priv->l3_parity.remap_info[slice] + (offset/4),
+ count);
+ else
+ memset(buf, 0, count);
+
+ mutex_unlock(&drm_dev->struct_mutex);
+
+ return count;
+}
+
+static ssize_t
+i915_l3_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct drm_minor *dminor = dev_to_drm_minor(dev);
+ struct drm_device *drm_dev = dminor->dev;
+ struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ struct intel_context *ctx;
+ u32 *temp = NULL; /* Just here to make handling failures easy */
+ int slice = (int)(uintptr_t)attr->private;
+ int ret;
+
+ if (!HAS_HW_CONTEXTS(drm_dev))
+ return -ENXIO;
+
+ ret = l3_access_valid(drm_dev, offset);
+ if (ret)
+ return ret;
+
+ ret = i915_mutex_lock_interruptible(drm_dev);
+ if (ret)
+ return ret;
+
+ if (!dev_priv->l3_parity.remap_info[slice]) {
+ temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
+ if (!temp) {
+ mutex_unlock(&drm_dev->struct_mutex);
+ return -ENOMEM;
+ }
+ }
+
+ ret = i915_gpu_idle(drm_dev);
+ if (ret) {
+ kfree(temp);
+ mutex_unlock(&drm_dev->struct_mutex);
+ return ret;
+ }
+
+ /* TODO: Ideally we really want a GPU reset here to make sure errors
+ * aren't propagated. Since I cannot find a stable way to reset the GPU
+ * at this point it is left as a TODO.
+ */
+ if (temp)
+ dev_priv->l3_parity.remap_info[slice] = temp;
+
+ memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
+
+ /* NB: We defer the remapping until we switch to the context */
+ list_for_each_entry(ctx, &dev_priv->context_list, link)
+ ctx->remap_slice |= (1<<slice);
+
+ mutex_unlock(&drm_dev->struct_mutex);
+
+ return count;
+}
+
+static struct bin_attribute dpf_attrs = {
+ .attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
+ .size = GEN7_L3LOG_SIZE,
+ .read = i915_l3_read,
+ .write = i915_l3_write,
+ .mmap = NULL,
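+ /* the L3 slice index rides along in ->private */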
+ .private = (void *)0
+};
+
+static struct bin_attribute dpf_attrs_1 = {
+ .attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
+ .size = GEN7_L3LOG_SIZE,
+ .read = i915_l3_read,
+ .write = i915_l3_write,
+ .mmap = NULL,
+ .private = (void *)1
+};
+
+static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ intel_runtime_pm_get(dev_priv);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (IS_VALLEYVIEW(dev_priv->dev)) {
+ u32 freq;
+ freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
+ } else {
+ ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ intel_runtime_pm_put(dev_priv);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+}
+
+static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
+ else
+ ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t gt_max_freq_mhz_store(struct device *kdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+ ssize_t ret;
+
+ ret = kstrtou32(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
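+ /* sysfs values are in MHz; convert to hw units (PUnit codes on
+ * VLV, 50 MHz steps otherwise) before range checking */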
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ val = vlv_freq_opcode(dev_priv, val);
+ else
+ val /= GT_FREQUENCY_MULTIPLIER;
+
+ if (val < dev_priv->rps.min_freq ||
+ val > dev_priv->rps.max_freq ||
+ val < dev_priv->rps.min_freq_softlimit) {
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ return -EINVAL;
+ }
+
+ if (val > dev_priv->rps.rp0_freq)
+ DRM_DEBUG("User requested overclocking to %d\n",
+ val * GT_FREQUENCY_MULTIPLIER);
+
+ dev_priv->rps.max_freq_softlimit = val;
+
+ if (dev_priv->rps.cur_freq > val) {
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
+ } else if (!IS_VALLEYVIEW(dev)) {
+ /* We still need gen6_set_rps to process the new max_delay and
+ * update the interrupt limits even though frequency request is
+ * unchanged. */
+ gen6_set_rps(dev, dev_priv->rps.cur_freq);
+ }
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return count;
+}
+
+static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
+ else
+ ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t gt_min_freq_mhz_store(struct device *kdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+ ssize_t ret;
+
+ ret = kstrtou32(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
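+ /* same MHz to hw unit conversion as in the max freq store above */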
+ if (IS_VALLEYVIEW(dev))
+ val = vlv_freq_opcode(dev_priv, val);
+ else
+ val /= GT_FREQUENCY_MULTIPLIER;
+
+ if (val < dev_priv->rps.min_freq ||
+ val > dev_priv->rps.max_freq ||
+ val > dev_priv->rps.max_freq_softlimit) {
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ return -EINVAL;
+ }
+
+ dev_priv->rps.min_freq_softlimit = val;
+
+ if (dev_priv->rps.cur_freq < val) {
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
+ else
+ gen6_set_rps(dev, val);
+ } else if (!IS_VALLEYVIEW(dev)) {
+ /* We still need gen6_set_rps to process the new min_delay and
+ * update the interrupt limits even though frequency request is
+ * unchanged. */
+ gen6_set_rps(dev, dev_priv->rps.cur_freq);
+ }
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return count;
+}
+
+static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
+static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
+static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+
+static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
+
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
+static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+
+/* For now we have a static number of RP states */
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, rp_state_cap;
+ ssize_t ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
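+ /* RP_STATE_CAP packs RP0/RP1/RPn into bits 7:0, 15:8 and 23:16 */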
+ if (attr == &dev_attr_gt_RP0_freq_mhz) {
+ val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+ } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
+ val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+ } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
+ val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+ } else {
+ BUG();
+ }
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static const struct attribute *gen6_attrs[] = {
+ &dev_attr_gt_cur_freq_mhz.attr,
+ &dev_attr_gt_max_freq_mhz.attr,
+ &dev_attr_gt_min_freq_mhz.attr,
+ &dev_attr_gt_RP0_freq_mhz.attr,
+ &dev_attr_gt_RP1_freq_mhz.attr,
+ &dev_attr_gt_RPn_freq_mhz.attr,
+ NULL,
+};
+
+static const struct attribute *vlv_attrs[] = {
+ &dev_attr_gt_cur_freq_mhz.attr,
+ &dev_attr_gt_max_freq_mhz.attr,
+ &dev_attr_gt_min_freq_mhz.attr,
+ &dev_attr_vlv_rpe_freq_mhz.attr,
+ NULL,
+};
+
+static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *kdev = container_of(kobj, struct device, kobj);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
+ struct drm_device *dev = minor->dev;
+ struct i915_error_state_file_priv error_priv;
+ struct drm_i915_error_state_buf error_str;
+ ssize_t ret_count = 0;
+ int ret;
+
+ memset(&error_priv, 0, sizeof(error_priv));
+
+ ret = i915_error_state_buf_init(&error_str, count, off);
+ if (ret)
+ return ret;
+
+ error_priv.dev = dev;
+ i915_error_state_get(dev, &error_priv);
+
+ ret = i915_error_state_to_str(&error_str, &error_priv);
+ if (ret)
+ goto out;
+
+ ret_count = count < error_str.bytes ? count : error_str.bytes;
+
+ memcpy(buf, error_str.buf, ret_count);
+out:
+ i915_error_state_put(&error_priv);
+ i915_error_state_buf_release(&error_str);
+
+ return ret ?: ret_count;
+}
+
+static ssize_t error_state_write(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *kdev = container_of(kobj, struct device, kobj);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
+ struct drm_device *dev = minor->dev;
+ int ret;
+
+ DRM_DEBUG_DRIVER("Resetting error state\n");
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ i915_destroy_error_state(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static struct bin_attribute error_state_attr = {
+ .attr.name = "error",
+ .attr.mode = S_IRUSR | S_IWUSR,
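+ /* a size of zero tells sysfs the length isn't known up front */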
+ .size = 0,
+ .read = error_state_read,
+ .write = error_state_write,
+};
+
+void i915_setup_sysfs(struct drm_device *dev)
+{
+ int ret;
+
+#ifdef CONFIG_PM
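+ /* the RC6 files are merged into the device's existing power/ group */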
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+ &rc6_attr_group);
+ if (ret)
+ DRM_ERROR("RC6 residency sysfs setup failed\n");
+ }
+#endif
+ if (HAS_L3_DPF(dev)) {
+ ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
+ if (ret)
+ DRM_ERROR("l3 parity sysfs setup failed\n");
+
+ if (NUM_L3_SLICES(dev) > 1) {
+ ret = device_create_bin_file(dev->primary->kdev,
+ &dpf_attrs_1);
+ if (ret)
+ DRM_ERROR("l3 parity slice 1 setup failed\n");
+ }
+ }
+
+ ret = 0;
+ if (IS_VALLEYVIEW(dev))
+ ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
+ else if (INTEL_INFO(dev)->gen >= 6)
+ ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
+ if (ret)
+ DRM_ERROR("RPS sysfs setup failed\n");
+
+ ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
+ &error_state_attr);
+ if (ret)
+ DRM_ERROR("error_state sysfs setup failed\n");
+}
+
+void i915_teardown_sysfs(struct drm_device *dev)
+{
+ sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
+ if (IS_VALLEYVIEW(dev))
+ sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
+ else
+ sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
+ device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
+ device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
+#ifdef CONFIG_PM
+ sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
+#endif
+}
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 01840d9bc38..f5aa0067755 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -6,140 +6,251 @@
#include <linux/tracepoint.h>
#include <drm/drmP.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_ringbuffer.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
#define TRACE_INCLUDE_FILE i915_trace
-/* object tracking */
+/* pipe updates */
-TRACE_EVENT(i915_gem_object_create,
+TRACE_EVENT(i915_pipe_update_start,
+ TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max),
+ TP_ARGS(crtc, min, max),
- TP_PROTO(struct drm_gem_object *obj),
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u32, min)
+ __field(u32, max)
+ ),
- TP_ARGS(obj),
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
+ crtc->pipe);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->min = min;
+ __entry->max = max;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline, __entry->min, __entry->max)
+);
+
+TRACE_EVENT(i915_pipe_update_vblank_evaded,
+ TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max, u32 frame),
+ TP_ARGS(crtc, min, max, frame),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
- __field(u32, size)
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u32, min)
+ __field(u32, max)
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->size = obj->size;
+ __entry->pipe = crtc->pipe;
+ __entry->frame = frame;
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->min = min;
+ __entry->max = max;
),
- TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
+ TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline, __entry->min, __entry->max)
);
-TRACE_EVENT(i915_gem_object_bind,
+TRACE_EVENT(i915_pipe_update_end,
+ TP_PROTO(struct intel_crtc *crtc, u32 frame),
+ TP_ARGS(crtc, frame),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = frame;
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
- TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline)
+);
- TP_ARGS(obj, gtt_offset),
+/* object tracking */
+
+TRACE_EVENT(i915_gem_object_create,
+ TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_ARGS(obj),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
- __field(u32, gtt_offset)
+ __field(struct drm_i915_gem_object *, obj)
+ __field(u32, size)
),
TP_fast_assign(
__entry->obj = obj;
- __entry->gtt_offset = gtt_offset;
+ __entry->size = obj->base.size;
),
- TP_printk("obj=%p, gtt_offset=%08x",
- __entry->obj, __entry->gtt_offset)
+ TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);
-TRACE_EVENT(i915_gem_object_clflush,
+TRACE_EVENT(i915_vma_bind,
+ TP_PROTO(struct i915_vma *vma, unsigned flags),
+ TP_ARGS(vma, flags),
- TP_PROTO(struct drm_gem_object *obj),
+ TP_STRUCT__entry(
+ __field(struct drm_i915_gem_object *, obj)
+ __field(struct i915_address_space *, vm)
+ __field(u32, offset)
+ __field(u32, size)
+ __field(unsigned, flags)
+ ),
- TP_ARGS(obj),
+ TP_fast_assign(
+ __entry->obj = vma->obj;
+ __entry->vm = vma->vm;
+ __entry->offset = vma->node.start;
+ __entry->size = vma->node.size;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
+ __entry->obj, __entry->offset, __entry->size,
+ __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
+ __entry->vm)
+);
+
+TRACE_EVENT(i915_vma_unbind,
+ TP_PROTO(struct i915_vma *vma),
+ TP_ARGS(vma),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
+ __field(struct i915_address_space *, vm)
+ __field(u32, offset)
+ __field(u32, size)
),
TP_fast_assign(
- __entry->obj = obj;
+ __entry->obj = vma->obj;
+ __entry->vm = vma->vm;
+ __entry->offset = vma->node.start;
+ __entry->size = vma->node.size;
),
- TP_printk("obj=%p", __entry->obj)
+ TP_printk("obj=%p, offset=%08x size=%x vm=%p",
+ __entry->obj, __entry->offset, __entry->size, __entry->vm)
);
TRACE_EVENT(i915_gem_object_change_domain,
-
- TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
-
- TP_ARGS(obj, old_read_domains, old_write_domain),
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
+ TP_ARGS(obj, old_read, old_write),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
__field(u32, read_domains)
__field(u32, write_domain)
),
TP_fast_assign(
__entry->obj = obj;
- __entry->read_domains = obj->read_domains | (old_read_domains << 16);
- __entry->write_domain = obj->write_domain | (old_write_domain << 16);
+ __entry->read_domains = obj->base.read_domains | (old_read << 16);
+ __entry->write_domain = obj->base.write_domain | (old_write << 16);
),
- TP_printk("obj=%p, read=%04x, write=%04x",
+ TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
__entry->obj,
- __entry->read_domains, __entry->write_domain)
+ __entry->read_domains >> 16,
+ __entry->read_domains & 0xffff,
+ __entry->write_domain >> 16,
+ __entry->write_domain & 0xffff)
);
-TRACE_EVENT(i915_gem_object_get_fence,
-
- TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
-
- TP_ARGS(obj, fence, tiling_mode),
+TRACE_EVENT(i915_gem_object_pwrite,
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+ TP_ARGS(obj, offset, len),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
- __field(int, fence)
- __field(int, tiling_mode)
+ __field(struct drm_i915_gem_object *, obj)
+ __field(u32, offset)
+ __field(u32, len)
),
TP_fast_assign(
__entry->obj = obj;
- __entry->fence = fence;
- __entry->tiling_mode = tiling_mode;
+ __entry->offset = offset;
+ __entry->len = len;
),
- TP_printk("obj=%p, fence=%d, tiling=%d",
- __entry->obj, __entry->fence, __entry->tiling_mode)
+ TP_printk("obj=%p, offset=%u, len=%u",
+ __entry->obj, __entry->offset, __entry->len)
);
-TRACE_EVENT(i915_gem_object_unbind,
-
- TP_PROTO(struct drm_gem_object *obj),
-
- TP_ARGS(obj),
+TRACE_EVENT(i915_gem_object_pread,
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+ TP_ARGS(obj, offset, len),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
+ __field(u32, offset)
+ __field(u32, len)
),
TP_fast_assign(
__entry->obj = obj;
+ __entry->offset = offset;
+ __entry->len = len;
),
- TP_printk("obj=%p", __entry->obj)
+ TP_printk("obj=%p, offset=%u, len=%u",
+ __entry->obj, __entry->offset, __entry->len)
);
-TRACE_EVENT(i915_gem_object_destroy,
+TRACE_EVENT(i915_gem_object_fault,
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
+ TP_ARGS(obj, index, gtt, write),
+
+ TP_STRUCT__entry(
+ __field(struct drm_i915_gem_object *, obj)
+ __field(u32, index)
+ __field(bool, gtt)
+ __field(bool, write)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->index = index;
+ __entry->gtt = gtt;
+ __entry->write = write;
+ ),
- TP_PROTO(struct drm_gem_object *obj),
+ TP_printk("obj=%p, %s index=%u %s",
+ __entry->obj,
+ __entry->gtt ? "GTT" : "CPU",
+ __entry->index,
+ __entry->write ? ", writable" : "")
+);
+DECLARE_EVENT_CLASS(i915_gem_object,
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
@@ -149,168 +260,336 @@ TRACE_EVENT(i915_gem_object_destroy,
TP_printk("obj=%p", __entry->obj)
);
-/* batch tracing */
+DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
+ TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_ARGS(obj)
+);
-TRACE_EVENT(i915_gem_request_submit,
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
+ TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_ARGS(obj)
+);
- TP_PROTO(struct drm_device *dev, u32 seqno),
+TRACE_EVENT(i915_gem_evict,
+ TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
+ TP_ARGS(dev, size, align, flags),
- TP_ARGS(dev, seqno),
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, size)
+ __field(u32, align)
+ __field(unsigned, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->size = size;
+ __entry->align = align;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev=%d, size=%d, align=%d %s",
+ __entry->dev, __entry->size, __entry->align,
+ __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
+);
+
+TRACE_EVENT(i915_gem_evict_everything,
+ TP_PROTO(struct drm_device *dev),
+ TP_ARGS(dev),
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, seqno)
- ),
+ ),
TP_fast_assign(
__entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- i915_trace_irq_get(dev, seqno);
- ),
+ ),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%d", __entry->dev)
);
-TRACE_EVENT(i915_gem_request_flush,
+TRACE_EVENT(i915_gem_evict_vm,
+ TP_PROTO(struct i915_address_space *vm),
+ TP_ARGS(vm),
- TP_PROTO(struct drm_device *dev, u32 seqno,
- u32 flush_domains, u32 invalidate_domains),
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(struct i915_address_space *, vm)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = vm->dev->primary->index;
+ __entry->vm = vm;
+ ),
+
+ TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
+);
- TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
+TRACE_EVENT(i915_gem_ring_sync_to,
+ TP_PROTO(struct intel_engine_cs *from,
+ struct intel_engine_cs *to,
+ u32 seqno),
+ TP_ARGS(from, to, seqno),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, sync_from)
+ __field(u32, sync_to)
__field(u32, seqno)
- __field(u32, flush_domains)
- __field(u32, invalidate_domains)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
+ __entry->dev = from->dev->primary->index;
+ __entry->sync_from = from->id;
+ __entry->sync_to = to->id;
__entry->seqno = seqno;
- __entry->flush_domains = flush_domains;
- __entry->invalidate_domains = invalidate_domains;
),
- TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x",
- __entry->dev, __entry->seqno,
- __entry->flush_domains, __entry->invalidate_domains)
+ TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
+ __entry->dev,
+ __entry->sync_from, __entry->sync_to,
+ __entry->seqno)
);
-
-TRACE_EVENT(i915_gem_request_complete,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
-
- TP_ARGS(dev, seqno),
+TRACE_EVENT(i915_gem_ring_dispatch,
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno, u32 flags),
+ TP_ARGS(ring, seqno, flags),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, ring)
__field(u32, seqno)
+ __field(u32, flags)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
__entry->seqno = seqno;
+ __entry->flags = flags;
+ i915_trace_irq_get(ring, seqno);
),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
+ __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
);
-TRACE_EVENT(i915_gem_request_retire,
+TRACE_EVENT(i915_gem_ring_flush,
+ TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
+ TP_ARGS(ring, invalidate, flush),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, ring)
+ __field(u32, invalidate)
+ __field(u32, flush)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
+ __entry->invalidate = invalidate;
+ __entry->flush = flush;
+ ),
- TP_PROTO(struct drm_device *dev, u32 seqno),
+ TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
+ __entry->dev, __entry->ring,
+ __entry->invalidate, __entry->flush)
+);
- TP_ARGS(dev, seqno),
+DECLARE_EVENT_CLASS(i915_gem_request,
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
+ TP_ARGS(ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, ring)
__field(u32, seqno)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
__entry->seqno = seqno;
),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, seqno=%u",
+ __entry->dev, __entry->ring, __entry->seqno)
);
-TRACE_EVENT(i915_gem_request_wait_begin,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
+DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
+);
- TP_ARGS(dev, seqno),
+TRACE_EVENT(i915_gem_request_complete,
+ TP_PROTO(struct intel_engine_cs *ring),
+ TP_ARGS(ring),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, ring)
__field(u32, seqno)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
+ __entry->seqno = ring->get_seqno(ring, false);
),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, seqno=%u",
+ __entry->dev, __entry->ring, __entry->seqno)
);
-TRACE_EVENT(i915_gem_request_wait_end,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
+DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
+);
- TP_ARGS(dev, seqno),
+TRACE_EVENT(i915_gem_request_wait_begin,
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
+ TP_ARGS(ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, ring)
__field(u32, seqno)
+ __field(bool, blocking)
),
+ /* NB: the blocking information is racy since mutex_is_locked
+ * doesn't check that the current thread holds the lock. The only
+ * other option would be to pass the boolean information of whether
+ * or not the class was blocking down through the stack which is
+ * less desirable.
+ */
TP_fast_assign(
- __entry->dev = dev->primary->index;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
__entry->seqno = seqno;
+ __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
+ __entry->dev, __entry->ring, __entry->seqno,
+ __entry->blocking ? "yes (NB)" : "no")
);
-TRACE_EVENT(i915_ring_wait_begin,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
+);
- TP_PROTO(struct drm_device *dev),
-
- TP_ARGS(dev),
+DECLARE_EVENT_CLASS(i915_ring,
+ TP_PROTO(struct intel_engine_cs *ring),
+ TP_ARGS(ring),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, ring)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
),
- TP_printk("dev=%u", __entry->dev)
+ TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
);
-TRACE_EVENT(i915_ring_wait_end,
+DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
+ TP_PROTO(struct intel_engine_cs *ring),
+ TP_ARGS(ring)
+);
- TP_PROTO(struct drm_device *dev),
+DEFINE_EVENT(i915_ring, i915_ring_wait_end,
+ TP_PROTO(struct intel_engine_cs *ring),
+ TP_ARGS(ring)
+);
- TP_ARGS(dev),
+TRACE_EVENT(i915_flip_request,
+ TP_PROTO(int plane, struct drm_i915_gem_object *obj),
+
+ TP_ARGS(plane, obj),
TP_STRUCT__entry(
- __field(u32, dev)
+ __field(int, plane)
+ __field(struct drm_i915_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->plane = plane;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
+);
+
+TRACE_EVENT(i915_flip_complete,
+ TP_PROTO(int plane, struct drm_i915_gem_object *obj),
+
+ TP_ARGS(plane, obj),
+
+ TP_STRUCT__entry(
+ __field(int, plane)
+ __field(struct drm_i915_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->plane = plane;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
+);
+
+TRACE_EVENT_CONDITION(i915_reg_rw,
+ TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),
+
+ TP_ARGS(write, reg, val, len, trace),
+
+ TP_CONDITION(trace),
+
+ TP_STRUCT__entry(
+ __field(u64, val)
+ __field(u32, reg)
+ __field(u16, write)
+ __field(u16, len)
+ ),
+
+ TP_fast_assign(
+ __entry->val = (u64)val;
+ __entry->reg = reg;
+ __entry->write = write;
+ __entry->len = len;
+ ),
+
+ TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+ __entry->write ? "write" : "read",
+ __entry->reg, __entry->len,
+ (u32)(__entry->val & 0xffffffff),
+ (u32)(__entry->val >> 32))
+);
+
+TRACE_EVENT(intel_gpu_freq_change,
+ TP_PROTO(u32 freq),
+ TP_ARGS(freq),
+
+ TP_STRUCT__entry(
+ __field(u32, freq)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
+ __entry->freq = freq;
),
- TP_printk("dev=%u", __entry->dev)
+ TP_printk("new_freq=%u", __entry->freq)
);
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
index ead876eb6ea..f1df2bd4ecf 100644
--- a/drivers/gpu/drm/i915/i915_trace_points.c
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -7,5 +7,7 @@
#include "i915_drv.h"
+#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "i915_trace.h"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
new file mode 100644
index 00000000000..480da593e6c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -0,0 +1,538 @@
+/*
+ *
+ * Copyright 2008 (c) Intel Corporation
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ * Copyright 2013 (c) Intel Corporation
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "intel_drv.h"
+#include "i915_reg.h"
+
+static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpll_reg;
+
+ /* On IVB, 3rd pipe shares PLL with another one */
+ if (pipe > 1)
+ return false;
+
+ if (HAS_PCH_SPLIT(dev))
+ dpll_reg = PCH_DPLL(pipe);
+ else
+ dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
+
+ return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
+}
+
+static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (HAS_PCH_SPLIT(dev))
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->regfile.save_palette_a;
+ else
+ array = dev_priv->regfile.save_palette_b;
+
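+ /* each pipe has a 256-entry palette, one 32-bit word per entry */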
+ for (i = 0; i < 256; i++)
+ array[i] = I915_READ(reg + (i << 2));
+}
+
+static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (HAS_PCH_SPLIT(dev))
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->regfile.save_palette_a;
+ else
+ array = dev_priv->regfile.save_palette_b;
+
+ for (i = 0; i < 256; i++)
+ I915_WRITE(reg + (i << 2), array[i]);
+}
+
+void i915_save_display_reg(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Cursor state */
+ dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
+ dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
+ dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
+ dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
+ dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
+ dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
+ if (IS_GEN2(dev))
+ dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+ dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+ }
+
+ /* Pipe & plane A info */
+ dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
+ dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
+ } else {
+ dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
+ }
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+ dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
+ dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
+ dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
+ dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
+ dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
+ dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
+ if (!HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+ dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+ dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+ dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+ dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+ dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+ dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+ dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+ dev_priv->regfile.saveTRANSACONF = I915_READ(_PCH_TRANSACONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_PCH_TRANS_HTOTAL_A);
+ dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_PCH_TRANS_HBLANK_A);
+ dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_PCH_TRANS_HSYNC_A);
+ dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_PCH_TRANS_VTOTAL_A);
+ dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_PCH_TRANS_VBLANK_A);
+ dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_PCH_TRANS_VSYNC_A);
+ }
+
+ dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
+ dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+ dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
+ dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
+ dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
+ dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
+ }
+ i915_save_palette(dev, PIPE_A);
+ dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
+
+ /* Pipe & plane B info */
+ dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
+ dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
+ } else {
+ dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
+ }
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+ dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
+ dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
+ dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
+ dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
+ dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
+ dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
+ if (!HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+ dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+ dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+ dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+ dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+ dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+ dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+ dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+ dev_priv->regfile.saveTRANSBCONF = I915_READ(_PCH_TRANSBCONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_PCH_TRANS_HTOTAL_B);
+ dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_PCH_TRANS_HBLANK_B);
+ dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_PCH_TRANS_HSYNC_B);
+ dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_PCH_TRANS_VTOTAL_B);
+ dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_PCH_TRANS_VBLANK_B);
+ dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_PCH_TRANS_VSYNC_B);
+ }
+
+ dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
+ dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+ dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
+ dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
+ dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
+ dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
+ }
+ i915_save_palette(dev, PIPE_B);
+ dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < 16; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
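+ /* fall through: gen3 also saves the eight gen2 fence registers */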
+ case 2:
+ for (i = 0; i < 8; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+ }
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
+ else
+ dev_priv->regfile.saveADPA = I915_READ(ADPA);
+
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ dev_priv->regfile.saveDP_B = I915_READ(DP_B);
+ dev_priv->regfile.saveDP_C = I915_READ(DP_C);
+ dev_priv->regfile.saveDP_D = I915_READ(DP_D);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_DATA_M_G4X);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_DATA_M_G4X);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_DATA_N_G4X);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_DATA_N_G4X);
+ dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_LINK_M_G4X);
+ dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_LINK_M_G4X);
+ dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_LINK_N_G4X);
+ dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_LINK_N_G4X);
+ }
+	/* FIXME: save TV & SDVO state */
+
+ /* Backlight */
+ if (INTEL_INFO(dev)->gen <= 4)
+ pci_read_config_byte(dev->pdev, PCI_LBPC,
+ &dev_priv->regfile.saveLBB);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+ } else {
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ if (INTEL_INFO(dev)->gen >= 4)
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ }
+
+ return;
+}
+
+void i915_restore_display_reg(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int dpll_a_reg, fpa0_reg, fpa1_reg;
+ int dpll_b_reg, fpb0_reg, fpb1_reg;
+ int i;
+
+ /* Backlight */
+ if (INTEL_INFO(dev)->gen <= 4)
+ pci_write_config_byte(dev->pdev, PCI_LBPC,
+ dev_priv->regfile.saveLBB);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+ /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+		 * otherwise we get a blank eDP screen after S3 on some machines
+ */
+ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
+ } else {
+ if (INTEL_INFO(dev)->gen >= 4)
+ I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+ }
+
+ /* Display port ratios (must be done before clock is set) */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
+ I915_WRITE(_PIPEB_DATA_M_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
+ I915_WRITE(_PIPEA_DATA_N_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
+ I915_WRITE(_PIPEB_DATA_N_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
+ I915_WRITE(_PIPEA_LINK_M_G4X, dev_priv->regfile.savePIPEA_DP_LINK_M);
+ I915_WRITE(_PIPEB_LINK_M_G4X, dev_priv->regfile.savePIPEB_DP_LINK_M);
+ I915_WRITE(_PIPEA_LINK_N_G4X, dev_priv->regfile.savePIPEA_DP_LINK_N);
+ I915_WRITE(_PIPEB_LINK_N_G4X, dev_priv->regfile.savePIPEB_DP_LINK_N);
+ }
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
+ break;
+ case 3:
+ case 2:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
+ break;
+ }
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dpll_a_reg = _PCH_DPLL_A;
+ dpll_b_reg = _PCH_DPLL_B;
+ fpa0_reg = _PCH_FPA0;
+ fpb0_reg = _PCH_FPB0;
+ fpa1_reg = _PCH_FPA1;
+ fpb1_reg = _PCH_FPB1;
+ } else {
+ dpll_a_reg = _DPLL_A;
+ dpll_b_reg = _DPLL_B;
+ fpa0_reg = _FPA0;
+ fpb0_reg = _FPB0;
+ fpa1_reg = _FPA1;
+ fpb1_reg = _FPB1;
+ }
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
+ I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
+ }
+
+ /* Pipe & plane A info */
+ /* Prime the clock */
+ if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
+ ~DPLL_VCO_ENABLE);
+ POSTING_READ(dpll_a_reg);
+ udelay(150);
+ }
+ I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
+ I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
+ /* Actually enable it */
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
+ POSTING_READ(dpll_a_reg);
+ udelay(150);
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
+ POSTING_READ(_DPLL_A_MD);
+ }
+ udelay(150);
+
+ /* Restore mode */
+ I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
+ I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
+ I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
+ I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
+ I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
+ I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
+ I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
+ I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
+ I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
+
+ I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
+ I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
+
+ I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
+ I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
+ I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
+
+ I915_WRITE(_PCH_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
+ I915_WRITE(_PCH_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
+ I915_WRITE(_PCH_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
+ I915_WRITE(_PCH_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
+ I915_WRITE(_PCH_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
+ I915_WRITE(_PCH_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
+ I915_WRITE(_PCH_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
+ }
+
+ /* Restore plane info */
+ I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
+ I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
+ I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
+ I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
+ I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
+ I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
+ }
+
+ I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
+
+ i915_restore_palette(dev, PIPE_A);
+ /* Enable the plane */
+ I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
+ I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
+
+ /* Pipe & plane B info */
+ if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
+ ~DPLL_VCO_ENABLE);
+ POSTING_READ(dpll_b_reg);
+ udelay(150);
+ }
+ I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
+ I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
+ /* Actually enable it */
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
+ POSTING_READ(dpll_b_reg);
+ udelay(150);
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
+ POSTING_READ(_DPLL_B_MD);
+ }
+ udelay(150);
+
+ /* Restore mode */
+ I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
+ I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
+ I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
+ I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
+ I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
+ I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
+ I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
+ I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
+ I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
+
+ I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
+ I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
+
+ I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
+ I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
+ I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
+
+ I915_WRITE(_PCH_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
+ I915_WRITE(_PCH_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
+ I915_WRITE(_PCH_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
+ I915_WRITE(_PCH_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
+ I915_WRITE(_PCH_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
+ I915_WRITE(_PCH_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
+ I915_WRITE(_PCH_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
+ }
+
+ /* Restore plane info */
+ I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
+ I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
+ I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
+ I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
+ I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
+ I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
+ }
+
+ I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
+
+ i915_restore_palette(dev, PIPE_B);
+ /* Enable the plane */
+ I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
+ I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
+
+ /* Cursor state */
+ I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
+ I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
+ I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
+ I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
+ I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
+ I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
+ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
+ else
+ I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
+
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
+ I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
+ I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
+ }
+ /* FIXME: restore TV & SDVO state */
+
+ return;
+}
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
new file mode 100644
index 00000000000..d96eee1ae9c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -0,0 +1,165 @@
+/*
+ * Intel ACPI functions
+ *
+ * _DSM related code stolen from nouveau_acpi.c.
+ */
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/vga_switcheroo.h>
+#include <drm/drmP.h>
+#include "i915_drv.h"
+
+#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
+#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
+
+static struct intel_dsm_priv {
+ acpi_handle dhandle;
+} intel_dsm_priv;
+
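+/*
+ * The Intel _DSM GUID in ACPI's mixed-endian wire format; this is
+ * believed to correspond to 7ed873d3-c2d0-4e4f-a854-0f1317b01c2c.
+ */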
+static const u8 intel_dsm_guid[] = {
+ 0xd3, 0x73, 0xd8, 0x7e,
+ 0xd0, 0xc2,
+ 0x4f, 0x4e,
+ 0xa8, 0x54,
+ 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
+};
+
+static char *intel_dsm_port_name(u8 id)
+{
+ switch (id) {
+ case 0:
+ return "Reserved";
+ case 1:
+ return "Analog VGA";
+ case 2:
+ return "LVDS";
+ case 3:
+ return "Reserved";
+ case 4:
+ return "HDMI/DVI_B";
+ case 5:
+ return "HDMI/DVI_C";
+ case 6:
+ return "HDMI/DVI_D";
+ case 7:
+ return "DisplayPort_A";
+ case 8:
+ return "DisplayPort_B";
+ case 9:
+ return "DisplayPort_C";
+ case 0xa:
+ return "DisplayPort_D";
+ case 0xb:
+ case 0xc:
+ case 0xd:
+ return "Reserved";
+ case 0xe:
+ return "WiDi";
+ default:
+ return "bad type";
+ }
+}
+
+static char *intel_dsm_mux_type(u8 type)
+{
+ switch (type) {
+ case 0:
+ return "unknown";
+ case 1:
+ return "No MUX, iGPU only";
+ case 2:
+ return "No MUX, dGPU only";
+ case 3:
+ return "MUXed between iGPU and dGPU";
+ default:
+ return "bad type";
+ }
+}
+
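+/*
+ * Evaluate and dump the PLATFORM_MUX_INFO package: element 0 holds the
+ * connector count, and each following element is a package of
+ * [connector id, info buffer] describing the mux wiring for that port.
+ */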
+static void intel_dsm_platform_mux_info(void)
+{
+ int i;
+ union acpi_object *pkg, *connector_count;
+
+ pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, intel_dsm_guid,
+ INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
+ NULL, ACPI_TYPE_PACKAGE);
+ if (!pkg) {
+ DRM_DEBUG_DRIVER("failed to evaluate _DSM\n");
+ return;
+ }
+
+ connector_count = &pkg->package.elements[0];
+ DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
+ (unsigned long long)connector_count->integer.value);
+ for (i = 1; i < pkg->package.count; i++) {
+ union acpi_object *obj = &pkg->package.elements[i];
+ union acpi_object *connector_id = &obj->package.elements[0];
+ union acpi_object *info = &obj->package.elements[1];
+ DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
+ (unsigned long long)connector_id->integer.value);
+ DRM_DEBUG_DRIVER(" port id: %s\n",
+ intel_dsm_port_name(info->buffer.pointer[0]));
+ DRM_DEBUG_DRIVER(" display mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[1]));
+ DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[2]));
+ DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[3]));
+ }
+
+ ACPI_FREE(pkg);
+}
+
+static bool intel_dsm_pci_probe(struct pci_dev *pdev)
+{
+ acpi_handle dhandle;
+
+ dhandle = ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+
+ if (!acpi_check_dsm(dhandle, intel_dsm_guid, INTEL_DSM_REVISION_ID,
+ 1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
+ DRM_DEBUG_KMS("no _DSM method for intel device\n");
+ return false;
+ }
+
+ intel_dsm_priv.dhandle = dhandle;
+ intel_dsm_platform_mux_info();
+
+ return true;
+}
+
+static bool intel_dsm_detect(void)
+{
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+ struct pci_dev *pdev = NULL;
+ bool has_dsm = false;
+ int vga_count = 0;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+ has_dsm |= intel_dsm_pci_probe(pdev);
+ }
+
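+	/* the mux _DSM only matters on dual-GPU (vga_switcheroo) systems */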
+ if (vga_count == 2 && has_dsm) {
+ acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
+ DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n",
+ acpi_method_name);
+ return true;
+ }
+
+ return false;
+}
+
+void intel_register_dsm_handler(void)
+{
+ if (!intel_dsm_detect())
+ return;
+}
+
+void intel_unregister_dsm_handler(void)
+{
+}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f2756774758..827498e081d 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1,5 +1,5 @@
/*
- * Copyright Â© 2006 Intel Corporation
+ * Copyright © 2006 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,15 +24,18 @@
* Eric Anholt <eric@anholt.net>
*
*/
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
+#include <linux/dmi.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_bios.h"
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
+static int panel_type;
+
static void *
find_section(struct bdb_header *bdb, int section_id)
{
@@ -46,13 +49,19 @@ find_section(struct bdb_header *bdb, int section_id)
total = bdb->bdb_size;
/* walk the sections looking for section_id */
- while (index < total) {
+ while (index + 3 < total) {
current_id = *(base + index);
index++;
+
current_size = *((u16 *)(base + index));
index += 2;
+
+ if (index + current_size > total)
+ return NULL;
+
if (current_id == section_id)
return base + index;
+
index += current_size;
}
@@ -71,7 +80,7 @@ get_blocksize(void *p)
static void
fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
- struct lvds_dvo_timing *dvo_timing)
+ const struct lvds_dvo_timing *dvo_timing)
{
panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
dvo_timing->hactive_lo;
@@ -93,6 +102,16 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
panel_fixed_mode->clock = dvo_timing->clock * 10;
panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+ if (dvo_timing->hsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dvo_timing->vsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
/* Some VBTs have bogus h/vtotal values */
if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
@@ -102,33 +121,131 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
drm_mode_set_name(panel_fixed_mode);
}
+static bool
+lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
+ const struct lvds_dvo_timing *b)
+{
+ if (a->hactive_hi != b->hactive_hi ||
+ a->hactive_lo != b->hactive_lo)
+ return false;
+
+ if (a->hsync_off_hi != b->hsync_off_hi ||
+ a->hsync_off_lo != b->hsync_off_lo)
+ return false;
+
+ if (a->hsync_pulse_width != b->hsync_pulse_width)
+ return false;
+
+ if (a->hblank_hi != b->hblank_hi ||
+ a->hblank_lo != b->hblank_lo)
+ return false;
+
+ if (a->vactive_hi != b->vactive_hi ||
+ a->vactive_lo != b->vactive_lo)
+ return false;
+
+ if (a->vsync_off != b->vsync_off)
+ return false;
+
+ if (a->vsync_pulse_width != b->vsync_pulse_width)
+ return false;
+
+ if (a->vblank_hi != b->vblank_hi ||
+ a->vblank_lo != b->vblank_lo)
+ return false;
+
+ return true;
+}
+
+static const struct lvds_dvo_timing *
+get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
+ int index)
+{
+	/*
+	 * The size of fp_timing varies across platforms, so compute the
+	 * DVO timing's relative offset within each LVDS data entry and
+	 * use it to locate the DVO timing entry.
+	 */
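+	/*
+	 * For example, with hypothetical offsets fp_timing_offset = 0x10
+	 * and dvo_timing_offset = 0x36 in ptr[0], each DVO timing sits
+	 * 0x26 bytes into its entry, and entry i starts at
+	 * data + lfp_data_size * i.
+	 */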
+
+ int lfp_data_size =
+ lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
+ int dvo_timing_offset =
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
+ char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index;
+
+ return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
+}
+
+/* get lvds_fp_timing entry
+ * this function may return NULL if the corresponding entry is invalid
+ */
+static const struct lvds_fp_timing *
+get_lvds_fp_timing(const struct bdb_header *bdb,
+ const struct bdb_lvds_lfp_data *data,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs,
+ int index)
+{
+ size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
+ u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
+ size_t ofs;
+
+ if (index >= ARRAY_SIZE(ptrs->ptr))
+ return NULL;
+ ofs = ptrs->ptr[index].fp_timing_offset;
+ if (ofs < data_ofs ||
+ ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
+ return NULL;
+ return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
+}
+
/* Try to find integrated panel data */
static void
parse_lfp_panel_data(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
- struct bdb_lvds_options *lvds_options;
- struct bdb_lvds_lfp_data *lvds_lfp_data;
- struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
- struct bdb_lvds_lfp_data_entry *entry;
- struct lvds_dvo_timing *dvo_timing;
+ const struct bdb_lvds_options *lvds_options;
+ const struct bdb_lvds_lfp_data *lvds_lfp_data;
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+ const struct lvds_dvo_timing *panel_dvo_timing;
+ const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
- int lfp_data_size, dvo_timing_offset;
- int i, temp_downclock;
- struct drm_display_mode *temp_mode;
-
- /* Defaults if we can't find VBT info */
- dev_priv->lvds_dither = 0;
- dev_priv->lvds_vbt = 0;
+ int i, downclock, drrs_mode;
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
return;
- dev_priv->lvds_dither = lvds_options->pixel_dither;
+ dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
if (lvds_options->panel_type == 0xff)
return;
+ panel_type = lvds_options->panel_type;
+
+ drrs_mode = (lvds_options->dps_panel_type_bits
+ >> (panel_type * 2)) & MODE_MASK;
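+	/* dps_panel_type_bits packs 2 bits per panel; e.g. a hypothetical
+	 * panel_type of 2 would select bits 5:4 */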
+ /*
+ * VBT has static DRRS = 0 and seamless DRRS = 2.
+ * The below piece of code is required to adjust vbt.drrs_type
+ * to match the enum drrs_support_type.
+ */
+ switch (drrs_mode) {
+ case 0:
+ dev_priv->vbt.drrs_type = STATIC_DRRS_SUPPORT;
+ DRM_DEBUG_KMS("DRRS supported mode is static\n");
+ break;
+ case 2:
+ dev_priv->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
+ DRM_DEBUG_KMS("DRRS supported mode is seamless\n");
+ break;
+ default:
+ dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+ DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
+ break;
+ }
+
lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
if (!lvds_lfp_data)
return;
@@ -137,74 +254,94 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
if (!lvds_lfp_data_ptrs)
return;
- dev_priv->lvds_vbt = 1;
+ dev_priv->vbt.lvds_vbt = 1;
- lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
- lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
- entry = (struct bdb_lvds_lfp_data_entry *)
- ((uint8_t *)lvds_lfp_data->data + (lfp_data_size *
- lvds_options->panel_type));
- dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
- lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
-
- /*
- * the size of fp_timing varies on the different platform.
- * So calculate the DVO timing relative offset in LVDS data
- * entry to get the DVO timing entry
- */
- dvo_timing = (struct lvds_dvo_timing *)
- ((unsigned char *)entry + dvo_timing_offset);
+ panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ lvds_options->panel_type);
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+ if (!panel_fixed_mode)
+ return;
- fill_detail_timing_data(panel_fixed_mode, dvo_timing);
+ fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
- dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+ dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
- temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
- temp_downclock = panel_fixed_mode->clock;
/*
- * enumerate the LVDS panel timing info entry in VBT to check whether
- * the LVDS downclock is found.
+ * Iterate over the LVDS panel timing info to find the lowest clock
+ * for the native resolution.
*/
+ downclock = panel_dvo_timing->clock;
for (i = 0; i < 16; i++) {
- entry = (struct bdb_lvds_lfp_data_entry *)
- ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i));
- dvo_timing = (struct lvds_dvo_timing *)
- ((unsigned char *)entry + dvo_timing_offset);
-
- fill_detail_timing_data(temp_mode, dvo_timing);
-
- if (temp_mode->hdisplay == panel_fixed_mode->hdisplay &&
- temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
- temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
- temp_mode->htotal == panel_fixed_mode->htotal &&
- temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
- temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
- temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
- temp_mode->vtotal == panel_fixed_mode->vtotal &&
- temp_mode->clock < temp_downclock) {
- /*
- * downclock is already found. But we expect
- * to find the lower downclock.
- */
- temp_downclock = temp_mode->clock;
- }
- /* clear it to zero */
- memset(temp_mode, 0, sizeof(*temp_mode));
+ const struct lvds_dvo_timing *dvo_timing;
+
+ dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ i);
+ if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
+ dvo_timing->clock < downclock)
+ downclock = dvo_timing->clock;
}
- kfree(temp_mode);
- if (temp_downclock < panel_fixed_mode->clock) {
+
+ if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) {
dev_priv->lvds_downclock_avail = 1;
- dev_priv->lvds_downclock = temp_downclock;
- DRM_DEBUG_KMS("LVDS downclock is found in VBT. ",
- "Normal Clock %dKHz, downclock %dKHz\n",
- temp_downclock, panel_fixed_mode->clock);
+ dev_priv->lvds_downclock = downclock * 10;
+ DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
+ "Normal Clock %dKHz, downclock %dKHz\n",
+ panel_fixed_mode->clock, 10*downclock);
}
- return;
+
+ fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ lvds_options->panel_type);
+ if (fp_timing) {
+ /* check the resolution, just to be sure */
+ if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
+ fp_timing->y_res == panel_fixed_mode->vdisplay) {
+ dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
+ DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
+ dev_priv->vbt.bios_lvds_val);
+ }
+ }
+}
+
+static void
+parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+ const struct bdb_lfp_backlight_data *backlight_data;
+ const struct bdb_lfp_backlight_data_entry *entry;
+
+ backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
+ if (!backlight_data)
+ return;
+
+ if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
+ DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n",
+ backlight_data->entry_size);
+ return;
+ }
+
+ entry = &backlight_data->data[panel_type];
+
+ dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
+ if (!dev_priv->vbt.backlight.present) {
+ DRM_DEBUG_KMS("PWM backlight not present in VBT (type %u)\n",
+ entry->type);
+ return;
+ }
+
+ dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
+ dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+ DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
+ "active %s, min brightness %u, level %u\n",
+ dev_priv->vbt.backlight.pwm_freq_hz,
+ dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
+ entry->min_brightness,
+ backlight_data->level[panel_type]);
}
/* Try to find sdvo panel data */
@@ -212,60 +349,79 @@ static void
parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
- struct bdb_sdvo_lvds_options *sdvo_lvds_options;
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
+ int index;
- dev_priv->sdvo_lvds_vbt_mode = NULL;
-
- sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
- if (!sdvo_lvds_options)
+ index = i915.vbt_sdvo_panel_type;
+ if (index == -2) {
+ DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
return;
+ }
+
+ if (index == -1) {
+ struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+
+ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+ if (!sdvo_lvds_options)
+ return;
+
+ index = sdvo_lvds_options->panel_type;
+ }
dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
if (!dvo_timing)
return;
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
-
if (!panel_fixed_mode)
return;
- fill_detail_timing_data(panel_fixed_mode,
- dvo_timing + sdvo_lvds_options->panel_type);
+ fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
- dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
+ dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
- return;
+ DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+}
+
+static int intel_bios_ssc_frequency(struct drm_device *dev,
+ bool alternate)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ return alternate ? 66667 : 48000;
+ case 3:
+ case 4:
+ return alternate ? 100000 : 96000;
+ default:
+ return alternate ? 100000 : 120000;
+ }
}
static void
parse_general_features(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
+ struct drm_device *dev = dev_priv->dev;
struct bdb_general_features *general;
- /* Set sensible defaults in case we can't find the general block */
- dev_priv->int_tv_support = 1;
- dev_priv->int_crt_support = 1;
-
general = find_section(bdb, BDB_GENERAL_FEATURES);
if (general) {
- dev_priv->int_tv_support = general->int_tv_support;
- dev_priv->int_crt_support = general->int_crt_support;
- dev_priv->lvds_use_ssc = general->enable_ssc;
-
- if (dev_priv->lvds_use_ssc) {
- if (IS_I85X(dev_priv->dev))
- dev_priv->lvds_ssc_freq =
- general->ssc_freq ? 66 : 48;
- else if (IS_IRONLAKE(dev_priv->dev))
- dev_priv->lvds_ssc_freq =
- general->ssc_freq ? 100 : 120;
- else
- dev_priv->lvds_ssc_freq =
- general->ssc_freq ? 100 : 96;
- }
+ dev_priv->vbt.int_tv_support = general->int_tv_support;
+ dev_priv->vbt.int_crt_support = general->int_crt_support;
+ dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
+ dev_priv->vbt.lvds_ssc_freq =
+ intel_bios_ssc_frequency(dev, general->ssc_freq);
+ dev_priv->vbt.display_clock_mode = general->display_clock_mode;
+ dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
+ dev_priv->vbt.int_tv_support,
+ dev_priv->vbt.int_crt_support,
+ dev_priv->vbt.lvds_use_ssc,
+ dev_priv->vbt.lvds_ssc_freq,
+ dev_priv->vbt.display_clock_mode,
+ dev_priv->vbt.fdi_rx_polarity_inverted);
}
}
@@ -274,14 +430,6 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
struct bdb_general_definitions *general;
- const int crt_bus_map_table[] = {
- GPIOB,
- GPIOA,
- GPIOC,
- GPIOD,
- GPIOE,
- GPIOF,
- };
general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (general) {
@@ -289,30 +437,28 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (block_size >= sizeof(*general)) {
int bus_pin = general->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
- if ((bus_pin >= 1) && (bus_pin <= 6)) {
- dev_priv->crt_ddc_bus =
- crt_bus_map_table[bus_pin-1];
- }
+ if (intel_gmbus_is_port_valid(bus_pin))
+ dev_priv->vbt.crt_ddc_pin = bus_pin;
} else {
DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
- block_size);
+ block_size);
}
}
}
static void
parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
- struct bdb_header *bdb)
+ struct bdb_header *bdb)
{
struct sdvo_device_mapping *p_mapping;
struct bdb_general_definitions *p_defs;
- struct child_device_config *p_child;
+ union child_device_config *p_child;
int i, child_device_num, count;
u16 block_size;
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
- DRM_DEBUG_KMS("No general definition block is found\n");
+ DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
return;
}
/* judge whether the size of child device meets the requirements.
@@ -333,40 +479,48 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
count = 0;
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
- if (!p_child->device_type) {
+ if (!p_child->old.device_type) {
/* skip the device block if device type is invalid */
continue;
}
- if (p_child->slave_addr != SLAVE_ADDR1 &&
- p_child->slave_addr != SLAVE_ADDR2) {
+ if (p_child->old.slave_addr != SLAVE_ADDR1 &&
+ p_child->old.slave_addr != SLAVE_ADDR2) {
/*
* If the slave address is neither 0x70 nor 0x72,
* it is not a SDVO device. Skip it.
*/
continue;
}
- if (p_child->dvo_port != DEVICE_PORT_DVOB &&
- p_child->dvo_port != DEVICE_PORT_DVOC) {
+ if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
+ p_child->old.dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
- DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n");
+ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
continue;
}
DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
" %s port\n",
- p_child->slave_addr,
- (p_child->dvo_port == DEVICE_PORT_DVOB) ?
+ p_child->old.slave_addr,
+ (p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
- p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
+ p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
if (!p_mapping->initialized) {
- p_mapping->dvo_port = p_child->dvo_port;
- p_mapping->slave_addr = p_child->slave_addr;
- p_mapping->dvo_wiring = p_child->dvo_wiring;
+ p_mapping->dvo_port = p_child->old.dvo_port;
+ p_mapping->slave_addr = p_child->old.slave_addr;
+ p_mapping->dvo_wiring = p_child->old.dvo_wiring;
+ p_mapping->ddc_pin = p_child->old.ddc_pin;
+ p_mapping->i2c_pin = p_child->old.i2c_pin;
p_mapping->initialized = 1;
+ DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+ p_mapping->dvo_port,
+ p_mapping->slave_addr,
+ p_mapping->dvo_wiring,
+ p_mapping->ddc_pin,
+ p_mapping->i2c_pin);
} else {
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
}
- if (p_child->slave2_addr) {
+ if (p_child->old.slave2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
@@ -386,22 +540,467 @@ static void
parse_driver_features(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
- struct drm_device *dev = dev_priv->dev;
struct bdb_driver_features *driver;
driver = find_section(bdb, BDB_DRIVER_FEATURES);
if (!driver)
return;
- if (driver && SUPPORTS_EDP(dev) &&
- driver->lvds_config == BDB_DRIVER_FEATURE_EDP) {
- dev_priv->edp_support = 1;
- } else {
- dev_priv->edp_support = 0;
- }
+ if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ dev_priv->vbt.edp_support = 1;
- if (driver && driver->dual_frequency)
+ if (driver->dual_frequency)
dev_priv->render_reclock_avail = true;
+
+ DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+ /*
+ * If DRRS is not supported, drrs_type has to be set to 0.
+ * This is because, VBT is configured in such a way that
+ * static DRRS is 0 and DRRS not supported is represented by
+ * driver->drrs_enabled=false
+ */
+ if (!driver->drrs_enabled)
+ dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+}
+
+static void
+parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+ struct bdb_edp *edp;
+ struct edp_power_seq *edp_pps;
+ struct edp_link_params *edp_link_params;
+
+ edp = find_section(bdb, BDB_EDP);
+ if (!edp) {
+ if (dev_priv->vbt.edp_support)
+ DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
+ return;
+ }
+
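+	/* color_depth packs 2 bits per panel, indexed by panel_type */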
+ switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+ case EDP_18BPP:
+ dev_priv->vbt.edp_bpp = 18;
+ break;
+ case EDP_24BPP:
+ dev_priv->vbt.edp_bpp = 24;
+ break;
+ case EDP_30BPP:
+ dev_priv->vbt.edp_bpp = 30;
+ break;
+ }
+
+ /* Get the eDP sequencing and link info */
+ edp_pps = &edp->power_seqs[panel_type];
+ edp_link_params = &edp->link_params[panel_type];
+
+ dev_priv->vbt.edp_pps = *edp_pps;
+
+ switch (edp_link_params->rate) {
+ case EDP_RATE_1_62:
+ dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
+ break;
+ case EDP_RATE_2_7:
+ dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
+ break;
+ default:
+ DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
+ edp_link_params->rate);
+ break;
+ }
+
+ switch (edp_link_params->lanes) {
+ case EDP_LANE_1:
+ dev_priv->vbt.edp_lanes = 1;
+ break;
+ case EDP_LANE_2:
+ dev_priv->vbt.edp_lanes = 2;
+ break;
+ case EDP_LANE_4:
+ dev_priv->vbt.edp_lanes = 4;
+ break;
+ default:
+ DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
+ edp_link_params->lanes);
+ break;
+ }
+
+ switch (edp_link_params->preemphasis) {
+ case EDP_PREEMPHASIS_NONE:
+ dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+ break;
+ case EDP_PREEMPHASIS_3_5dB:
+ dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+ break;
+ case EDP_PREEMPHASIS_6dB:
+ dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+ break;
+ case EDP_PREEMPHASIS_9_5dB:
+ dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+ break;
+ default:
+ DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
+ edp_link_params->preemphasis);
+ break;
+ }
+
+ switch (edp_link_params->vswing) {
+ case EDP_VSWING_0_4V:
+ dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
+ break;
+ case EDP_VSWING_0_6V:
+ dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
+ break;
+ case EDP_VSWING_0_8V:
+ dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
+ break;
+ case EDP_VSWING_1_2V:
+ dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+ break;
+ default:
+ DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
+ edp_link_params->vswing);
+ break;
+ }
+}
+
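+/*
+ * Skip one MIPI sequence. Each element starts with an element id byte:
+ * a SEND_PKT element is followed by a command flag, a data type and a
+ * 2-byte payload length plus payload, a DELAY element by a 4-byte
+ * delay, and a GPIO element by 2 more bytes; an element id of 0
+ * terminates the sequence.
+ */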
+static u8 *goto_next_sequence(u8 *data, int *size)
+{
+ u16 len;
+ int tmp = *size;
+
+ if (--tmp < 0)
+ return NULL;
+
+ /* goto first element */
+ data++;
+ while (1) {
+ switch (*data) {
+ case MIPI_SEQ_ELEM_SEND_PKT:
+ /*
+ * skip by this element payload size
+ * skip elem id, command flag and data type
+ */
+ tmp -= 5;
+ if (tmp < 0)
+ return NULL;
+
+ data += 3;
+ len = *((u16 *)data);
+
+ tmp -= len;
+ if (tmp < 0)
+ return NULL;
+
+ /* skip by len */
+ data = data + 2 + len;
+ break;
+ case MIPI_SEQ_ELEM_DELAY:
+ /* skip by elem id, and delay is 4 bytes */
+ tmp -= 5;
+ if (tmp < 0)
+ return NULL;
+
+ data += 5;
+ break;
+ case MIPI_SEQ_ELEM_GPIO:
+ tmp -= 3;
+ if (tmp < 0)
+ return NULL;
+
+ data += 3;
+ break;
+ default:
+ DRM_ERROR("Unknown element\n");
+ return NULL;
+ }
+
+ /* end of sequence ? */
+ if (*data == 0)
+ break;
+ }
+
+ /* goto next sequence or end of block byte */
+ if (--tmp < 0)
+ return NULL;
+
+ data++;
+
+ /* update amount of data left for the sequence block to be parsed */
+ *size = tmp;
+ return data;
+}
+
+static void
+parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+ struct bdb_mipi_config *start;
+ struct bdb_mipi_sequence *sequence;
+ struct mipi_config *config;
+ struct mipi_pps_data *pps;
+ u8 *data, *seq_data;
+ int i, panel_id, seq_size;
+ u16 block_size;
+
+ /* parse MIPI blocks only if LFP type is MIPI */
+ if (!dev_priv->vbt.has_mipi)
+ return;
+
+ /* Initialize this to undefined indicating no generic MIPI support */
+ dev_priv->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
+
+	/* Block #40 is already parsed and panel_fixed_mode is
+	 * stored in dev_priv->vbt.lfp_lvds_vbt_mode;
+	 * reuse it when needed
+	 */
+
+ /* Parse #52 for panel index used from panel_type already
+ * parsed
+ */
+ start = find_section(bdb, BDB_MIPI_CONFIG);
+ if (!start) {
+ DRM_DEBUG_KMS("No MIPI config BDB found");
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("Found MIPI Config block, panel index = %d\n",
+ panel_type);
+
+	/*
+	 * Get hold of the correct configuration block and pps data,
+	 * using panel_type as the index.
+	 */
+ config = &start->config[panel_type];
+ pps = &start->pps[panel_type];
+
+	/* store the full data for now; trim later once we know what is actually needed */
+ dev_priv->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
+ if (!dev_priv->vbt.dsi.config)
+ return;
+
+ dev_priv->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
+ if (!dev_priv->vbt.dsi.pps) {
+ kfree(dev_priv->vbt.dsi.config);
+ return;
+ }
+
+ /* We have mandatory mipi config blocks. Initialize as generic panel */
+ dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
+
+ /* Check if we have sequence block as well */
+ sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
+ if (!sequence) {
+ DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
+
+ block_size = get_blocksize(sequence);
+
+ /*
+ * parse the sequence block for individual sequences
+ */
+ dev_priv->vbt.dsi.seq_version = sequence->version;
+
+ seq_data = &sequence->data[0];
+
+ /*
+ * sequence block is variable length and hence we need to parse and
+ * get the sequence data for specific panel id
+ */
+ for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) {
+ panel_id = *seq_data;
+ seq_size = *((u16 *) (seq_data + 1));
+ if (panel_id == panel_type)
+ break;
+
+ /* skip the sequence including seq header of 3 bytes */
+ seq_data = seq_data + 3 + seq_size;
+ if ((seq_data - &sequence->data[0]) > block_size) {
+ DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n");
+ return;
+ }
+ }
+
+ if (i == MAX_MIPI_CONFIGURATIONS) {
+ DRM_ERROR("Sequence block detected but no valid configuration\n");
+ return;
+ }
+
+ /* check if found sequence is completely within the sequence block
+ * just being paranoid */
+ if (seq_size > block_size) {
+ DRM_ERROR("Corrupted sequence/size, bailing out\n");
+ return;
+ }
+
+ /* skip the panel id(1 byte) and seq size(2 bytes) */
+ dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL);
+ if (!dev_priv->vbt.dsi.data)
+ return;
+
+ /*
+	 * Loop over the sequence data and split it into the individual
+	 * sequences. There are only 5 sequence types as of now.
+ */
+ data = dev_priv->vbt.dsi.data;
+ dev_priv->vbt.dsi.size = seq_size;
+
+ /* two consecutive 0x00 indicate end of all sequences */
+ while (1) {
+ int seq_id = *data;
+ if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) {
+ dev_priv->vbt.dsi.sequence[seq_id] = data;
+ DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id);
+ } else {
+ DRM_ERROR("undefined sequence\n");
+ goto err;
+ }
+
+ /* partial parsing to skip elements */
+ data = goto_next_sequence(data, &seq_size);
+
+ if (data == NULL) {
+ DRM_ERROR("Sequence elements going beyond block itself. Sequence block parsing failed\n");
+ goto err;
+ }
+
+ if (*data == 0)
+ break; /* end of sequence reached */
+ }
+
+ DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n");
+ return;
+err:
+ kfree(dev_priv->vbt.dsi.data);
+ dev_priv->vbt.dsi.data = NULL;
+
+	/* error during parsing, so clear any sequence pointers the
+	 * partial parse may have set */
+	memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
+}
+
+static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ struct bdb_header *bdb)
+{
+ union child_device_config *it, *child = NULL;
+ struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
+ uint8_t hdmi_level_shift;
+ int i, j;
+ bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
+ uint8_t aux_channel;
+ /* Each DDI port can have more than one value on the "DVO Port" field,
+ * so look for all the possible values for each port and abort if more
+ * than one is found. */
+ int dvo_ports[][2] = {
+ {DVO_PORT_HDMIA, DVO_PORT_DPA},
+ {DVO_PORT_HDMIB, DVO_PORT_DPB},
+ {DVO_PORT_HDMIC, DVO_PORT_DPC},
+ {DVO_PORT_HDMID, DVO_PORT_DPD},
+ {DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
+ };
+
+ /* Find the child device to use, abort if more than one found. */
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ it = dev_priv->vbt.child_dev + i;
+
+ for (j = 0; j < 2; j++) {
+ if (dvo_ports[port][j] == -1)
+ break;
+
+ if (it->common.dvo_port == dvo_ports[port][j]) {
+ if (child) {
+ DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
+ port_name(port));
+ return;
+ }
+ child = it;
+ }
+ }
+ }
+ if (!child)
+ return;
+
+ aux_channel = child->raw[25];
+
+ is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
+ is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+ is_crt = child->common.device_type & DEVICE_TYPE_ANALOG_OUTPUT;
+ is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
+ is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+
+ info->supports_dvi = is_dvi;
+ info->supports_hdmi = is_hdmi;
+ info->supports_dp = is_dp;
+
+ DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
+ port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
+
+ if (is_edp && is_dvi)
+ DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
+ port_name(port));
+ if (is_crt && port != PORT_E)
+ DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
+ if (is_crt && (is_dvi || is_dp))
+ DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
+ port_name(port));
+ if (is_dvi && (port == PORT_A || port == PORT_E))
+ DRM_DEBUG_KMS("Port %c is TMDS compabile\n", port_name(port));
+ if (!is_dvi && !is_dp && !is_crt)
+ DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
+ port_name(port));
+ if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
+ DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
+
+ if (is_dvi) {
+ if (child->common.ddc_pin == 0x05 && port != PORT_B)
+ DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
+ if (child->common.ddc_pin == 0x04 && port != PORT_C)
+ DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
+ if (child->common.ddc_pin == 0x06 && port != PORT_D)
+ DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
+ }
+
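+	/* VBT AUX channel codes: 0x40 = port A, 0x10 = B, 0x20 = C,
+	 * 0x30 = D, as checked below */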
+ if (is_dp) {
+ if (aux_channel == 0x40 && port != PORT_A)
+ DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
+ if (aux_channel == 0x10 && port != PORT_B)
+ DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
+ if (aux_channel == 0x20 && port != PORT_C)
+ DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
+ if (aux_channel == 0x30 && port != PORT_D)
+ DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
+ }
+
+ if (bdb->version >= 158) {
+ /* The VBT HDMI level shift values match the table we have. */
+ hdmi_level_shift = child->raw[7] & 0xF;
+ if (hdmi_level_shift < 0xC) {
+ DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
+ port_name(port),
+ hdmi_level_shift);
+ info->hdmi_level_shift = hdmi_level_shift;
+ }
+ }
+}
+
+static void parse_ddi_ports(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct drm_device *dev = dev_priv->dev;
+ enum port port;
+
+ if (!HAS_DDI(dev))
+ return;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return;
+
+ if (bdb->version < 155)
+ return;
+
+ for (port = PORT_A; port < I915_MAX_PORTS; port++)
+ parse_ddi_port(dev_priv, port, bdb);
}
static void
@@ -409,13 +1008,13 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
struct bdb_general_definitions *p_defs;
- struct child_device_config *p_child, *child_dev_ptr;
+ union child_device_config *p_child, *child_dev_ptr;
int i, child_device_num, count;
u16 block_size;
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
- DRM_DEBUG_KMS("No general definition block is found\n");
+ DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
/* judge whether the size of child device meets the requirements.
@@ -437,92 +1036,236 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
/* get the number of child device that is present */
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
- if (!p_child->device_type) {
+ if (!p_child->common.device_type) {
/* skip the device block if device type is invalid */
continue;
}
count++;
}
if (!count) {
- DRM_DEBUG_KMS("no child dev is parsed from VBT \n");
+ DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
return;
}
- dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
- if (!dev_priv->child_dev) {
+ dev_priv->vbt.child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
+ if (!dev_priv->vbt.child_dev) {
DRM_DEBUG_KMS("No memory space for child device\n");
return;
}
- dev_priv->child_dev_num = count;
+ dev_priv->vbt.child_dev_num = count;
count = 0;
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
- if (!p_child->device_type) {
+ if (!p_child->common.device_type) {
/* skip the device block if device type is invalid */
continue;
}
- child_dev_ptr = dev_priv->child_dev + count;
+
+ if (p_child->common.dvo_port >= DVO_PORT_MIPIA
+ && p_child->common.dvo_port <= DVO_PORT_MIPID
+		    && p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT) {
+ DRM_DEBUG_KMS("Found MIPI as LFP\n");
+ dev_priv->vbt.has_mipi = 1;
+ dev_priv->vbt.dsi.port = p_child->common.dvo_port;
+ }
+
+ child_dev_ptr = dev_priv->vbt.child_dev + count;
count++;
memcpy((void *)child_dev_ptr, (void *)p_child,
sizeof(*p_child));
}
return;
}
+
+static void
+init_vbt_defaults(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ enum port port;
+
+ dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
+
+ /* Default to having backlight */
+ dev_priv->vbt.backlight.present = true;
+
+ /* LFP panel data */
+ dev_priv->vbt.lvds_dither = 1;
+ dev_priv->vbt.lvds_vbt = 0;
+
+ /* SDVO panel data */
+ dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+
+ /* general features */
+ dev_priv->vbt.int_tv_support = 1;
+ dev_priv->vbt.int_crt_support = 1;
+
+ /* Default to using SSC */
+ dev_priv->vbt.lvds_use_ssc = 1;
+ /*
+ * Core/SandyBridge/IvyBridge use alternative (120MHz) reference
+ * clock for LVDS.
+ */
+ dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev,
+ !HAS_PCH_SPLIT(dev));
+ DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
+
+ for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+
+ /* Recommended BSpec default: 800mV 0dB. */
+ info->hdmi_level_shift = 6;
+
+ info->supports_dvi = (port != PORT_A && port != PORT_E);
+ info->supports_hdmi = info->supports_dvi;
+ info->supports_dp = (port != PORT_E);
+ }
+}
+
+static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_KMS("Falling back to manually reading VBT from "
+ "VBIOS ROM for %s\n",
+ id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_no_opregion_vbt[] = {
+ {
+ .callback = intel_no_opregion_vbt_callback,
+ .ident = "ThinkCentre A57",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
+ },
+ },
+ { }
+};
+
+static struct bdb_header *validate_vbt(char *base, size_t size,
+ struct vbt_header *vbt,
+ const char *source)
+{
+ size_t offset;
+ struct bdb_header *bdb;
+
+ if (vbt == NULL) {
+ DRM_DEBUG_DRIVER("VBT signature missing\n");
+ return NULL;
+ }
+
+ offset = (char *)vbt - base;
+ if (offset + sizeof(struct vbt_header) > size) {
+ DRM_DEBUG_DRIVER("VBT header incomplete\n");
+ return NULL;
+ }
+
+ if (memcmp(vbt->signature, "$VBT", 4)) {
+ DRM_DEBUG_DRIVER("VBT invalid signature\n");
+ return NULL;
+ }
+
+ offset += vbt->bdb_offset;
+ if (offset + sizeof(struct bdb_header) > size) {
+ DRM_DEBUG_DRIVER("BDB header incomplete\n");
+ return NULL;
+ }
+
+ bdb = (struct bdb_header *)(base + offset);
+ if (offset + bdb->bdb_size > size) {
+ DRM_DEBUG_DRIVER("BDB incomplete\n");
+ return NULL;
+ }
+
+ DRM_DEBUG_KMS("Using VBT from %s: %20s\n",
+ source, vbt->signature);
+ return bdb;
+}
+
/**
- * intel_init_bios - initialize VBIOS settings & find VBT
+ * intel_parse_bios - find VBT and initialize settings from the BIOS
* @dev: DRM device
*
* Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
* to appropriate values.
*
- * VBT existence is a sanity check that is relied on by other i830_bios.c code.
- * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
- * feed an updated VBT back through that, compared to what we'll fetch using
- * this method of groping around in the BIOS data.
- *
* Returns 0 on success, nonzero on failure.
*/
-bool
-intel_init_bios(struct drm_device *dev)
+int
+intel_parse_bios(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev->pdev;
- struct vbt_header *vbt = NULL;
- struct bdb_header *bdb;
- u8 __iomem *bios;
- size_t size;
- int i;
-
- bios = pci_map_rom(pdev, &size);
- if (!bios)
- return -1;
-
- /* Scour memory looking for the VBT signature */
- for (i = 0; i + 4 < size; i++) {
- if (!memcmp(bios + i, "$VBT", 4)) {
- vbt = (struct vbt_header *)(bios + i);
- break;
+ struct bdb_header *bdb = NULL;
+ u8 __iomem *bios = NULL;
+
+ if (HAS_PCH_NOP(dev))
+ return -ENODEV;
+
+ init_vbt_defaults(dev_priv);
+
+ /* XXX Should this validation be moved to intel_opregion.c? */
+ if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt)
+ bdb = validate_vbt((char *)dev_priv->opregion.header, OPREGION_SIZE,
+ (struct vbt_header *)dev_priv->opregion.vbt,
+ "OpRegion");
+
+ if (bdb == NULL) {
+ size_t i, size;
+
+ bios = pci_map_rom(pdev, &size);
+ if (!bios)
+ return -1;
+
+ /* Scour memory looking for the VBT signature */
+ for (i = 0; i + 4 < size; i++) {
+ if (memcmp(bios + i, "$VBT", 4) == 0) {
+ bdb = validate_vbt(bios, size,
+ (struct vbt_header *)(bios + i),
+ "PCI ROM");
+ break;
+ }
}
- }
- if (!vbt) {
- DRM_ERROR("VBT signature missing\n");
- pci_unmap_rom(pdev, bios);
- return -1;
+ if (!bdb) {
+ pci_unmap_rom(pdev, bios);
+ return -1;
+ }
}
- bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
-
/* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
parse_general_definitions(dev_priv, bdb);
parse_lfp_panel_data(dev_priv, bdb);
+ parse_lfp_backlight(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
parse_sdvo_device_mapping(dev_priv, bdb);
parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
+ parse_edp(dev_priv, bdb);
+ parse_mipi(dev_priv, bdb);
+ parse_ddi_ports(dev_priv, bdb);
- pci_unmap_rom(pdev, bios);
+ if (bios)
+ pci_unmap_rom(pdev, bios);
return 0;
}
+
+/* Ensure that vital registers have been initialised, even if the BIOS
+ * is absent or just failing to do its job.
+ */
+void intel_setup_bios(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Set the Panel Power On/Off timings if uninitialized. */
+ if (!HAS_PCH_SPLIT(dev) &&
+ I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
+ /* Set T2 to 40ms and T5 to 200ms */
+ I915_WRITE(PP_ON_DELAYS, 0x019007d0);
+
+ /* Set T3 to 35ms and Tx to 200ms */
+ I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 425ac9d7f72..b9866779633 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -1,5 +1,5 @@
/*
- * Copyright Â© 2006 Intel Corporation
+ * Copyright © 2006 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -28,7 +28,7 @@
#ifndef _I830_BIOS_H_
#define _I830_BIOS_H_
-#include "drmP.h"
+#include <drm/drmP.h>
struct vbt_header {
u8 signature[20]; /**< Always starts with 'VBT$' */
@@ -39,7 +39,7 @@ struct vbt_header {
u8 reserved0;
u32 bdb_offset; /**< from beginning of VBT */
u32 aim_offset[4]; /**< from beginning of VBT */
-} __attribute__((packed));
+} __packed;
struct bdb_header {
u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
@@ -65,7 +65,7 @@ struct vbios_data {
u8 rsvd4; /* popup memory size */
u8 resize_pci_bios;
u8 rsvd5; /* is crt already on ddc2 */
-} __attribute__((packed));
+} __packed;
/*
* There are several types of BIOS data blocks (BDBs), each block has
@@ -98,11 +98,14 @@ struct vbios_data {
#define BDB_SDVO_LVDS_PNP_IDS 24
#define BDB_SDVO_LVDS_POWER_SEQ 25
#define BDB_TV_OPTIONS 26
+#define BDB_EDP 27
#define BDB_LVDS_OPTIONS 40
#define BDB_LVDS_LFP_DATA_PTRS 41
#define BDB_LVDS_LFP_DATA 42
#define BDB_LVDS_BACKLIGHT 43
#define BDB_LVDS_POWER 44
+#define BDB_MIPI_CONFIG 52
+#define BDB_MIPI_SEQUENCE 53
#define BDB_SKIP 254 /* VBIOS private block, ignore */
struct bdb_general_features {
@@ -119,12 +122,16 @@ struct bdb_general_features {
u8 ssc_freq:1;
u8 enable_lfp_on_override:1;
u8 disable_ssc_ddt:1;
- u8 rsvd8:3; /* finish byte */
+ u8 rsvd7:1;
+ u8 display_clock_mode:1;
+ u8 rsvd8:1; /* finish byte */
/* bits 3 */
u8 disable_smooth_vision:1;
u8 single_dvi:1;
- u8 rsvd9:6; /* finish byte */
+ u8 rsvd9:1;
+ u8 fdi_rx_polarity_inverted:1;
+ u8 rsvd10:4; /* finish byte */
/* bits 4 */
u8 legacy_monitor_detect;
@@ -132,8 +139,11 @@ struct bdb_general_features {
/* bits 5 */
u8 int_crt_support:1;
u8 int_tv_support:1;
- u8 rsvd11:6; /* finish byte */
-} __attribute__((packed));
+ u8 int_efp_support:1;
+ u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
+ u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
+ u8 rsvd11:3; /* finish byte */
+} __packed;
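+/* SSC reference clock frequency in kHz for the given platform generation */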
/* pre-915 */
#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
@@ -193,10 +203,13 @@ struct bdb_general_features {
#define DEVICE_PORT_DVOB 0x01
#define DEVICE_PORT_DVOC 0x02
-struct child_device_config {
+/* We used to keep this struct but without any version control. We should avoid
+ * using it in the future, but it should be safe to keep using it in the old
+ * code. */
+struct old_child_dev_config {
u16 handle;
u16 device_type;
- u8 device_id[10]; /* See DEVICE_TYPE_* above */
+ u8 device_id[10]; /* ascii string */
u16 addin_offset;
u8 dvo_port; /* See Device_PORT_* above */
u8 i2c_pin;
@@ -213,7 +226,33 @@ struct child_device_config {
u8 dvo2_wiring;
u16 extended_type;
u8 dvo_function;
-} __attribute__((packed));
+} __packed;
+
+/* This one contains field offsets that are known to be common for all BDB
+ * versions. Notice that the meaning of the contents may still change,
+ * but at least the offsets are consistent. */
+struct common_child_dev_config {
+ u16 handle;
+ u16 device_type;
+ u8 not_common1[12];
+ u8 dvo_port;
+ u8 not_common2[2];
+ u8 ddc_pin;
+ u16 edid_ptr;
+} __packed;
+
+/* This structure changes depending on the BDB version, so the most reliable
+ * way to read it is by checking the BDB version and reading the raw pointer. */
+union child_device_config {
+ /* This one is safe to be used anywhere, but the code should still check
+ * the BDB version. */
+ u8 raw[33];
+ /* This one should only be kept for legacy code. */
+ struct old_child_dev_config old;
+ /* This one should also be safe to use anywhere, even without version
+ * checks. */
+ struct common_child_dev_config common;
+};
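
A reading aid, not part of this patch: fields mirrored in the common view can be read without a version check, while anything outside it must go through raw after consulting the BDB version. A minimal sketch against the definitions above:

static u16 child_dev_edid_ptr(const union child_device_config *child)
{
	/* edid_ptr sits in the offset-stable common view, so no BDB
	 * version check is needed for this particular field. */
	return child->common.edid_ptr;
}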
struct bdb_general_definitions {
/* DDC GPIO */
@@ -238,10 +277,13 @@ struct bdb_general_definitions {
 * The number of devices is related to the size of the general definition
 * block and is obtained using the following formula:
* number = (block_size - sizeof(bdb_general_definitions))/
- * sizeof(child_device_config);
+ * sizeof(child_device_config);
*/
- struct child_device_config devices[0];
-} __attribute__((packed));
+ union child_device_config devices[0];
+} __packed;
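
The count formula in the comment above, written out as a sketch (not part of this patch); block_size is assumed to be the payload size of the general definitions block as reported by its BDB block header:

static int child_device_num(size_t block_size)
{
	/* Child devices fill whatever space remains in the block. */
	return (block_size - sizeof(struct bdb_general_definitions)) /
		sizeof(union child_device_config);
}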
+
+/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
+#define MODE_MASK 0x3
struct bdb_lvds_options {
u8 panel_type;
@@ -255,7 +297,19 @@ struct bdb_lvds_options {
u8 lvds_edid:1;
u8 rsvd2:1;
u8 rsvd4;
-} __attribute__((packed));
+ /* LVDS Panel channel bits stored here */
+ u32 lvds_panel_channel_bits;
+ /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
+ u16 ssc_bits;
+ u16 ssc_freq;
+ u16 ssc_ddt;
+ /* Panel color depth defined here */
+ u16 panel_color_depth;
+ /* LVDS panel type bits stored here */
+ u32 dps_panel_type_bits;
+ /* LVDS backlight control type bits stored here */
+ u32 blt_control_type_bits;
+} __packed;
/* LFP pointer table contains entries to the struct below */
struct bdb_lvds_lfp_data_ptr {
@@ -265,12 +319,12 @@ struct bdb_lvds_lfp_data_ptr {
u8 dvo_table_size;
u16 panel_pnp_id_offset;
u8 pnp_table_size;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data_ptrs {
u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
struct bdb_lvds_lfp_data_ptr ptr[16];
-} __attribute__((packed));
+} __packed;
/* LFP data has 3 blocks per entry */
struct lvds_fp_timing {
@@ -287,7 +341,7 @@ struct lvds_fp_timing {
u32 pfit_reg;
u32 pfit_reg_val;
u16 terminator;
-} __attribute__((packed));
+} __packed;
struct lvds_dvo_timing {
u16 clock; /**< In 10khz */
@@ -315,7 +369,7 @@ struct lvds_dvo_timing {
u8 vsync_positive:1;
u8 hsync_positive:1;
u8 rsvd2:1;
-} __attribute__((packed));
+} __packed;
struct lvds_pnp_id {
u16 mfg_name;
@@ -323,17 +377,36 @@ struct lvds_pnp_id {
u32 serial;
u8 mfg_week;
u8 mfg_year;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data_entry {
struct lvds_fp_timing fp_timing;
struct lvds_dvo_timing dvo_timing;
struct lvds_pnp_id pnp_id;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data {
struct bdb_lvds_lfp_data_entry data[16];
-} __attribute__((packed));
+} __packed;
+
+#define BDB_BACKLIGHT_TYPE_NONE 0
+#define BDB_BACKLIGHT_TYPE_PWM 2
+
+struct bdb_lfp_backlight_data_entry {
+ u8 type:2;
+ u8 active_low_pwm:1;
+ u8 obsolete1:5;
+ u16 pwm_freq_hz;
+ u8 min_brightness;
+ u8 obsolete2;
+ u8 obsolete3;
+} __packed;
+
+struct bdb_lfp_backlight_data {
+ u8 entry_size;
+ struct bdb_lfp_backlight_data_entry data[16];
+ u8 level[16];
+} __packed;
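
A hedged sketch, not part of this patch, of reading one panel's PWM backlight parameters from this block. Indexing data[] directly by panel_type and assuming entry_size == sizeof(*entry) are simplifications; a robust parser steps by entry_size so it keeps working if later BDB versions grow the entries:

static bool lfp_backlight_pwm(const struct bdb_lfp_backlight_data *bl,
			      int panel_type, u16 *freq_hz, bool *active_low)
{
	const struct bdb_lfp_backlight_data_entry *entry =
		&bl->data[panel_type];

	if (entry->type != BDB_BACKLIGHT_TYPE_PWM)
		return false;
	*freq_hz = entry->pwm_freq_hz;
	*active_low = entry->active_low_pwm;
	return true;
}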
struct aimdb_header {
char signature[16];
@@ -341,12 +414,12 @@ struct aimdb_header {
u16 aimdb_version;
u16 aimdb_header_size;
u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
struct aimdb_block {
u8 aimdb_id;
u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
struct vch_panel_data {
u16 fp_timing_offset;
@@ -357,12 +430,12 @@ struct vch_panel_data {
u8 text_fitting_size;
u16 graphics_fitting_offset;
u8 graphics_fitting_size;
-} __attribute__((packed));
+} __packed;
struct vch_bdb_22 {
struct aimdb_block aimdb_block;
struct vch_panel_data panels[16];
-} __attribute__((packed));
+} __packed;
struct bdb_sdvo_lvds_options {
u8 panel_backlight;
@@ -378,7 +451,7 @@ struct bdb_sdvo_lvds_options {
u8 panel_misc_bits_2;
u8 panel_misc_bits_3;
u8 panel_misc_bits_4;
-} __attribute__((packed));
+} __packed;
#define BDB_DRIVER_FEATURE_NO_LVDS 0
@@ -424,9 +497,67 @@ struct bdb_driver_features {
u8 hdmi_termination;
u8 custom_vbt_version;
-} __attribute__((packed));
-
-bool intel_init_bios(struct drm_device *dev);
+ /* Driver features data block */
+ u16 rmpm_enabled:1;
+ u16 s2ddt_enabled:1;
+ u16 dpst_enabled:1;
+ u16 bltclt_enabled:1;
+ u16 adb_enabled:1;
+ u16 drrs_enabled:1;
+ u16 grs_enabled:1;
+ u16 gpmt_enabled:1;
+ u16 tbt_enabled:1;
+ u16 psr_enabled:1;
+ u16 ips_enabled:1;
+ u16 reserved3:4;
+ u16 pc_feature_valid:1;
+} __packed;
+
+#define EDP_18BPP 0
+#define EDP_24BPP 1
+#define EDP_30BPP 2
+#define EDP_RATE_1_62 0
+#define EDP_RATE_2_7 1
+#define EDP_LANE_1 0
+#define EDP_LANE_2 1
+#define EDP_LANE_4 3
+#define EDP_PREEMPHASIS_NONE 0
+#define EDP_PREEMPHASIS_3_5dB 1
+#define EDP_PREEMPHASIS_6dB 2
+#define EDP_PREEMPHASIS_9_5dB 3
+#define EDP_VSWING_0_4V 0
+#define EDP_VSWING_0_6V 1
+#define EDP_VSWING_0_8V 2
+#define EDP_VSWING_1_2V 3
+
+struct edp_power_seq {
+ u16 t1_t3;
+ u16 t8;
+ u16 t9;
+ u16 t10;
+ u16 t11_t12;
+} __packed;
+
+struct edp_link_params {
+ u8 rate:4;
+ u8 lanes:4;
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __packed;
+
+struct bdb_edp {
+ struct edp_power_seq power_seqs[16];
+ u32 color_depth;
+ struct edp_link_params link_params[16];
+ u32 sdrrs_msa_timing_delay;
+
+ /* Bit i indicates enabled/disabled for the (i+1)th panel */
+ u16 edp_s3d_feature;
+ u16 edp_t3_optimization;
+} __packed;
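
To illustrate the per-panel layout and the bit-per-panel encoding noted above, a sketch that is not part of this patch; panel_type is assumed to be the usual 0-based panel index:

static const struct edp_link_params *
edp_link_params_for(const struct bdb_edp *edp, int panel_type)
{
	return &edp->link_params[panel_type];
}

static bool edp_s3d_enabled(const struct bdb_edp *edp, int panel_type)
{
	/* bit i covers the (i+1)th panel, i.e. bit panel_type for a
	 * 0-based index */
	return edp->edp_s3d_feature & (1 << panel_type);
}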
+
+void intel_setup_bios(struct drm_device *dev);
+int intel_parse_bios(struct drm_device *dev);
/*
* Driver<->VBIOS interaction occurs through scratch bits in
@@ -556,6 +687,40 @@ bool intel_init_bios(struct drm_device *dev);
#define DEVICE_TYPE_DP 0x68C6
#define DEVICE_TYPE_eDP 0x78C6
+#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
+#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
+#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
+#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
+#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
+#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
+#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
+#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
+#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
+#define DEVICE_TYPE_LVDS_SIGNALING (1 << 5)
+#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
+#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
+#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
+#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
+#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
+
+/*
+ * Bits we care about when checking for DEVICE_TYPE_eDP
+ * Depending on the system, the other bits may or may not
+ * be set for eDP outputs.
+ */
+#define DEVICE_TYPE_eDP_BITS \
+ (DEVICE_TYPE_INTERNAL_CONNECTOR | \
+ DEVICE_TYPE_NOT_HDMI_OUTPUT | \
+ DEVICE_TYPE_MIPI_OUTPUT | \
+ DEVICE_TYPE_COMPOSITE_OUTPUT | \
+ DEVICE_TYPE_DUAL_CHANNEL | \
+ DEVICE_TYPE_LVDS_SIGNALING | \
+ DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+ DEVICE_TYPE_VIDEO_SIGNALING | \
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+ DEVICE_TYPE_DIGITAL_OUTPUT | \
+ DEVICE_TYPE_ANALOG_OUTPUT)
+
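The mask is meant to be applied to both sides of the comparison, so only the bits it names participate. A sketch of the intended check, not part of this patch:

static bool device_type_is_edp(u16 device_type)
{
	return (device_type & DEVICE_TYPE_eDP_BITS) ==
	       (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS);
}
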
/* define the DVO port for HDMI output type */
#define DVO_B 1
#define DVO_C 2
@@ -566,4 +731,207 @@ bool intel_init_bios(struct drm_device *dev);
#define PORT_IDPC 8
#define PORT_IDPD 9
+/* Possible values for the "DVO Port" field for versions >= 155: */
+#define DVO_PORT_HDMIA 0
+#define DVO_PORT_HDMIB 1
+#define DVO_PORT_HDMIC 2
+#define DVO_PORT_HDMID 3
+#define DVO_PORT_LVDS 4
+#define DVO_PORT_TV 5
+#define DVO_PORT_CRT 6
+#define DVO_PORT_DPB 7
+#define DVO_PORT_DPC 8
+#define DVO_PORT_DPD 9
+#define DVO_PORT_DPA 10
+#define DVO_PORT_MIPIA 21
+#define DVO_PORT_MIPIB 22
+#define DVO_PORT_MIPIC 23
+#define DVO_PORT_MIPID 24
+
+/* Block 52 contains MIPI panel info.
+ * There will be 6 such entries. The index into the correct
+ * entry is based on the panel_index in block #40 (LFP options).
+ */
+#define MAX_MIPI_CONFIGURATIONS 6
+
+#define MIPI_DSI_UNDEFINED_PANEL_ID 0
+#define MIPI_DSI_GENERIC_PANEL_ID 1
+
+struct mipi_config {
+ u16 panel_id;
+
+ /* General Params */
+ u32 enable_dithering:1;
+ u32 rsvd1:1;
+ u32 is_bridge:1;
+
+ u32 panel_arch_type:2;
+ u32 is_cmd_mode:1;
+
+#define NON_BURST_SYNC_PULSE 0x1
+#define NON_BURST_SYNC_EVENTS 0x2
+#define BURST_MODE 0x3
+ u32 video_transfer_mode:2;
+
+ u32 cabc_supported:1;
+ u32 pwm_blc:1;
+
+ /* Bit 13:10 */
+#define PIXEL_FORMAT_RGB565 0x1
+#define PIXEL_FORMAT_RGB666 0x2
+#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3
+#define PIXEL_FORMAT_RGB888 0x4
+ u32 videomode_color_format:4;
+
+ /* Bit 15:14 */
+#define ENABLE_ROTATION_0 0x0
+#define ENABLE_ROTATION_90 0x1
+#define ENABLE_ROTATION_180 0x2
+#define ENABLE_ROTATION_270 0x3
+ u32 rotation:2;
+ u32 bta_enabled:1;
+ u32 rsvd2:15;
+
+ /* 2 byte Port Description */
+#define DUAL_LINK_NOT_SUPPORTED 0
+#define DUAL_LINK_FRONT_BACK 1
+#define DUAL_LINK_PIXEL_ALT 2
+ u16 dual_link:2;
+ u16 lane_cnt:2;
+ u16 rsvd3:12;
+
+ u16 rsvd4;
+
+ u8 rsvd5[5];
+ u32 dsi_ddr_clk;
+ u32 bridge_ref_clk;
+
+#define BYTE_CLK_SEL_20MHZ 0
+#define BYTE_CLK_SEL_10MHZ 1
+#define BYTE_CLK_SEL_5MHZ 2
+ u8 byte_clk_sel:2;
+
+ u8 rsvd6:6;
+
+ /* DPHY Flags */
+ u16 dphy_param_valid:1;
+ u16 eot_pkt_disabled:1;
+ u16 enable_clk_stop:1;
+ u16 rsvd7:13;
+
+ u32 hs_tx_timeout;
+ u32 lp_rx_timeout;
+ u32 turn_around_timeout;
+ u32 device_reset_timer;
+ u32 master_init_timer;
+ u32 dbi_bw_timer;
+ u32 lp_byte_clk_val;
+
+ /* 4 byte Dphy Params */
+ u32 prepare_cnt:6;
+ u32 rsvd8:2;
+ u32 clk_zero_cnt:8;
+ u32 trail_cnt:5;
+ u32 rsvd9:3;
+ u32 exit_zero_cnt:6;
+ u32 rsvd10:2;
+
+ u32 clk_lane_switch_cnt;
+ u32 hl_switch_cnt;
+
+ u32 rsvd11[6];
+
+ /* timings based on dphy spec */
+ u8 tclk_miss;
+ u8 tclk_post;
+ u8 rsvd12;
+ u8 tclk_pre;
+ u8 tclk_prepare;
+ u8 tclk_settle;
+ u8 tclk_term_enable;
+ u8 tclk_trail;
+ u16 tclk_prepare_clkzero;
+ u8 rsvd13;
+ u8 td_term_enable;
+ u8 teot;
+ u8 ths_exit;
+ u8 ths_prepare;
+ u16 ths_prepare_hszero;
+ u8 rsvd14;
+ u8 ths_settle;
+ u8 ths_skip;
+ u8 ths_trail;
+ u8 tinit;
+ u8 tlpx;
+ u8 rsvd15[3];
+
+ /* GPIOs */
+ u8 panel_enable;
+ u8 bl_enable;
+ u8 pwm_enable;
+ u8 reset_r_n;
+ u8 pwr_down_r;
+ u8 stdby_r_n;
+
+} __packed;
+
+/* Block 52 contains the MIPI configuration block:
+ * 6 * mipi_config, followed by the 6 PPS data
+ * blocks below.
+ *
+ * All delays have a unit of 100us.
+ */
+struct mipi_pps_data {
+ u16 panel_on_delay;
+ u16 bl_enable_delay;
+ u16 bl_disable_delay;
+ u16 panel_off_delay;
+ u16 panel_power_cycle_delay;
+};
+
+struct bdb_mipi_config {
+ struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
+ struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
+};
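
Given the fixed six-entry layout, a sketch (not part of this patch) of locating one panel's configuration and power-sequence data; panel_index is assumed to come from the LFP options block (#40), as the comment above block 52 describes:

static bool mipi_panel_data(const struct bdb_mipi_config *blk, int panel_index,
			    const struct mipi_config **cfg,
			    const struct mipi_pps_data **pps)
{
	if (panel_index < 0 || panel_index >= MAX_MIPI_CONFIGURATIONS)
		return false;
	*cfg = &blk->config[panel_index];
	*pps = &blk->pps[panel_index];
	return true;
}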
+
+/* Block 53 contains the MIPI sequences needed by the panel
+ * for enabling it. This block is variable in size and
+ * can contain a maximum of 6 sequence blocks.
+ */
+struct bdb_mipi_sequence {
+ u8 version;
+ u8 data[0];
+};
+
+/* MIPI Sequence Block definitions */
+enum mipi_seq {
+ MIPI_SEQ_UNDEFINED = 0,
+ MIPI_SEQ_ASSERT_RESET,
+ MIPI_SEQ_INIT_OTP,
+ MIPI_SEQ_DISPLAY_ON,
+ MIPI_SEQ_DISPLAY_OFF,
+ MIPI_SEQ_DEASSERT_RESET,
+ MIPI_SEQ_MAX
+};
+
+enum mipi_seq_element {
+ MIPI_SEQ_ELEM_UNDEFINED = 0,
+ MIPI_SEQ_ELEM_SEND_PKT,
+ MIPI_SEQ_ELEM_DELAY,
+ MIPI_SEQ_ELEM_GPIO,
+ MIPI_SEQ_ELEM_STATUS,
+ MIPI_SEQ_ELEM_MAX
+};
+
+enum mipi_gpio_pin_index {
+ MIPI_GPIO_UNDEFINED = 0,
+ MIPI_GPIO_PANEL_ENABLE,
+ MIPI_GPIO_BL_ENABLE,
+ MIPI_GPIO_PWM_ENABLE,
+ MIPI_GPIO_RESET_N,
+ MIPI_GPIO_PWR_DOWN_R,
+ MIPI_GPIO_STDBY_RST_N,
+ MIPI_GPIO_MAX
+};
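
These enums suggest the shape of a sequence-element dispatcher. In the sketch below (not part of this patch), the wire format, one id byte per element followed by an element-specific payload, is an assumption, since this header does not spell it out:

static void exec_seq_element(enum mipi_seq_element elem, const u8 *payload)
{
	switch (elem) {
	case MIPI_SEQ_ELEM_SEND_PKT:
		/* send the DSI packet described by the payload */
		break;
	case MIPI_SEQ_ELEM_DELAY:
		/* wait; VBT delays elsewhere use 100us units */
		break;
	case MIPI_SEQ_ELEM_GPIO:
		/* toggle one of the mipi_gpio_pin_index GPIOs */
		break;
	case MIPI_SEQ_ELEM_STATUS:
		/* read back status */
		break;
	default:
		/* unknown element, likely version skew: skip */
		break;
	}
}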
+
#endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ddefc871edf..5a045d3bd77 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -24,50 +24,233 @@
* Eric Anholt <eric@anholt.net>
*/
+#include <linux/dmi.h>
#include <linux/i2c.h>
-#include "drmP.h"
-#include "drm.h"
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
#include "intel_drv.h"
-#include "i915_drm.h"
+#include <drm/i915_drm.h>
#include "i915_drv.h"
-static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
+/* Here's the desired hotplug mode */
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
+ ADPA_CRT_HOTPLUG_WARMUP_10MS | \
+ ADPA_CRT_HOTPLUG_SAMPLE_4S | \
+ ADPA_CRT_HOTPLUG_VOLTAGE_50 | \
+ ADPA_CRT_HOTPLUG_VOLREF_325MV | \
+ ADPA_CRT_HOTPLUG_ENABLE)
+
+struct intel_crt {
+ struct intel_encoder base;
+ /* DPMS state is stored in the connector, which we need in the
+ * encoder's enable/disable callbacks */
+ struct intel_connector *connector;
+ bool force_hotplug_required;
+ u32 adpa_reg;
+};
+
+static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
+{
+ return container_of(encoder, struct intel_crt, base);
+}
+
+static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+{
+ return intel_encoder_to_crt(intel_attached_encoder(connector));
+}
+
+static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ enum intel_display_power_domain power_domain;
+ u32 tmp;
+
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
+ tmp = I915_READ(crt->adpa_reg);
+
+ if (!(tmp & ADPA_DAC_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 tmp, flags = 0;
+
+ tmp = I915_READ(crt->adpa_reg);
+
+ if (tmp & ADPA_HSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (tmp & ADPA_VSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ return flags;
+}
+
+static void intel_crt_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = encoder->base.dev;
+ int dotclock;
+
+ pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
+
+ dotclock = pipe_config->port_clock;
+
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+ pipe_config->adjusted_mode.crtc_clock = dotclock;
+}
+
+static void hsw_crt_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ intel_ddi_get_config(encoder, pipe_config);
+
+ pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+ DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_NVSYNC);
+ pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
+}
+
+/* Note: The caller is required to filter out dpms modes not supported by the
+ * platform. */
+static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 temp, reg;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ u32 adpa;
+
+ if (INTEL_INFO(dev)->gen >= 5)
+ adpa = ADPA_HOTPLUG_BITS;
+ else
+ adpa = 0;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ adpa |= ADPA_VSYNC_ACTIVE_HIGH;
- if (IS_IRONLAKE(dev))
- reg = PCH_ADPA;
+ /* For CPT allow a 3-pipe config, for others just use pipe A or B */
+ if (HAS_PCH_LPT(dev))
+ ; /* Those bits don't exist here */
+ else if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
+ else if (crtc->pipe == 0)
+ adpa |= ADPA_PIPE_A_SELECT;
else
- reg = ADPA;
+ adpa |= ADPA_PIPE_B_SELECT;
- temp = I915_READ(reg);
- temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
- temp &= ~ADPA_DAC_ENABLE;
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(BCLRPAT(crtc->pipe), 0);
- switch(mode) {
+ switch (mode) {
case DRM_MODE_DPMS_ON:
- temp |= ADPA_DAC_ENABLE;
+ adpa |= ADPA_DAC_ENABLE;
break;
case DRM_MODE_DPMS_STANDBY:
- temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+ adpa |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
break;
case DRM_MODE_DPMS_SUSPEND:
- temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+ adpa |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
break;
case DRM_MODE_DPMS_OFF:
- temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+ adpa |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
break;
}
- I915_WRITE(reg, temp);
+ I915_WRITE(crt->adpa_reg, adpa);
+}
+
+static void intel_disable_crt(struct intel_encoder *encoder)
+{
+ intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void intel_enable_crt(struct intel_encoder *encoder)
+{
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+
+ intel_crt_set_dpms(encoder, crt->connector->base.dpms);
+}
+
+/* Special dpms function to support cloning between dvo/sdvo/crt. */
+static void intel_crt_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct drm_crtc *crtc;
+ int old_dpms;
+
+ /* PCH platforms and VLV only support on/off. */
+ if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ old_dpms = connector->dpms;
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = encoder->base.crtc;
+ if (!crtc) {
+ encoder->connectors_active = false;
+ return;
+ }
+
+ /* We need the pipe to run for anything but OFF. */
+ if (mode == DRM_MODE_DPMS_OFF)
+ encoder->connectors_active = false;
+ else
+ encoder->connectors_active = true;
+
+ /* We call connector dpms manually below in case pipe dpms doesn't
+ * change due to cloning. */
+ if (mode < old_dpms) {
+ /* From off to on, enable the pipe first. */
+ intel_crtc_update_dpms(crtc);
+
+ intel_crt_set_dpms(encoder, mode);
+ } else {
+ intel_crt_set_dpms(encoder, mode);
+
+ intel_crtc_update_dpms(crtc);
+ }
+
+ intel_modeset_check_state(connector->dev);
}
-static int intel_crt_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_crt_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
@@ -78,109 +261,116 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
- if (!IS_I9XX(dev))
+ if (IS_GEN2(dev))
max_clock = 350000;
else
max_clock = 400000;
if (mode->clock > max_clock)
return MODE_CLOCK_HIGH;
+ /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+ if (HAS_PCH_LPT(dev) &&
+ (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
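
The LPT limit above is bandwidth arithmetic. The real helper lives elsewhere in the driver, so the sketch below (not part of this patch) only assumes the obvious shape, lanes = ceil(dotclock * bpp / (link_clock * 8)). With 2 lanes at the 270 MHz FDI link clock and 24 bpp this caps the dotclock at 270000 * 8 * 2 / 24 = 180 MHz, before any spread-spectrum safety margin the real helper may add:

static int fdi_lanes_required(int clock, int link_clock, int bpp)
{
	/* clock and link_clock in kHz; each lane carries
	 * link_clock * 8 kbit/s of pixel data */
	return DIV_ROUND_UP(clock * bpp, link_clock * 8);
}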
-static bool intel_crt_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool intel_crt_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
+ struct drm_device *dev = encoder->base.dev;
+
+ if (HAS_PCH_SPLIT(dev))
+ pipe_config->has_pch_encoder = true;
+
+ /* LPT FDI RX only supports 8bpc. */
+ if (HAS_PCH_LPT(dev))
+ pipe_config->pipe_bpp = 24;
+
+ /* FDI must always be 2.7 GHz */
+ if (HAS_DDI(dev))
+ pipe_config->port_clock = 135000 * 2;
+
return true;
}
-static void intel_crt_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
-
- struct drm_device *dev = encoder->dev;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
- int dpll_md_reg;
- u32 adpa, dpll_md;
- u32 adpa_reg;
+ u32 adpa;
+ bool ret;
- if (intel_crtc->pipe == 0)
- dpll_md_reg = DPLL_A_MD;
- else
- dpll_md_reg = DPLL_B_MD;
+ /* The first time through, trigger an explicit detection cycle */
+ if (crt->force_hotplug_required) {
+ bool turn_off_dac = HAS_PCH_SPLIT(dev);
+ u32 save_adpa;
- if (IS_IRONLAKE(dev))
- adpa_reg = PCH_ADPA;
- else
- adpa_reg = ADPA;
+ crt->force_hotplug_required = 0;
- /*
- * Disable separate mode multiplier used when cloning SDVO to CRT
- * XXX this needs to be adjusted when we really are cloning
- */
- if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
- dpll_md = I915_READ(dpll_md_reg);
- I915_WRITE(dpll_md_reg,
- dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
- }
+ save_adpa = adpa = I915_READ(crt->adpa_reg);
+ DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
- adpa = 0;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- adpa |= ADPA_HSYNC_ACTIVE_HIGH;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+ adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+ if (turn_off_dac)
+ adpa &= ~ADPA_DAC_ENABLE;
- if (intel_crtc->pipe == 0) {
- adpa |= ADPA_PIPE_A_SELECT;
- if (!IS_IRONLAKE(dev))
- I915_WRITE(BCLRPAT_A, 0);
- } else {
- adpa |= ADPA_PIPE_B_SELECT;
- if (!IS_IRONLAKE(dev))
- I915_WRITE(BCLRPAT_B, 0);
+ I915_WRITE(crt->adpa_reg, adpa);
+
+ if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ 1000))
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+
+ if (turn_off_dac) {
+ I915_WRITE(crt->adpa_reg, save_adpa);
+ POSTING_READ(crt->adpa_reg);
+ }
}
- I915_WRITE(adpa_reg, adpa);
+ /* Check the status to see if both blue and green are on now */
+ adpa = I915_READ(crt->adpa_reg);
+ if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
+ ret = true;
+ else
+ ret = false;
+ DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
+
+ return ret;
}
-static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
+static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa;
bool ret;
+ u32 save_adpa;
- adpa = I915_READ(PCH_ADPA);
-
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ save_adpa = adpa = I915_READ(crt->adpa_reg);
+ DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
- adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
- ADPA_CRT_HOTPLUG_WARMUP_10MS |
- ADPA_CRT_HOTPLUG_SAMPLE_4S |
- ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */
- ADPA_CRT_HOTPLUG_VOLREF_325MV |
- ADPA_CRT_HOTPLUG_ENABLE |
- ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
+ adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
- DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
- I915_WRITE(PCH_ADPA, adpa);
+ I915_WRITE(crt->adpa_reg, adpa);
- while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
- ;
+ if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ 1000)) {
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+ I915_WRITE(crt->adpa_reg, save_adpa);
+ }
/* Check the status to see if both blue and green are on now */
- adpa = I915_READ(PCH_ADPA);
- adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
- if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) ||
- (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO))
+ adpa = I915_READ(crt->adpa_reg);
+ if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
ret = false;
+ DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
+
return ret;
}
@@ -196,12 +386,16 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 hotplug_en;
+ u32 hotplug_en, orig, stat;
+ bool ret = false;
int i, tries = 0;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return intel_ironlake_crt_detect_hotplug(connector);
+ if (IS_VALLEYVIEW(dev))
+ return valleyview_crt_detect_hotplug(connector);
+
/*
 * On 4 series desktop, the CRT detect sequence needs to be done twice
* to get a reliable result.
@@ -211,55 +405,107 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
tries = 2;
else
tries = 1;
- hotplug_en = I915_READ(PORT_HOTPLUG_EN);
- hotplug_en &= CRT_FORCE_HOTPLUG_MASK;
+ hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
- if (IS_G4X(dev))
- hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
-
- hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
-
for (i = 0; i < tries ; i++) {
- unsigned long timeout;
/* turn on the FORCE_DETECT */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
- timeout = jiffies + msecs_to_jiffies(1000);
/* wait for FORCE_DETECT to go off */
- do {
- if (!(I915_READ(PORT_HOTPLUG_EN) &
- CRT_HOTPLUG_FORCE_DETECT))
- break;
- msleep(1);
- } while (time_after(timeout, jiffies));
+ if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
+ CRT_HOTPLUG_FORCE_DETECT) == 0,
+ 1000))
+ DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
}
- if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
- CRT_HOTPLUG_MONITOR_NONE)
- return true;
+ stat = I915_READ(PORT_HOTPLUG_STAT);
+ if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
+ ret = true;
+
+ /* clear the interrupt we just generated, if any */
+ I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
- return false;
+ /* and put the bits back */
+ I915_WRITE(PORT_HOTPLUG_EN, orig);
+
+ return ret;
+}
+
+static struct edid *intel_crt_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *i2c)
+{
+ struct edid *edid;
+
+ edid = drm_get_edid(connector, i2c);
+
+ if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+ DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
+ intel_gmbus_force_bit(i2c, true);
+ edid = drm_get_edid(connector, i2c);
+ intel_gmbus_force_bit(i2c, false);
+ }
+
+ return edid;
+}
+
+/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
+static int intel_crt_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid;
+ int ret;
+
+ edid = intel_crt_get_edid(connector, adapter);
+ if (!edid)
+ return 0;
+
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
}
static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_crt *crt = intel_attached_crt(connector);
+ struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
+ struct edid *edid;
+ struct i2c_adapter *i2c;
- /* CRT should always be at 0, but check anyway */
- if (intel_output->type != INTEL_OUTPUT_ANALOG)
- return false;
+ BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
+
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
+ edid = intel_crt_get_edid(connector, i2c);
+
+ if (edid) {
+ bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
+
+ /*
+ * This may be a DVI-I connector with a shared DDC
+ * link between analog and digital outputs, so we
+ * have to check the EDID input spec of the attached device.
+ */
+ if (!is_digital) {
+ DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+ return true;
+ }
- return intel_ddc_probe(intel_output);
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
+ }
+
+ kfree(edid);
+
+ return false;
}
static enum drm_connector_status
-intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
+intel_crt_load_detect(struct intel_crt *crt)
{
- struct drm_encoder *encoder = &intel_output->enc;
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = crt->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t pipe = intel_crtc->pipe;
+ uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe;
uint32_t save_bclrpat;
uint32_t save_vtotal;
uint32_t vtotal, vactive;
@@ -275,21 +521,14 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
uint8_t st00;
enum drm_connector_status status;
- if (pipe == 0) {
- bclrpat_reg = BCLRPAT_A;
- vtotal_reg = VTOTAL_A;
- vblank_reg = VBLANK_A;
- vsync_reg = VSYNC_A;
- pipeconf_reg = PIPEACONF;
- pipe_dsl_reg = PIPEADSL;
- } else {
- bclrpat_reg = BCLRPAT_B;
- vtotal_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- pipeconf_reg = PIPEBCONF;
- pipe_dsl_reg = PIPEBDSL;
- }
+ DRM_DEBUG_KMS("starting load-detect on CRT\n");
+
+ bclrpat_reg = BCLRPAT(pipe);
+ vtotal_reg = VTOTAL(pipe);
+ vblank_reg = VBLANK(pipe);
+ vsync_reg = VSYNC(pipe);
+ pipeconf_reg = PIPECONF(pipe);
+ pipe_dsl_reg = PIPEDSL(pipe);
save_bclrpat = I915_READ(bclrpat_reg);
save_vtotal = I915_READ(vtotal_reg);
@@ -304,12 +543,13 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
/* Set the border color to purple. */
I915_WRITE(bclrpat_reg, 0x500050);
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
uint32_t pipeconf = I915_READ(pipeconf_reg);
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
+ POSTING_READ(pipeconf_reg);
 /* Wait for next Vblank to substitute
* border color for Color info */
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, pipe);
st00 = I915_READ8(VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
@@ -380,81 +620,108 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
return status;
}
-static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_encoder *encoder = &intel_output->enc;
- struct drm_crtc *crtc;
- int dpms_mode;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_encoder *intel_encoder = &crt->base;
+ enum intel_display_power_domain power_domain;
enum drm_connector_status status;
+ struct intel_load_detect_pipe tmp;
+ struct drm_modeset_acquire_ctx ctx;
- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
- if (intel_crt_detect_hotplug(connector))
- return connector_status_connected;
- else
- return connector_status_disconnected;
- }
+ intel_runtime_pm_get(dev_priv);
- if (intel_crt_detect_ddc(connector))
- return connector_status_connected;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
+ connector->base.id, connector->name,
+ force);
- /* for pre-945g platforms use load detect */
- if (encoder->crtc && encoder->crtc->enabled) {
- status = intel_crt_load_detect(encoder->crtc, intel_output);
- } else {
- crtc = intel_get_load_detect_pipe(intel_output,
- NULL, &dpms_mode);
- if (crtc) {
- status = intel_crt_load_detect(crtc, intel_output);
- intel_release_load_detect_pipe(intel_output, dpms_mode);
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ /* We cannot rely on the HPD pin always being correctly wired
+ * up; for example, many KVM switches do not pass it through, and so
+ * we only trust an assertion that the monitor is connected.
+ */
+ if (intel_crt_detect_hotplug(connector)) {
+ DRM_DEBUG_KMS("CRT detected via hotplug\n");
+ status = connector_status_connected;
+ goto out;
} else
- status = connector_status_unknown;
+ DRM_DEBUG_KMS("CRT not detected via hotplug\n");
+ }
+
+ if (intel_crt_detect_ddc(connector)) {
+ status = connector_status_connected;
+ goto out;
+ }
+
+ /* Load detection is broken on HPD capable machines. Whoever wants a
+ * broken monitor (without EDID) to work behind a broken KVM (one that
+ * fails to have the right resistors for hotplug detection) needs to
+ * fix this up. For now just bail out. */
+ if (I915_HAS_HOTPLUG(dev)) {
+ status = connector_status_disconnected;
+ goto out;
}
+ if (!force) {
+ status = connector->status;
+ goto out;
+ }
+
+ /* for pre-945g platforms use load detect */
+ if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
+ if (intel_crt_detect_ddc(connector))
+ status = connector_status_connected;
+ else
+ status = intel_crt_load_detect(crt);
+ intel_release_load_detect_pipe(connector, &tmp, &ctx);
+ } else
+ status = connector_status_unknown;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+ intel_runtime_pm_put(dev_priv);
+
return status;
}
static void intel_crt_destroy(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
-
- intel_i2c_destroy(intel_output->ddc_bus);
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
static int intel_crt_get_modes(struct drm_connector *connector)
{
- int ret;
- struct intel_output *intel_output = to_intel_output(connector);
- struct i2c_adapter *ddcbus;
struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_encoder *intel_encoder = &crt->base;
+ enum intel_display_power_domain power_domain;
+ int ret;
+ struct i2c_adapter *i2c;
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
- ret = intel_ddc_get_modes(intel_output);
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
+ ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
- goto end;
+ goto out;
- ddcbus = intel_output->ddc_bus;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
- intel_output->ddc_bus =
- intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
-
- if (!intel_output->ddc_bus) {
- intel_output->ddc_bus = ddcbus;
- dev_printk(KERN_ERR, &connector->dev->pdev->dev,
- "DDC bus registration failed for CRTDDC_D.\n");
- goto end;
- }
- /* Try to get modes by GPIOD port */
- ret = intel_ddc_get_modes(intel_output);
- intel_i2c_destroy(ddcbus);
+ i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
+ ret = intel_crt_ddc_get_modes(connector, i2c);
-end:
- return ret;
+out:
+ intel_display_power_put(dev_priv, power_domain);
+ return ret;
}
static int intel_crt_set_property(struct drm_connector *connector,
@@ -464,20 +731,34 @@ static int intel_crt_set_property(struct drm_connector *connector,
return 0;
}
+static void intel_crt_reset(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_attached_crt(connector);
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ u32 adpa;
+
+ adpa = I915_READ(crt->adpa_reg);
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ adpa |= ADPA_HOTPLUG_BITS;
+ I915_WRITE(crt->adpa_reg, adpa);
+ POSTING_READ(crt->adpa_reg);
+
+ DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+ crt->force_hotplug_required = 1;
+ }
+
+}
+
/*
* Routines for controlling stuff on the analog port
*/
-static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
- .dpms = intel_crt_dpms,
- .mode_fixup = intel_crt_mode_fixup,
- .prepare = intel_encoder_prepare,
- .commit = intel_encoder_commit,
- .mode_set = intel_crt_mode_set,
-};
-
static const struct drm_connector_funcs intel_crt_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .reset = intel_crt_reset,
+ .dpms = intel_crt_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy,
@@ -490,64 +771,125 @@ static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs
.best_encoder = intel_best_encoder,
};
-static void intel_crt_enc_destroy(struct drm_encoder *encoder)
+static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
{
- drm_encoder_cleanup(encoder);
+ DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
+ return 1;
}
-static const struct drm_encoder_funcs intel_crt_enc_funcs = {
- .destroy = intel_crt_enc_destroy,
+static const struct dmi_system_id intel_no_crt[] = {
+ {
+ .callback = intel_no_crt_dmi_callback,
+ .ident = "ACER ZGB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+ },
+ },
+ {
+ .callback = intel_no_crt_dmi_callback,
+ .ident = "DELL XPS 8700",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
+ },
+ },
+ { }
};
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
- struct intel_output *intel_output;
+ struct intel_crt *crt;
+ struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 i2c_reg;
- intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
- if (!intel_output)
+ /* Skip machines without VGA that falsely report hotplug events */
+ if (dmi_check_system(intel_no_crt))
+ return;
+
+ crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
+ if (!crt)
+ return;
+
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(crt);
return;
+ }
- connector = &intel_output->base;
- drm_connector_init(dev, &intel_output->base,
+ connector = &intel_connector->base;
+ crt->connector = intel_connector;
+ drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
- drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
+ drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
- drm_mode_connector_attach_encoder(&intel_output->base,
- &intel_output->enc);
-
- /* Set up the DDC bus. */
- if (IS_IRONLAKE(dev))
- i2c_reg = PCH_GPIOA;
- else {
- i2c_reg = GPIOA;
- /* Use VBT information for CRT DDC if available */
- if (dev_priv->crt_ddc_bus != 0)
- i2c_reg = dev_priv->crt_ddc_bus;
- }
- intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
- if (!intel_output->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
- "failed.\n");
- return;
- }
+ intel_connector_attach_encoder(intel_connector, &crt->base);
- intel_output->type = INTEL_OUTPUT_ANALOG;
- intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
- connector->interlace_allowed = 0;
+ crt->base.type = INTEL_OUTPUT_ANALOG;
+ crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
+ if (IS_I830(dev))
+ crt->base.crtc_mask = (1 << 0);
+ else
+ crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
+ if (IS_GEN2(dev))
+ connector->interlace_allowed = 0;
+ else
+ connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
+ if (HAS_PCH_SPLIT(dev))
+ crt->adpa_reg = PCH_ADPA;
+ else if (IS_VALLEYVIEW(dev))
+ crt->adpa_reg = VLV_ADPA;
+ else
+ crt->adpa_reg = ADPA;
+
+ crt->base.compute_config = intel_crt_compute_config;
+ crt->base.disable = intel_disable_crt;
+ crt->base.enable = intel_enable_crt;
+ if (I915_HAS_HOTPLUG(dev))
+ crt->base.hpd_pin = HPD_CRT;
+ if (HAS_DDI(dev)) {
+ crt->base.get_config = hsw_crt_get_config;
+ crt->base.get_hw_state = intel_ddi_get_hw_state;
+ } else {
+ crt->base.get_config = intel_crt_get_config;
+ crt->base.get_hw_state = intel_crt_get_hw_state;
+ }
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->unregister = intel_connector_unregister;
+
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
- dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
+ if (!I915_HAS_HOTPLUG(dev))
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ /*
+ * Configure the automatic hotplug detection
+ */
+ crt->force_hotplug_required = 0;
+
+ /*
+ * TODO: find a proper way to discover whether we need to set the
+ * polarity and link reversal bits or not, instead of relying on the
+ * BIOS.
+ */
+ if (HAS_PCH_LPT(dev)) {
+ u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
+ FDI_RX_LINK_REVERSAL_OVERRIDE;
+
+ dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
+ }
+
+ intel_crt_reset(connector);
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
new file mode 100644
index 00000000000..b17b9c7c769
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -0,0 +1,1720 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/* HDMI/DVI modes ignore everything but the last 2 items. So we share
+ * them for both DP and FDI transports, allowing those ports to
+ * automatically adapt to HDMI connections as well
+ */
+static const u32 hsw_ddi_translations_dp[] = {
+ 0x00FFFFFF, 0x0006000E, /* DP parameters */
+ 0x00D75FFF, 0x0005000A,
+ 0x00C30FFF, 0x00040006,
+ 0x80AAAFFF, 0x000B0000,
+ 0x00FFFFFF, 0x0005000A,
+ 0x00D75FFF, 0x000C0004,
+ 0x80C30FFF, 0x000B0000,
+ 0x00FFFFFF, 0x00040006,
+ 0x80D75FFF, 0x000B0000,
+};
+
+static const u32 hsw_ddi_translations_fdi[] = {
+ 0x00FFFFFF, 0x0007000E, /* FDI parameters */
+ 0x00D75FFF, 0x000F000A,
+ 0x00C30FFF, 0x00060006,
+ 0x00AAAFFF, 0x001E0000,
+ 0x00FFFFFF, 0x000F000A,
+ 0x00D75FFF, 0x00160004,
+ 0x00C30FFF, 0x001E0000,
+ 0x00FFFFFF, 0x00060006,
+ 0x00D75FFF, 0x001E0000,
+};
+
+static const u32 hsw_ddi_translations_hdmi[] = {
+ /* Idx NT mV diff T mV diff db */
+ 0x00FFFFFF, 0x0006000E, /* 0: 400 400 0 */
+ 0x00E79FFF, 0x000E000C, /* 1: 400 500 2 */
+ 0x00D75FFF, 0x0005000A, /* 2: 400 600 3.5 */
+ 0x00FFFFFF, 0x0005000A, /* 3: 600 600 0 */
+ 0x00E79FFF, 0x001D0007, /* 4: 600 750 2 */
+ 0x00D75FFF, 0x000C0004, /* 5: 600 900 3.5 */
+ 0x00FFFFFF, 0x00040006, /* 6: 800 800 0 */
+ 0x80E79FFF, 0x00030002, /* 7: 800 1000 2 */
+ 0x00FFFFFF, 0x00140005, /* 8: 850 850 0 */
+ 0x00FFFFFF, 0x000C0004, /* 9: 900 900 0 */
+ 0x00FFFFFF, 0x001C0003, /* 10: 950 950 0 */
+ 0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */
+};
+
+static const u32 bdw_ddi_translations_edp[] = {
+ 0x00FFFFFF, 0x00000012, /* eDP parameters */
+ 0x00EBAFFF, 0x00020011,
+ 0x00C71FFF, 0x0006000F,
+ 0x00FFFFFF, 0x00020011,
+ 0x00DB6FFF, 0x0005000F,
+ 0x00BEEFFF, 0x000A000C,
+ 0x00FFFFFF, 0x0005000F,
+ 0x00DB6FFF, 0x000A000C,
+ 0x00FFFFFF, 0x000A000C,
+ 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
+};
+
+static const u32 bdw_ddi_translations_dp[] = {
+ 0x00FFFFFF, 0x0007000E, /* DP parameters */
+ 0x00D75FFF, 0x000E000A,
+ 0x00BEFFFF, 0x00140006,
+ 0x00FFFFFF, 0x000E000A,
+ 0x00D75FFF, 0x00180004,
+ 0x80CB2FFF, 0x001B0002,
+ 0x00F7DFFF, 0x00180004,
+ 0x80D75FFF, 0x001B0002,
+ 0x80FFFFFF, 0x001B0002,
+ 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
+};
+
+static const u32 bdw_ddi_translations_fdi[] = {
+ 0x00FFFFFF, 0x0001000E, /* FDI parameters */
+ 0x00D75FFF, 0x0004000A,
+ 0x00C30FFF, 0x00070006,
+ 0x00AAAFFF, 0x000C0000,
+ 0x00FFFFFF, 0x0004000A,
+ 0x00D75FFF, 0x00090004,
+ 0x00C30FFF, 0x000C0000,
+ 0x00FFFFFF, 0x00070006,
+ 0x00D75FFF, 0x000C0000,
+ 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
+};
+
+enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+ type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+ return intel_dig_port->port;
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ return PORT_E;
+
+ } else {
+ DRM_ERROR("Invalid DDI encoder type %d\n", type);
+ BUG();
+ }
+}
+
+/*
+ * Starting with Haswell, DDI port buffers must be programmed with correct
+ * values in advance. The buffer values are different for FDI and DP modes,
+ * but the HDMI/DVI fields are shared among those. So we program the DDI
+ * in either FDI or DP modes only, as HDMI connections will work with both
+ * of those
+ */
+static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+ int i;
+ int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+ const u32 *ddi_translations_fdi;
+ const u32 *ddi_translations_dp;
+ const u32 *ddi_translations_edp;
+ const u32 *ddi_translations;
+
+ if (IS_BROADWELL(dev)) {
+ ddi_translations_fdi = bdw_ddi_translations_fdi;
+ ddi_translations_dp = bdw_ddi_translations_dp;
+ ddi_translations_edp = bdw_ddi_translations_edp;
+ } else if (IS_HASWELL(dev)) {
+ ddi_translations_fdi = hsw_ddi_translations_fdi;
+ ddi_translations_dp = hsw_ddi_translations_dp;
+ ddi_translations_edp = hsw_ddi_translations_dp;
+ } else {
+ WARN(1, "ddi translation table missing\n");
+ ddi_translations_edp = bdw_ddi_translations_dp;
+ ddi_translations_fdi = bdw_ddi_translations_fdi;
+ ddi_translations_dp = bdw_ddi_translations_dp;
+ }
+
+ switch (port) {
+ case PORT_A:
+ ddi_translations = ddi_translations_edp;
+ break;
+ case PORT_B:
+ case PORT_C:
+ ddi_translations = ddi_translations_dp;
+ break;
+ case PORT_D:
+ if (intel_dp_is_edp(dev, PORT_D))
+ ddi_translations = ddi_translations_edp;
+ else
+ ddi_translations = ddi_translations_dp;
+ break;
+ case PORT_E:
+ ddi_translations = ddi_translations_fdi;
+ break;
+ default:
+ BUG();
+ }
+
+ for (i = 0, reg = DDI_BUF_TRANS(port);
+ i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+ I915_WRITE(reg, ddi_translations[i]);
+ reg += 4;
+ }
+ /* Entry 9 is for HDMI: */
+ for (i = 0; i < 2; i++) {
+ I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
+ reg += 4;
+ }
+}
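
For reference: the loops above write nine two-register DP/FDI entries and then the two HDMI registers, so the HDMI entry always starts at a fixed offset from DDI_BUF_TRANS(port). A sketch of that address arithmetic, not part of this patch:

static u32 ddi_buf_trans_hdmi_reg(enum port port)
{
	/* 9 entries * 2 u32 registers * 4 bytes precede the HDMI entry */
	return DDI_BUF_TRANS(port) + 9 * 2 * 4;
}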
+
+/* Program DDI buffers translations for DP. By default, program ports A-D in DP
+ * mode and port E for FDI.
+ */
+void intel_prepare_ddi(struct drm_device *dev)
+{
+ int port;
+
+ if (!HAS_DDI(dev))
+ return;
+
+ for (port = PORT_A; port <= PORT_E; port++)
+ intel_prepare_ddi_buffers(dev, port);
+}
+
+static const long hsw_ddi_buf_ctl_values[] = {
+ DDI_BUF_EMP_400MV_0DB_HSW,
+ DDI_BUF_EMP_400MV_3_5DB_HSW,
+ DDI_BUF_EMP_400MV_6DB_HSW,
+ DDI_BUF_EMP_400MV_9_5DB_HSW,
+ DDI_BUF_EMP_600MV_0DB_HSW,
+ DDI_BUF_EMP_600MV_3_5DB_HSW,
+ DDI_BUF_EMP_600MV_6DB_HSW,
+ DDI_BUF_EMP_800MV_0DB_HSW,
+ DDI_BUF_EMP_800MV_3_5DB_HSW
+};
+
+static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ uint32_t reg = DDI_BUF_CTL(port);
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ udelay(1);
+ if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+ return;
+ }
+ DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+}
+
+/* Starting with Haswell, different DDI ports can work in FDI mode for
+ * connection to the PCH-located connectors. For this, it is necessary to train
+ * both the DDI port and PCH receiver for the desired DDI buffer settings.
+ *
+ * The recommended port to work in FDI mode is DDI E, which we use here. Also,
+ * please note that when FDI mode is active on DDI E, it shares 2 lines with
+ * DDI A (which is used for eDP)
+ */
+
+void hsw_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 temp, i, rx_ctl_val;
+
+ /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
+ * mode set "sequence for CRT port" document:
+ * - TP1 to TP2 time with the default value
+ * - FDI delay to 90h
+ *
+ * WaFDIAutoLinkSetTimingOverrride:hsw
+ */
+ I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
+ FDI_RX_PWRDN_LANE0_VAL(2) |
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ /* Enable the PCH Receiver FDI PLL */
+ rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+ FDI_RX_PLL_ENABLE |
+ FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+ udelay(220);
+
+ /* Switch from Rawclk to PCDclk */
+ rx_ctl_val |= FDI_PCDCLK;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+
+ /* Configure Port Clock Select */
+ I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
+
+ /* Start the training iterating through available voltages and emphasis,
+ * testing each value twice. */
+ for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
+ /* Configure DP_TP_CTL with auto-training */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_LINK_TRAIN_PAT1 |
+ DP_TP_CTL_ENABLE);
+
+ /* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
+ * DDI E does not support port reversal, the functionality is
+ * achieved on the PCH side in FDI_RX_CTL, so no need to set the
+ * port reversal bit */
+ I915_WRITE(DDI_BUF_CTL(PORT_E),
+ DDI_BUF_CTL_ENABLE |
+ ((intel_crtc->config.fdi_lanes - 1) << 1) |
+ hsw_ddi_buf_ctl_values[i / 2]);
+ POSTING_READ(DDI_BUF_CTL(PORT_E));
+
+ udelay(600);
+
+ /* Program PCH FDI Receiver TU */
+ I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
+
+ /* Enable PCH FDI Receiver with auto-training */
+ rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+
+ /* Wait for FDI receiver lane calibration */
+ udelay(30);
+
+ /* Unset FDI_RX_MISC pwrdn lanes */
+ temp = I915_READ(_FDI_RXA_MISC);
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ I915_WRITE(_FDI_RXA_MISC, temp);
+ POSTING_READ(_FDI_RXA_MISC);
+
+ /* Wait for FDI auto training time */
+ udelay(5);
+
+ temp = I915_READ(DP_TP_STATUS(PORT_E));
+ if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
+ DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
+
+ /* Enable normal pixel sending for FDI */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
+
+ return;
+ }
+
+ temp = I915_READ(DDI_BUF_CTL(PORT_E));
+ temp &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
+ POSTING_READ(DDI_BUF_CTL(PORT_E));
+
+ /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+ temp = I915_READ(DP_TP_CTL(PORT_E));
+ temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(PORT_E), temp);
+ POSTING_READ(DP_TP_CTL(PORT_E));
+
+ intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+
+ rx_ctl_val &= ~FDI_RX_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+
+ /* Reset FDI_RX_MISC pwrdn lanes */
+ temp = I915_READ(_FDI_RXA_MISC);
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ I915_WRITE(_FDI_RXA_MISC, temp);
+ POSTING_READ(_FDI_RXA_MISC);
+ }
+
+ DRM_ERROR("FDI link training failed!\n");
+}
+
+static struct intel_encoder *
+intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder, *ret = NULL;
+ int num_encoders = 0;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ ret = intel_encoder;
+ num_encoders++;
+ }
+
+ if (num_encoders != 1)
+ WARN(1, "%d encoders on crtc for pipe %c\n", num_encoders,
+ pipe_name(intel_crtc->pipe));
+
+ BUG_ON(ret == NULL);
+ return ret;
+}
+
+void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t val;
+
+ switch (intel_crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_SPLL:
+ plls->spll_refcount--;
+ if (plls->spll_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling SPLL\n");
+ val = I915_READ(SPLL_CTL);
+ WARN_ON(!(val & SPLL_PLL_ENABLE));
+ I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ POSTING_READ(SPLL_CTL);
+ }
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ plls->wrpll1_refcount--;
+ if (plls->wrpll1_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling WRPLL 1\n");
+ val = I915_READ(WRPLL_CTL1);
+ WARN_ON(!(val & WRPLL_PLL_ENABLE));
+ I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL1);
+ }
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ plls->wrpll2_refcount--;
+ if (plls->wrpll2_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling WRPLL 2\n");
+ val = I915_READ(WRPLL_CTL2);
+ WARN_ON(!(val & WRPLL_PLL_ENABLE));
+ I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL2);
+ }
+ break;
+ }
+
+ WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
+ WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
+ WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
+
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
+}
+
+#define LC_FREQ 2700
+#define LC_FREQ_2K (LC_FREQ * 2000)
+
+#define P_MIN 2
+#define P_MAX 64
+#define P_INC 2
+
+/* Constraints for PLL good behavior */
+#define REF_MIN 48
+#define REF_MAX 400
+#define VCO_MIN 2400
+#define VCO_MAX 4800
+
+#define ABS_DIFF(a, b) ((a > b) ? (a - b) : (b - a))
+
+struct wrpll_rnp {
+ unsigned p, n2, r2;
+};
+
+static unsigned wrpll_get_budget_for_freq(int clock)
+{
+ unsigned budget;
+
+ switch (clock) {
+ case 25175000:
+ case 25200000:
+ case 27000000:
+ case 27027000:
+ case 37762500:
+ case 37800000:
+ case 40500000:
+ case 40541000:
+ case 54000000:
+ case 54054000:
+ case 59341000:
+ case 59400000:
+ case 72000000:
+ case 74176000:
+ case 74250000:
+ case 81000000:
+ case 81081000:
+ case 89012000:
+ case 89100000:
+ case 108000000:
+ case 108108000:
+ case 111264000:
+ case 111375000:
+ case 148352000:
+ case 148500000:
+ case 162000000:
+ case 162162000:
+ case 222525000:
+ case 222750000:
+ case 296703000:
+ case 297000000:
+ budget = 0;
+ break;
+ case 233500000:
+ case 245250000:
+ case 247750000:
+ case 253250000:
+ case 298000000:
+ budget = 1500;
+ break;
+ case 169128000:
+ case 169500000:
+ case 179500000:
+ case 202000000:
+ budget = 2000;
+ break;
+ case 256250000:
+ case 262500000:
+ case 270000000:
+ case 272500000:
+ case 273750000:
+ case 280750000:
+ case 281250000:
+ case 286000000:
+ case 291750000:
+ budget = 4000;
+ break;
+ case 267250000:
+ case 268500000:
+ budget = 5000;
+ break;
+ default:
+ budget = 1000;
+ break;
+ }
+
+ return budget;
+}
+
+static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
+ unsigned r2, unsigned n2, unsigned p,
+ struct wrpll_rnp *best)
+{
+ uint64_t a, b, c, d, diff, diff_best;
+
+ /* No best (r,n,p) yet */
+ if (best->p == 0) {
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ return;
+ }
+
+ /*
+ * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
+ * freq2k.
+ *
+ * delta = 1e6 *
+ * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
+ * freq2k;
+ *
+ * and we would like delta <= budget.
+ *
+ * If the discrepancy is above the PPM-based budget, always prefer to
+ * improve upon the previous solution. However, if you're within the
+ * budget, try to maximize Ref * VCO, that is N / (P * R^2).
+ */
+ a = freq2k * budget * p * r2;
+ b = freq2k * budget * best->p * best->r2;
+ diff = ABS_DIFF((freq2k * p * r2), (LC_FREQ_2K * n2));
+ diff_best = ABS_DIFF((freq2k * best->p * best->r2),
+ (LC_FREQ_2K * best->n2));
+ c = 1000000 * diff;
+ d = 1000000 * diff_best;
+
+ if (a < c && b < d) {
+ /* If both are above the budget, pick the closer */
+ if (best->p * best->r2 * diff < p * r2 * diff_best) {
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ }
+ } else if (a >= c && b < d) {
+ /* The candidate is within the budget but the incumbent is not: update. */
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ } else if (a >= c && b >= d) {
+ /* Both are below the limit, so pick the higher n2/(r2*r2) */
+ if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ }
+ }
+ /* Otherwise a < c && b >= d, do nothing */
+}
+
+static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ int reg)
+{
+ int refclk = LC_FREQ;
+ int n, p, r;
+ u32 wrpll;
+
+ wrpll = I915_READ(reg);
+ switch (wrpll & SPLL_PLL_REF_MASK) {
+ case SPLL_PLL_SSC:
+ case SPLL_PLL_NON_SSC:
+ /*
+ * We could calculate spread here, but our checking
+ * code only cares about 5% accuracy, and spread is a max of
+ * 0.5% downspread.
+ */
+ refclk = 135;
+ break;
+ case SPLL_PLL_LCPLL:
+ refclk = LC_FREQ;
+ break;
+ default:
+ WARN(1, "bad wrpll refclk\n");
+ return 0;
+ }
+
+ r = wrpll & WRPLL_DIVIDER_REF_MASK;
+ p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
+ n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
+
+ /* Convert to KHz, p & r have a fixed point portion */
+ return (refclk * n * 100) / (p * r);
+}
+
+static void intel_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(encoder);
+ int link_clock = 0;
+ u32 val, pll;
+
+ val = I915_READ(PORT_CLK_SEL(port));
+ switch (val & PORT_CLK_SEL_MASK) {
+ case PORT_CLK_SEL_LCPLL_810:
+ link_clock = 81000;
+ break;
+ case PORT_CLK_SEL_LCPLL_1350:
+ link_clock = 135000;
+ break;
+ case PORT_CLK_SEL_LCPLL_2700:
+ link_clock = 270000;
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
+ break;
+ case PORT_CLK_SEL_SPLL:
+ pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
+ if (pll == SPLL_PLL_FREQ_810MHz)
+ link_clock = 81000;
+ else if (pll == SPLL_PLL_FREQ_1350MHz)
+ link_clock = 135000;
+ else if (pll == SPLL_PLL_FREQ_2700MHz)
+ link_clock = 270000;
+ else {
+ WARN(1, "bad spll freq\n");
+ return;
+ }
+ break;
+ default:
+ WARN(1, "bad port clock sel\n");
+ return;
+ }
+
+ pipe_config->port_clock = link_clock * 2;
+
+ if (pipe_config->has_pch_encoder)
+ pipe_config->adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->fdi_m_n);
+ else if (pipe_config->has_dp_encoder)
+ pipe_config->adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+ else
+ pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
+static void
+intel_ddi_calculate_wrpll(int clock /* in Hz */,
+ unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
+{
+ uint64_t freq2k;
+ unsigned p, n2, r2;
+ struct wrpll_rnp best = { 0, 0, 0 };
+ unsigned budget;
+
+ freq2k = clock / 100;
+
+ budget = wrpll_get_budget_for_freq(clock);
+
+	/* Special case handling for the 540 MHz pixel clock: bypass the WR PLL
+	 * dividers entirely and pass the LC PLL frequency straight through. */
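+	/* (n2 = 2, p = 1, r2 = 2 makes LC_FREQ_2K * n2 / (p * r2) equal
+	 * LC_FREQ_2K itself, i.e. freq2k = 5400000.) */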
+ if (freq2k == 5400000) {
+ *n2_out = 2;
+ *p_out = 1;
+ *r2_out = 2;
+ return;
+ }
+
+ /*
+ * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
+ * the WR PLL.
+ *
+ * We want R so that REF_MIN <= Ref <= REF_MAX.
+ * Injecting R2 = 2 * R gives:
+ * REF_MAX * r2 > LC_FREQ * 2 and
+ * REF_MIN * r2 < LC_FREQ * 2
+ *
+ * Which means the desired boundaries for r2 are:
+ * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
+ *
+ */
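+	/* With LC_FREQ = 2700, REF_MAX = 400 and REF_MIN = 48 this scans
+	 * r2 = 14..112 (5400 / 400 = 13, so start at 14; 5400 / 48 = 112). */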
+ for (r2 = LC_FREQ * 2 / REF_MAX + 1;
+ r2 <= LC_FREQ * 2 / REF_MIN;
+ r2++) {
+
+ /*
+ * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
+ *
+ * Once again we want VCO_MIN <= VCO <= VCO_MAX.
+ * Injecting R2 = 2 * R and N2 = 2 * N, we get:
+ * VCO_MAX * r2 > n2 * LC_FREQ and
+ * VCO_MIN * r2 < n2 * LC_FREQ)
+ *
+ * Which means the desired boundaries for n2 are:
+ * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
+ */
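+		/* e.g. r2 = 14 gives n2 = 13..24
+		 * (2400 * 14 / 2700 = 12, 4800 * 14 / 2700 = 24). */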
+ for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
+ n2 <= VCO_MAX * r2 / LC_FREQ;
+ n2++) {
+
+ for (p = P_MIN; p <= P_MAX; p += P_INC)
+ wrpll_update_rnp(freq2k, budget,
+ r2, n2, p, &best);
+ }
+ }
+
+ *n2_out = best.n2;
+ *p_out = best.p;
+ *r2_out = best.r2;
+}
+
+/*
+ * Tries to find a PLL for the CRTC. If it finds one, it increases the
+ * refcount and stores it in intel_crtc->ddi_pll_sel, so other mode sets won't
+ * be able to steal the selected PLL. You need to call intel_ddi_pll_enable()
+ * to actually enable the PLL.
+ */
+bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ int type = intel_encoder->type;
+ enum pipe pipe = intel_crtc->pipe;
+ int clock = intel_crtc->config.port_clock;
+
+ intel_ddi_put_crtc_pll(crtc);
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
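+		/* The fixed DP link rates are served directly by LCPLL taps,
+		 * so no WRPLL or SPLL has to be allocated for them. */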
+ switch (intel_dp->link_bw) {
+ case DP_LINK_BW_1_62:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+ break;
+ case DP_LINK_BW_2_7:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+ break;
+ case DP_LINK_BW_5_4:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+ break;
+ default:
+ DRM_ERROR("Link bandwidth %d unsupported\n",
+ intel_dp->link_bw);
+ return false;
+ }
+
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ uint32_t reg, val;
+ unsigned p, n2, r2;
+
+ intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+ val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
+
+ if (val == I915_READ(WRPLL_CTL1)) {
+ DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
+ pipe_name(pipe));
+ reg = WRPLL_CTL1;
+ } else if (val == I915_READ(WRPLL_CTL2)) {
+ DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
+ pipe_name(pipe));
+ reg = WRPLL_CTL2;
+ } else if (plls->wrpll1_refcount == 0) {
+ DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
+ pipe_name(pipe));
+ reg = WRPLL_CTL1;
+ } else if (plls->wrpll2_refcount == 0) {
+ DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
+ pipe_name(pipe));
+ reg = WRPLL_CTL2;
+ } else {
+ DRM_ERROR("No WRPLLs available!\n");
+ return false;
+ }
+
+		DRM_DEBUG_KMS("WRPLL: %dKHz dot clock with p=%d, n2=%d r2=%d\n",
+ clock, p, n2, r2);
+
+ if (reg == WRPLL_CTL1) {
+ plls->wrpll1_refcount++;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
+ } else {
+ plls->wrpll2_refcount++;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
+ }
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ if (plls->spll_refcount == 0) {
+ DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
+ pipe_name(pipe));
+ plls->spll_refcount++;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+ } else {
+ DRM_ERROR("SPLL already in use\n");
+ return false;
+ }
+
+ } else {
+ WARN(1, "Invalid DDI encoder type %d\n", type);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * To be called after intel_ddi_pll_select(). That one selects the PLL to be
+ * used, this one actually enables the PLL.
+ */
+void intel_ddi_pll_enable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ int clock = crtc->config.port_clock;
+ uint32_t reg, cur_val, new_val;
+ int refcount;
+ const char *pll_name;
+ uint32_t enable_bit = (1 << 31);
+ unsigned int p, n2, r2;
+
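+	/* Bit 31 is the enable bit in both SPLL_CTL and WRPLL_CTL; the
+	 * BUILD_BUG_ONs below verify this at compile time. */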
+ BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
+ BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);
+
+ switch (crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_LCPLL_2700:
+ case PORT_CLK_SEL_LCPLL_1350:
+ case PORT_CLK_SEL_LCPLL_810:
+ /*
+ * LCPLL should always be enabled at this point of the mode set
+ * sequence, so nothing to do.
+ */
+ return;
+
+ case PORT_CLK_SEL_SPLL:
+ pll_name = "SPLL";
+ reg = SPLL_CTL;
+ refcount = plls->spll_refcount;
+ new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
+ SPLL_PLL_SSC;
+ break;
+
+ case PORT_CLK_SEL_WRPLL1:
+ case PORT_CLK_SEL_WRPLL2:
+ if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
+ pll_name = "WRPLL1";
+ reg = WRPLL_CTL1;
+ refcount = plls->wrpll1_refcount;
+ } else {
+ pll_name = "WRPLL2";
+ reg = WRPLL_CTL2;
+ refcount = plls->wrpll2_refcount;
+ }
+
+ intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+ new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) |
+ WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);
+
+ break;
+
+ case PORT_CLK_SEL_NONE:
+ WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
+ return;
+ default:
+ WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
+ return;
+ }
+
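+	/* The first CRTC to use a shared PLL programs and enables it; any
+	 * later user only sanity-checks that it is already running. */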
+ cur_val = I915_READ(reg);
+
+ WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
+ if (refcount == 1) {
+ WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
+ I915_WRITE(reg, new_val);
+ POSTING_READ(reg);
+ udelay(20);
+ } else {
+ WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
+ }
+}
+
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ int type = intel_encoder->type;
+ uint32_t temp;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+
+ temp = TRANS_MSA_SYNC_CLK;
+ switch (intel_crtc->config.pipe_bpp) {
+ case 18:
+ temp |= TRANS_MSA_6_BPC;
+ break;
+ case 24:
+ temp |= TRANS_MSA_8_BPC;
+ break;
+ case 30:
+ temp |= TRANS_MSA_10_BPC;
+ break;
+ case 36:
+ temp |= TRANS_MSA_12_BPC;
+ break;
+ default:
+ BUG();
+ }
+ I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
+ }
+}
+
+void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+ uint32_t temp;
+
+ /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+ temp = TRANS_DDI_FUNC_ENABLE;
+ temp |= TRANS_DDI_SELECT_PORT(port);
+
+ switch (intel_crtc->config.pipe_bpp) {
+ case 18:
+ temp |= TRANS_DDI_BPC_6;
+ break;
+ case 24:
+ temp |= TRANS_DDI_BPC_8;
+ break;
+ case 30:
+ temp |= TRANS_DDI_BPC_10;
+ break;
+ case 36:
+ temp |= TRANS_DDI_BPC_12;
+ break;
+ default:
+ BUG();
+ }
+
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DDI_PVSYNC;
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DDI_PHSYNC;
+
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ switch (pipe) {
+ case PIPE_A:
+ /* On Haswell, can only use the always-on power well for
+ * eDP when not using the panel fitter, and when not
+ * using motion blur mitigation (which we don't
+ * support). */
+ if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled)
+ temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+ else
+ temp |= TRANS_DDI_EDP_INPUT_A_ON;
+ break;
+ case PIPE_B:
+ temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+ break;
+ case PIPE_C:
+ temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ if (intel_crtc->config.has_hdmi_sink)
+ temp |= TRANS_DDI_MODE_SELECT_HDMI;
+ else
+ temp |= TRANS_DDI_MODE_SELECT_DVI;
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ temp |= TRANS_DDI_MODE_SELECT_FDI;
+ temp |= (intel_crtc->config.fdi_lanes - 1) << 1;
+
+ } else if (type == INTEL_OUTPUT_DISPLAYPORT ||
+ type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+ temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
+ } else {
+ WARN(1, "Invalid encoder type %d for pipe %c\n",
+ intel_encoder->type, pipe_name(pipe));
+ }
+
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+ uint32_t val = I915_READ(reg);
+
+ val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
+ val |= TRANS_DDI_PORT_NONE;
+ I915_WRITE(reg, val);
+}
+
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
+{
+ struct drm_device *dev = intel_connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_connector->encoder;
+ int type = intel_connector->base.connector_type;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ enum pipe pipe = 0;
+ enum transcoder cpu_transcoder;
+ enum intel_display_power_domain power_domain;
+ uint32_t tmp;
+
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
+ if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
+ return false;
+
+ if (port == PORT_A)
+ cpu_transcoder = TRANSCODER_EDP;
+ else
+ cpu_transcoder = (enum transcoder) pipe;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
+ case TRANS_DDI_MODE_SELECT_HDMI:
+ case TRANS_DDI_MODE_SELECT_DVI:
+ return (type == DRM_MODE_CONNECTOR_HDMIA);
+
+ case TRANS_DDI_MODE_SELECT_DP_SST:
+ if (type == DRM_MODE_CONNECTOR_eDP)
+ return true;
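+		/* fall through */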
+ case TRANS_DDI_MODE_SELECT_DP_MST:
+ return (type == DRM_MODE_CONNECTOR_DisplayPort);
+
+ case TRANS_DDI_MODE_SELECT_FDI:
+ return (type == DRM_MODE_CONNECTOR_VGA);
+
+ default:
+ return false;
+ }
+}
+
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(encoder);
+ enum intel_display_power_domain power_domain;
+ u32 tmp;
+ int i;
+
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
+ tmp = I915_READ(DDI_BUF_CTL(port));
+
+ if (!(tmp & DDI_BUF_CTL_ENABLE))
+ return false;
+
+ if (port == PORT_A) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ *pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ *pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ *pipe = PIPE_C;
+ break;
+ }
+
+ return true;
+ } else {
+ for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+
+ if ((tmp & TRANS_DDI_PORT_MASK)
+ == TRANS_DDI_SELECT_PORT(port)) {
+ *pipe = i;
+ return true;
+ }
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
+
+ return false;
+}
+
+static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ uint32_t temp, ret;
+ enum port port = I915_MAX_PORTS;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ int i;
+
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ port = PORT_A;
+ } else {
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ temp &= TRANS_DDI_PORT_MASK;
+
+ for (i = PORT_B; i <= PORT_E; i++)
+ if (temp == TRANS_DDI_SELECT_PORT(i))
+ port = i;
+ }
+
+ if (port == I915_MAX_PORTS) {
+ WARN(1, "Pipe %c enabled on an unknown port\n",
+ pipe_name(pipe));
+ ret = PORT_CLK_SEL_NONE;
+ } else {
+ ret = I915_READ(PORT_CLK_SEL(port));
+ DRM_DEBUG_KMS("Pipe %c connected to port %c using clock "
+ "0x%08x\n", pipe_name(pipe), port_name(port),
+ ret);
+ }
+
+ return ret;
+}
+
+void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ struct intel_crtc *intel_crtc;
+
+ dev_priv->ddi_plls.spll_refcount = 0;
+ dev_priv->ddi_plls.wrpll1_refcount = 0;
+ dev_priv->ddi_plls.wrpll2_refcount = 0;
+
+ for_each_pipe(pipe) {
+ intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ if (!intel_crtc->active) {
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
+ continue;
+ }
+
+ intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
+ pipe);
+
+ switch (intel_crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_SPLL:
+ dev_priv->ddi_plls.spll_refcount++;
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ dev_priv->ddi_plls.wrpll1_refcount++;
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ dev_priv->ddi_plls.wrpll2_refcount++;
+ break;
+ }
+ }
+}
+
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_PORT(port));
+}
+
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_DISABLED);
+}
+
+static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+
+ if (crtc->config.has_audio) {
+ DRM_DEBUG_DRIVER("Audio on pipe %c on DDI\n",
+ pipe_name(crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("DDI audio: write eld information\n");
+ intel_write_eld(encoder, &crtc->config.adjusted_mode);
+ }
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_edp_panel_on(intel_dp);
+ }
+
+ WARN_ON(crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), crtc->ddi_pll_sel);
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+
+ intel_dp->DP = intel_dig_port->saved_port_bits |
+ DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
+
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ if (port != PORT_A)
+ intel_dp_stop_link_train(intel_dp);
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ intel_hdmi->set_infoframes(encoder,
+ crtc->config.has_hdmi_sink,
+ &crtc->config.adjusted_mode);
+ }
+}
+
+static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+ uint32_t val;
+ bool wait = false;
+
+ val = I915_READ(DDI_BUF_CTL(port));
+ if (val & DDI_BUF_CTL_ENABLE) {
+ val &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), val);
+ wait = true;
+ }
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(port), val);
+
+ if (wait)
+ intel_wait_ddi_buf_idle(dev_priv, port);
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ intel_edp_panel_vdd_on(intel_dp);
+ intel_edp_panel_off(intel_dp);
+ }
+
+ I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
+static void intel_enable_ddi(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+ uint32_t tmp;
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+
+		/* In HDMI/DVI mode, the port width and swing/emphasis values
+ * are ignored so nothing special needs to be done besides
+ * enabling the port.
+ */
+ I915_WRITE(DDI_BUF_CTL(port),
+ intel_dig_port->saved_port_bits |
+ DDI_BUF_CTL_ENABLE);
+ } else if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (port == PORT_A)
+ intel_dp_stop_link_train(intel_dp);
+
+ intel_edp_backlight_on(intel_dp);
+ intel_edp_psr_enable(intel_dp);
+ }
+
+ if (intel_crtc->config.has_audio) {
+ intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
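+		/* The audio output-enable and ELD-valid bits are grouped
+		 * four per pipe, hence the pipe * 4 shift. */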
+ tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ }
+}
+
+static void intel_disable_ddi(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int type = intel_encoder->type;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+	/* We can't touch HSW_AUD_PIN_ELD_CP_VLD unconditionally because this
+ * register is part of the power well on Haswell. */
+ if (intel_crtc->config.has_audio) {
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
+ (pipe * 4));
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+ }
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_edp_psr_disable(intel_dp);
+ intel_edp_backlight_off(intel_dp);
+ }
+}
+
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ uint32_t lcpll = I915_READ(LCPLL_CTL);
+ uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
+
+ if (lcpll & LCPLL_CD_SOURCE_FCLK) {
+ return 800000;
+ } else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) {
+ return 450000;
+ } else if (freq == LCPLL_CLK_FREQ_450) {
+ return 450000;
+ } else if (IS_HASWELL(dev)) {
+ if (IS_ULT(dev))
+ return 337500;
+ else
+ return 540000;
+ } else {
+ if (freq == LCPLL_CLK_FREQ_54O_BDW)
+ return 540000;
+ else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
+ return 337500;
+ else
+ return 675000;
+ }
+}
+
+void intel_ddi_pll_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val = I915_READ(LCPLL_CTL);
+
+ /* The LCPLL register should be turned on by the BIOS. For now let's
+ * just check its state and print errors in case something is wrong.
+ * Don't even try to turn it on.
+ */
+
+ DRM_DEBUG_KMS("CDCLK running at %dKHz\n",
+ intel_ddi_get_cdclk_freq(dev_priv));
+
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ DRM_ERROR("CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ DRM_ERROR("LCPLL is disabled\n");
+}
+
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ enum port port = intel_dig_port->port;
+ uint32_t val;
+ bool wait = false;
+
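+	/* If the DP transport is still active, disable the DDI buffer and
+	 * drop back to training pattern 1 before retraining the link. */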
+ if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
+ val = I915_READ(DDI_BUF_CTL(port));
+ if (val & DDI_BUF_CTL_ENABLE) {
+ val &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), val);
+ wait = true;
+ }
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+
+ if (wait)
+ intel_wait_ddi_buf_idle(dev_priv, port);
+ }
+
+ val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
+ DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+
+ intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
+ POSTING_READ(DDI_BUF_CTL(port));
+
+ udelay(600);
+}
+
+void intel_ddi_fdi_disable(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ uint32_t val;
+
+ intel_ddi_post_disable(intel_encoder);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_RX_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, val);
+
+ val = I915_READ(_FDI_RXA_MISC);
+ val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ I915_WRITE(_FDI_RXA_MISC, val);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_PCDCLK;
+ I915_WRITE(_FDI_RXA_CTL, val);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_RX_PLL_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, val);
+}
+
+static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
+ intel_dp_check_link_status(intel_dp);
+}
+
+void intel_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ u32 temp, flags = 0;
+
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ if (temp & TRANS_DDI_PHSYNC)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+ if (temp & TRANS_DDI_PVSYNC)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ pipe_config->adjusted_mode.flags |= flags;
+
+ switch (temp & TRANS_DDI_BPC_MASK) {
+ case TRANS_DDI_BPC_6:
+ pipe_config->pipe_bpp = 18;
+ break;
+ case TRANS_DDI_BPC_8:
+ pipe_config->pipe_bpp = 24;
+ break;
+ case TRANS_DDI_BPC_10:
+ pipe_config->pipe_bpp = 30;
+ break;
+ case TRANS_DDI_BPC_12:
+ pipe_config->pipe_bpp = 36;
+ break;
+ default:
+ break;
+ }
+
+ switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
+ case TRANS_DDI_MODE_SELECT_HDMI:
+ pipe_config->has_hdmi_sink = true;
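+		/* fall through */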
+ case TRANS_DDI_MODE_SELECT_DVI:
+ case TRANS_DDI_MODE_SELECT_FDI:
+ break;
+ case TRANS_DDI_MODE_SELECT_DP_SST:
+ case TRANS_DDI_MODE_SELECT_DP_MST:
+ pipe_config->has_dp_encoder = true;
+ intel_dp_get_m_n(intel_crtc, pipe_config);
+ break;
+ default:
+ break;
+ }
+
+ if (intel_display_power_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+ temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4)))
+ pipe_config->has_audio = true;
+ }
+
+ if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
+ pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+ /*
+ * This is a big fat ugly hack.
+ *
+ * Some machines in UEFI boot mode provide us a VBT that has 18
+ * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+ * unknown we fail to light up. Yet the same BIOS boots up with
+ * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+ * max, not what it tells us to use.
+ *
+ * Note: This will still be broken if the eDP panel is not lit
+ * up by the BIOS, and thus we can't get the mode at module
+ * load.
+ */
+ DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
+ dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+ }
+
+ intel_ddi_clock_get(encoder, pipe_config);
+}
+
+static void intel_ddi_destroy(struct drm_encoder *encoder)
+{
+ /* HDMI has nothing special to destroy, so we can go with this. */
+ intel_dp_encoder_destroy(encoder);
+}
+
+static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ int type = encoder->type;
+ int port = intel_ddi_get_encoder_port(encoder);
+
+ WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
+
+ if (port == PORT_A)
+ pipe_config->cpu_transcoder = TRANSCODER_EDP;
+
+ if (type == INTEL_OUTPUT_HDMI)
+ return intel_hdmi_compute_config(encoder, pipe_config);
+ else
+ return intel_dp_compute_config(encoder, pipe_config);
+}
+
+static const struct drm_encoder_funcs intel_ddi_funcs = {
+ .destroy = intel_ddi_destroy,
+};
+
+static struct intel_connector *
+intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
+{
+ struct intel_connector *connector;
+ enum port port = intel_dig_port->port;
+
+ connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return NULL;
+
+ intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+ if (!intel_dp_init_connector(intel_dig_port, connector)) {
+ kfree(connector);
+ return NULL;
+ }
+
+ return connector;
+}
+
+static struct intel_connector *
+intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
+{
+ struct intel_connector *connector;
+ enum port port = intel_dig_port->port;
+
+ connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return NULL;
+
+ intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+ intel_hdmi_init_connector(intel_dig_port, connector);
+
+ return connector;
+}
+
+void intel_ddi_init(struct drm_device *dev, enum port port)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *hdmi_connector = NULL;
+ struct intel_connector *dp_connector = NULL;
+ bool init_hdmi, init_dp;
+
+ init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
+ dev_priv->vbt.ddi_port_info[port].supports_hdmi);
+ init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
+ if (!init_dp && !init_hdmi) {
+ DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
+ port_name(port));
+ init_hdmi = true;
+ init_dp = true;
+ }
+
+ intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
+ if (!intel_dig_port)
+ return;
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, encoder, &intel_ddi_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ intel_encoder->compute_config = intel_ddi_compute_config;
+ intel_encoder->enable = intel_enable_ddi;
+ intel_encoder->pre_enable = intel_ddi_pre_enable;
+ intel_encoder->disable = intel_disable_ddi;
+ intel_encoder->post_disable = intel_ddi_post_disable;
+ intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+ intel_encoder->get_config = intel_ddi_get_config;
+
+ intel_dig_port->port = port;
+ intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+ (DDI_BUF_PORT_REVERSAL |
+ DDI_A_4_LANES);
+
+ intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = 0;
+ intel_encoder->hot_plug = intel_ddi_hot_plug;
+
+ if (init_dp)
+ dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
+
+ /* In theory we don't need the encoder->type check, but leave it just in
+ * case we have some really bad VBTs... */
+ if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)
+ hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port);
+
+ if (!dp_connector && !hdmi_connector) {
+ drm_encoder_cleanup(encoder);
+ kfree(intel_dig_port);
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 002612fae71..f0be855ddf4 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,534 +24,383 @@
* Eric Anholt <eric@anholt.net>
*/
+#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
-#include "drmP.h"
+#include <linux/slab.h>
+#include <linux/vgaarb.h>
+#include <drm/drm_edid.h>
+#include <drm/drmP.h>
#include "intel_drv.h"
-#include "i915_drm.h"
+#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "drm_dp_helper.h"
-
-#include "drm_crtc_helper.h"
-
-#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
-
-bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
-static void intel_update_watermarks(struct drm_device *dev);
-static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
-
-typedef struct {
- /* given values */
- int n;
- int m1, m2;
- int p1, p2;
- /* derived values */
- int dot;
- int vco;
- int m;
- int p;
-} intel_clock_t;
+#include "i915_trace.h"
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/dma_remapping.h>
+
+#define DIV_ROUND_CLOSEST_ULL(ll, d) \
+ ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
+
+static void intel_increase_pllclock(struct drm_crtc *crtc);
+static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
+
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config);
+static void ironlake_pch_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config);
+
+static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *old_fb);
+static int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *ifb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj);
+static void intel_dp_set_m_n(struct intel_crtc *crtc);
+static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
+static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
+static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n);
+static void ironlake_set_pipeconf(struct drm_crtc *crtc);
+static void haswell_set_pipeconf(struct drm_crtc *crtc);
+static void intel_set_pipe_csc(struct drm_crtc *crtc);
+static void vlv_prepare_pll(struct intel_crtc *crtc);
typedef struct {
- int min, max;
+ int min, max;
} intel_range_t;
typedef struct {
- int dot_limit;
- int p2_slow, p2_fast;
+ int dot_limit;
+ int p2_slow, p2_fast;
} intel_p2_t;
-#define INTEL_P2_NUM 2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
- intel_range_t dot, vco, n, m, m1, m2, p, p1;
- intel_p2_t p2;
- bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
- int, int, intel_clock_t *);
- bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *,
- int, int, intel_clock_t *);
+ intel_range_t dot, vco, n, m, m1, m2, p, p1;
+ intel_p2_t p2;
};
-#define I8XX_DOT_MIN 25000
-#define I8XX_DOT_MAX 350000
-#define I8XX_VCO_MIN 930000
-#define I8XX_VCO_MAX 1400000
-#define I8XX_N_MIN 3
-#define I8XX_N_MAX 16
-#define I8XX_M_MIN 96
-#define I8XX_M_MAX 140
-#define I8XX_M1_MIN 18
-#define I8XX_M1_MAX 26
-#define I8XX_M2_MIN 6
-#define I8XX_M2_MAX 16
-#define I8XX_P_MIN 4
-#define I8XX_P_MAX 128
-#define I8XX_P1_MIN 2
-#define I8XX_P1_MAX 33
-#define I8XX_P1_LVDS_MIN 1
-#define I8XX_P1_LVDS_MAX 6
-#define I8XX_P2_SLOW 4
-#define I8XX_P2_FAST 2
-#define I8XX_P2_LVDS_SLOW 14
-#define I8XX_P2_LVDS_FAST 7
-#define I8XX_P2_SLOW_LIMIT 165000
-
-#define I9XX_DOT_MIN 20000
-#define I9XX_DOT_MAX 400000
-#define I9XX_VCO_MIN 1400000
-#define I9XX_VCO_MAX 2800000
-#define PINEVIEW_VCO_MIN 1700000
-#define PINEVIEW_VCO_MAX 3500000
-#define I9XX_N_MIN 1
-#define I9XX_N_MAX 6
-/* Pineview's Ncounter is a ring counter */
-#define PINEVIEW_N_MIN 3
-#define PINEVIEW_N_MAX 6
-#define I9XX_M_MIN 70
-#define I9XX_M_MAX 120
-#define PINEVIEW_M_MIN 2
-#define PINEVIEW_M_MAX 256
-#define I9XX_M1_MIN 10
-#define I9XX_M1_MAX 22
-#define I9XX_M2_MIN 5
-#define I9XX_M2_MAX 9
-/* Pineview M1 is reserved, and must be 0 */
-#define PINEVIEW_M1_MIN 0
-#define PINEVIEW_M1_MAX 0
-#define PINEVIEW_M2_MIN 0
-#define PINEVIEW_M2_MAX 254
-#define I9XX_P_SDVO_DAC_MIN 5
-#define I9XX_P_SDVO_DAC_MAX 80
-#define I9XX_P_LVDS_MIN 7
-#define I9XX_P_LVDS_MAX 98
-#define PINEVIEW_P_LVDS_MIN 7
-#define PINEVIEW_P_LVDS_MAX 112
-#define I9XX_P1_MIN 1
-#define I9XX_P1_MAX 8
-#define I9XX_P2_SDVO_DAC_SLOW 10
-#define I9XX_P2_SDVO_DAC_FAST 5
-#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
-#define I9XX_P2_LVDS_SLOW 14
-#define I9XX_P2_LVDS_FAST 7
-#define I9XX_P2_LVDS_SLOW_LIMIT 112000
-
-/*The parameter is for SDVO on G4x platform*/
-#define G4X_DOT_SDVO_MIN 25000
-#define G4X_DOT_SDVO_MAX 270000
-#define G4X_VCO_MIN 1750000
-#define G4X_VCO_MAX 3500000
-#define G4X_N_SDVO_MIN 1
-#define G4X_N_SDVO_MAX 4
-#define G4X_M_SDVO_MIN 104
-#define G4X_M_SDVO_MAX 138
-#define G4X_M1_SDVO_MIN 17
-#define G4X_M1_SDVO_MAX 23
-#define G4X_M2_SDVO_MIN 5
-#define G4X_M2_SDVO_MAX 11
-#define G4X_P_SDVO_MIN 10
-#define G4X_P_SDVO_MAX 30
-#define G4X_P1_SDVO_MIN 1
-#define G4X_P1_SDVO_MAX 3
-#define G4X_P2_SDVO_SLOW 10
-#define G4X_P2_SDVO_FAST 10
-#define G4X_P2_SDVO_LIMIT 270000
-
-/*The parameter is for HDMI_DAC on G4x platform*/
-#define G4X_DOT_HDMI_DAC_MIN 22000
-#define G4X_DOT_HDMI_DAC_MAX 400000
-#define G4X_N_HDMI_DAC_MIN 1
-#define G4X_N_HDMI_DAC_MAX 4
-#define G4X_M_HDMI_DAC_MIN 104
-#define G4X_M_HDMI_DAC_MAX 138
-#define G4X_M1_HDMI_DAC_MIN 16
-#define G4X_M1_HDMI_DAC_MAX 23
-#define G4X_M2_HDMI_DAC_MIN 5
-#define G4X_M2_HDMI_DAC_MAX 11
-#define G4X_P_HDMI_DAC_MIN 5
-#define G4X_P_HDMI_DAC_MAX 80
-#define G4X_P1_HDMI_DAC_MIN 1
-#define G4X_P1_HDMI_DAC_MAX 8
-#define G4X_P2_HDMI_DAC_SLOW 10
-#define G4X_P2_HDMI_DAC_FAST 5
-#define G4X_P2_HDMI_DAC_LIMIT 165000
-
-/*The parameter is for SINGLE_CHANNEL_LVDS on G4x platform*/
-#define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000
-#define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000
-#define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1
-#define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3
-#define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104
-#define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138
-#define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17
-#define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23
-#define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5
-#define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11
-#define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28
-#define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112
-#define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2
-#define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8
-#define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14
-#define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14
-#define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0
-
-/*The parameter is for DUAL_CHANNEL_LVDS on G4x platform*/
-#define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000
-#define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000
-#define G4X_N_DUAL_CHANNEL_LVDS_MIN 1
-#define G4X_N_DUAL_CHANNEL_LVDS_MAX 3
-#define G4X_M_DUAL_CHANNEL_LVDS_MIN 104
-#define G4X_M_DUAL_CHANNEL_LVDS_MAX 138
-#define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17
-#define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23
-#define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5
-#define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11
-#define G4X_P_DUAL_CHANNEL_LVDS_MIN 14
-#define G4X_P_DUAL_CHANNEL_LVDS_MAX 42
-#define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2
-#define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6
-#define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7
-#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
-#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
-
-/*The parameter is for DISPLAY PORT on G4x platform*/
-#define G4X_DOT_DISPLAY_PORT_MIN 161670
-#define G4X_DOT_DISPLAY_PORT_MAX 227000
-#define G4X_N_DISPLAY_PORT_MIN 1
-#define G4X_N_DISPLAY_PORT_MAX 2
-#define G4X_M_DISPLAY_PORT_MIN 97
-#define G4X_M_DISPLAY_PORT_MAX 108
-#define G4X_M1_DISPLAY_PORT_MIN 0x10
-#define G4X_M1_DISPLAY_PORT_MAX 0x12
-#define G4X_M2_DISPLAY_PORT_MIN 0x05
-#define G4X_M2_DISPLAY_PORT_MAX 0x06
-#define G4X_P_DISPLAY_PORT_MIN 10
-#define G4X_P_DISPLAY_PORT_MAX 20
-#define G4X_P1_DISPLAY_PORT_MIN 1
-#define G4X_P1_DISPLAY_PORT_MAX 2
-#define G4X_P2_DISPLAY_PORT_SLOW 10
-#define G4X_P2_DISPLAY_PORT_FAST 10
-#define G4X_P2_DISPLAY_PORT_LIMIT 0
-
-/* Ironlake */
-/* as we calculate clock using (register_value + 2) for
- N/M1/M2, so here the range value for them is (actual_value-2).
- */
-#define IRONLAKE_DOT_MIN 25000
-#define IRONLAKE_DOT_MAX 350000
-#define IRONLAKE_VCO_MIN 1760000
-#define IRONLAKE_VCO_MAX 3510000
-#define IRONLAKE_N_MIN 1
-#define IRONLAKE_N_MAX 5
-#define IRONLAKE_M_MIN 79
-#define IRONLAKE_M_MAX 118
-#define IRONLAKE_M1_MIN 12
-#define IRONLAKE_M1_MAX 23
-#define IRONLAKE_M2_MIN 5
-#define IRONLAKE_M2_MAX 9
-#define IRONLAKE_P_SDVO_DAC_MIN 5
-#define IRONLAKE_P_SDVO_DAC_MAX 80
-#define IRONLAKE_P_LVDS_MIN 28
-#define IRONLAKE_P_LVDS_MAX 112
-#define IRONLAKE_P1_MIN 1
-#define IRONLAKE_P1_MAX 8
-#define IRONLAKE_P2_SDVO_DAC_SLOW 10
-#define IRONLAKE_P2_SDVO_DAC_FAST 5
-#define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */
-#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
-#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */
-
-#define IRONLAKE_P_DISPLAY_PORT_MIN 10
-#define IRONLAKE_P_DISPLAY_PORT_MAX 20
-#define IRONLAKE_P2_DISPLAY_PORT_FAST 10
-#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10
-#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0
-#define IRONLAKE_P1_DISPLAY_PORT_MIN 1
-#define IRONLAKE_P1_DISPLAY_PORT_MAX 2
-static bool
-intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-		    int target, int refclk, intel_clock_t *best_clock);
-static bool
-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-			    int target, int refclk, intel_clock_t *best_clock);
-static bool
-intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-			int target, int refclk, intel_clock_t *best_clock);
-static bool
-intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
-		      int target, int refclk, intel_clock_t *best_clock);
-static bool
-intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
-			   int target, int refclk, intel_clock_t *best_clock);
+int
+intel_pch_rawclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(!HAS_PCH_SPLIT(dev));
+
+	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
+}
+
+static inline u32 /* units of 100MHz */
+intel_fdi_link_freq(struct drm_device *dev)
+{
+ if (IS_GEN5(dev)) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
+ } else
+ return 27;
+}
+
+static const intel_limit_t intel_limits_i8xx_dac = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 2 },
+};
static const intel_limit_t intel_limits_i8xx_dvo = {
- .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
- .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
- .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
- .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX },
- .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX },
- .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX },
- .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX },
- .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX },
- .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
- .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
- .find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 4 },
};
static const intel_limit_t intel_limits_i8xx_lvds = {
- .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
- .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
- .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
- .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX },
- .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX },
- .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX },
- .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX },
- .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX },
- .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
- .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
- .find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 1, .max = 6 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 14, .p2_fast = 7 },
};
-
+
static const intel_limit_t intel_limits_i9xx_sdvo = {
- .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
- .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
- .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
- .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX },
- .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX },
- .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX },
- .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
- .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
- .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
- .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
- .find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
};
static const intel_limit_t intel_limits_i9xx_lvds = {
- .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
- .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
- .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
- .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX },
- .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX },
- .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX },
- .p = { .min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX },
- .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
- /* The single-channel range is 25-112Mhz, and dual-channel
- * is 80-224Mhz. Prefer single channel as much as possible.
- */
- .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
- .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
- .find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 7, .max = 98 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 7 },
};
- /* below parameter and function is for G4X Chipset Family*/
+
static const intel_limit_t intel_limits_g4x_sdvo = {
- .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX },
- .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
- .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX },
- .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX },
- .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX },
- .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX },
- .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX },
- .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX},
- .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT,
- .p2_slow = G4X_P2_SDVO_SLOW,
- .p2_fast = G4X_P2_SDVO_FAST
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 1, .max = 3},
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 10,
+ .p2_fast = 10
},
- .find_pll = intel_g4x_find_best_PLL,
- .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_hdmi = {
- .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX },
- .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
- .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX },
- .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX },
- .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX },
- .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX },
- .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX },
- .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX},
- .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT,
- .p2_slow = G4X_P2_HDMI_DAC_SLOW,
- .p2_fast = G4X_P2_HDMI_DAC_FAST
- },
- .find_pll = intel_g4x_find_best_PLL,
- .find_reduced_pll = intel_g4x_find_best_PLL,
+ .dot = { .min = 22000, .max = 400000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 16, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8},
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 10, .p2_fast = 5 },
};
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
- .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
- .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
- .vco = { .min = G4X_VCO_MIN,
- .max = G4X_VCO_MAX },
- .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN,
- .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX },
- .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN,
- .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX },
- .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN,
- .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX },
- .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN,
- .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX },
- .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN,
- .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX },
- .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN,
- .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX },
- .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT,
- .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW,
- .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
+ .dot = { .min = 20000, .max = 115000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 14, .p2_fast = 14
},
- .find_pll = intel_g4x_find_best_PLL,
- .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
- .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
- .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
- .vco = { .min = G4X_VCO_MIN,
- .max = G4X_VCO_MAX },
- .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN,
- .max = G4X_N_DUAL_CHANNEL_LVDS_MAX },
- .m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN,
- .max = G4X_M_DUAL_CHANNEL_LVDS_MAX },
- .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN,
- .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX },
- .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN,
- .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX },
- .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN,
- .max = G4X_P_DUAL_CHANNEL_LVDS_MAX },
- .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN,
- .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX },
- .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT,
- .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW,
- .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
+ .dot = { .min = 80000, .max = 224000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 7, .p2_fast = 7
},
- .find_pll = intel_g4x_find_best_PLL,
- .find_reduced_pll = intel_g4x_find_best_PLL,
-};
-
-static const intel_limit_t intel_limits_g4x_display_port = {
- .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN,
- .max = G4X_DOT_DISPLAY_PORT_MAX },
- .vco = { .min = G4X_VCO_MIN,
- .max = G4X_VCO_MAX},
- .n = { .min = G4X_N_DISPLAY_PORT_MIN,
- .max = G4X_N_DISPLAY_PORT_MAX },
- .m = { .min = G4X_M_DISPLAY_PORT_MIN,
- .max = G4X_M_DISPLAY_PORT_MAX },
- .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN,
- .max = G4X_M1_DISPLAY_PORT_MAX },
- .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN,
- .max = G4X_M2_DISPLAY_PORT_MAX },
- .p = { .min = G4X_P_DISPLAY_PORT_MIN,
- .max = G4X_P_DISPLAY_PORT_MAX },
- .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN,
- .max = G4X_P1_DISPLAY_PORT_MAX},
- .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT,
- .p2_slow = G4X_P2_DISPLAY_PORT_SLOW,
- .p2_fast = G4X_P2_DISPLAY_PORT_FAST },
- .find_pll = intel_find_pll_g4x_dp,
};
static const intel_limit_t intel_limits_pineview_sdvo = {
- .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
- .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
- .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
- .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
- .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
- .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
- .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
- .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
- .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
- .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
- .find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
+ .dot = { .min = 20000, .max = 400000},
+ .vco = { .min = 1700000, .max = 3500000 },
+ /* Pineview's Ncounter is a ring counter */
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ /* Pineview only has one combined m divider, which we treat as m2. */
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
};
static const intel_limit_t intel_limits_pineview_lvds = {
- .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
- .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
- .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
- .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
- .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
- .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
- .p = { .min = PINEVIEW_P_LVDS_MIN, .max = PINEVIEW_P_LVDS_MAX },
- .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
- /* Pineview only supports single-channel mode. */
- .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
- .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
- .find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1700000, .max = 3500000 },
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 7, .max = 112 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+/* Ironlake / Sandybridge
+ *
+ * We calculate clock using (register_value + 2) for N/M1/M2, so here
+ * the range value for them is (actual_value - 2).
+ */
+static const intel_limit_t intel_limits_ironlake_dac = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 5 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 118 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
};
-static const intel_limit_t intel_limits_ironlake_sdvo = {
- .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
- .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
- .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
- .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
- .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
- .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
- .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
- .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
- .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
- .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
- .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
- .find_pll = intel_g4x_find_best_PLL,
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 56 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
};
-static const intel_limit_t intel_limits_ironlake_lvds = {
- .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
- .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
- .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
- .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
- .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
- .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
- .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
- .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
- .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
- .p2_slow = IRONLAKE_P2_LVDS_SLOW,
- .p2_fast = IRONLAKE_P2_LVDS_FAST },
- .find_pll = intel_g4x_find_best_PLL,
+/* LVDS 100MHz refclk limits. */
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
};
-static const intel_limit_t intel_limits_ironlake_display_port = {
- .dot = { .min = IRONLAKE_DOT_MIN,
- .max = IRONLAKE_DOT_MAX },
- .vco = { .min = IRONLAKE_VCO_MIN,
- .max = IRONLAKE_VCO_MAX},
- .n = { .min = IRONLAKE_N_MIN,
- .max = IRONLAKE_N_MAX },
- .m = { .min = IRONLAKE_M_MIN,
- .max = IRONLAKE_M_MAX },
- .m1 = { .min = IRONLAKE_M1_MIN,
- .max = IRONLAKE_M1_MAX },
- .m2 = { .min = IRONLAKE_M2_MIN,
- .max = IRONLAKE_M2_MAX },
- .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
- .max = IRONLAKE_P_DISPLAY_PORT_MAX },
- .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
- .max = IRONLAKE_P1_DISPLAY_PORT_MAX},
- .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
- .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
- .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
- .find_pll = intel_find_pll_ironlake_dp,
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
};
-static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
+static const intel_limit_t intel_limits_vlv = {
+ /*
+ * These are the data rate limits (measured in fast clocks)
+ * since those are the strictest limits we have. The fast
+ * clock and actual rate limits are more relaxed, so checking
+ * them would make no difference.
+ */
+ .dot = { .min = 25000 * 5, .max = 270000 * 5 },
+ .vco = { .min = 4000000, .max = 6000000 },
+ .n = { .min = 1, .max = 7 },
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
+};
+
+static const intel_limit_t intel_limits_chv = {
+ /*
+ * These are the data rate limits (measured in fast clocks)
+ * since those are the strictest limits we have. The fast
+ * clock and actual rate limits are more relaxed, so checking
+ * them would make no difference.
+ */
+ .dot = { .min = 25000 * 5, .max = 540000 * 5 },
+ .vco = { .min = 4860000, .max = 6700000 },
+ .n = { .min = 1, .max = 1 },
+ .m1 = { .min = 2, .max = 2 },
+ .m2 = { .min = 24 << 22, .max = 175 << 22 },
+ .p1 = { .min = 2, .max = 4 },
+ .p2 = { .p2_slow = 1, .p2_fast = 14 },
+};
+
+static void vlv_clock(int refclk, intel_clock_t *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+}
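vlv_clock() boils down to dot = refclk * m1 * m2 / (n * p1 * p2) with round-to-nearest division; on VLV the resulting dot is in fast-clock units (5x the pixel rate), which is why the .dot limits above carry the "* 5" factor. A stand-alone sketch of the same arithmetic with made-up settings:

#include <stdio.h>

/* round-to-nearest division for positive operands */
static long div_round_closest(long n, long d)
{
        return (n + d / 2) / d;
}

int main(void)
{
        /* hypothetical VLV settings; clocks in kHz */
        long refclk = 100000, m1 = 2, m2 = 120, n = 4, p1 = 3, p2 = 4;
        long m = m1 * m2, p = p1 * p2;
        long vco = div_round_closest(refclk * m, n);    /* 6000000 kHz */
        long dot = div_round_closest(vco, p);           /* 500000 kHz, fast clock */

        printf("vco = %ld kHz, dot = %ld kHz (pixel rate %ld kHz)\n",
               vco, dot, dot / 5);
        return 0;
}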
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *encoder;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->type == type)
+ return true;
+
+ return false;
+}
+
+static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+ int refclk)
+{
+ struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_ironlake_lvds;
- else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- HAS_eDP)
- limit = &intel_limits_ironlake_display_port;
- else
- limit = &intel_limits_ironlake_sdvo;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_is_dual_link_lvds(dev)) {
+ if (refclk == 100000)
+ limit = &intel_limits_ironlake_dual_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_dual_lvds;
+ } else {
+ if (refclk == 100000)
+ limit = &intel_limits_ironlake_single_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_single_lvds;
+ }
+ } else {
+ limit = &intel_limits_ironlake_dac;
+ }
return limit;
}
@@ -559,54 +408,54 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
- /* LVDS with dual channel */
+ if (intel_is_dual_link_lvds(dev))
limit = &intel_limits_g4x_dual_channel_lvds;
else
- /* LVDS with dual channel */
limit = &intel_limits_g4x_single_channel_lvds;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
limit = &intel_limits_g4x_hdmi;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
- } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
- limit = &intel_limits_g4x_display_port;
} else /* The option is for other outputs */
limit = &intel_limits_i9xx_sdvo;
return limit;
}
-static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
- if (IS_IRONLAKE(dev))
- limit = intel_ironlake_limit(crtc);
+ if (HAS_PCH_SPLIT(dev))
+ limit = intel_ironlake_limit(crtc, refclk);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
- } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_i9xx_lvds;
- else
- limit = &intel_limits_i9xx_sdvo;
} else if (IS_PINEVIEW(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
+ } else if (IS_CHERRYVIEW(dev)) {
+ limit = &intel_limits_chv;
+ } else if (IS_VALLEYVIEW(dev)) {
+ limit = &intel_limits_vlv;
+ } else if (!IS_GEN2(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits_i9xx_lvds;
+ else
+ limit = &intel_limits_i9xx_sdvo;
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
- else
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
limit = &intel_limits_i8xx_dvo;
+ else
+ limit = &intel_limits_i8xx_dac;
}
return limit;
}
@@ -616,57 +465,36 @@ static void pineview_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
- clock->vco = refclk * clock->m / clock->n;
- clock->dot = clock->vco / clock->p;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
-static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
+static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
- if (IS_PINEVIEW(dev)) {
- pineview_clock(refclk, clock);
- return;
- }
- clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
- clock->p = clock->p1 * clock->p2;
- clock->vco = refclk * clock->m / (clock->n + 2);
- clock->dot = clock->vco / clock->p;
+ return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
-/**
- * Returns whether any output on the specified pipe is of the specified type
- */
-bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
+static void i9xx_clock(int refclk, intel_clock_t *clock)
{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *l_entry;
-
- list_for_each_entry(l_entry, &mode_config->connector_list, head) {
- if (l_entry->encoder &&
- l_entry->encoder->crtc == crtc) {
- struct intel_output *intel_output = to_intel_output(l_entry);
- if (intel_output->type == type)
- return true;
- }
- }
- return false;
+ clock->m = i9xx_dpll_compute_m(clock);
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
+ return;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
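pineview_clock() and i9xx_clock() differ only in how m and the divisor are derived from the register fields: Pineview has the single combined m divider noted in its limit table, while the i9xx formula folds in both m1 and m2 and divides by n + 2. A side-by-side sketch with illustrative register values:

#include <stdio.h>

static long div_round_closest(long n, long d)
{
        return (n + d / 2) / d;
}

int main(void)
{
        long refclk = 96000;            /* kHz */
        long n = 3, p = 10;             /* p = p1 * p2 */
        long m1 = 10, m2 = 60;          /* illustrative register values */

        /* Pineview: m is just m2 + 2 and n is used as-is */
        long m_pnv = m2 + 2;
        long vco_pnv = div_round_closest(refclk * m_pnv, n);

        /* i9xx: m folds in both dividers and the divisor is n + 2 */
        long m_i9xx = 5 * (m1 + 2) + (m2 + 2);
        long vco_i9xx = div_round_closest(refclk * m_i9xx, n + 2);

        printf("pineview: m=%ld vco=%ld dot=%ld kHz\n",
               m_pnv, vco_pnv, div_round_closest(vco_pnv, p));
        printf("i9xx:     m=%ld vco=%ld dot=%ld kHz\n",
               m_i9xx, vco_i9xx, div_round_closest(vco_i9xx, p));
        return 0;
}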
-struct drm_connector *
-intel_pipe_get_output (struct drm_crtc *crtc)
+static void chv_clock(int refclk, intel_clock_t *clock)
{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *l_entry, *ret = NULL;
-
- list_for_each_entry(l_entry, &mode_config->connector_list, head) {
- if (l_entry->encoder &&
- l_entry->encoder->crtc == crtc) {
- ret = l_entry;
- break;
- }
- }
- return ret;
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return;
+ clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
+ clock->n << 22);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
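chv_clock() treats m2 as a 22.22 fixed-point number (hence the 24 << 22 .. 175 << 22 limits above), so dividing by n << 22 cancels the scaling again. A small decode sketch with made-up values:

#include <stdio.h>
#include <stdint.h>

static uint64_t div_round_closest_u64(uint64_t n, uint64_t d)
{
        return (n + d / 2) / d;
}

int main(void)
{
        uint64_t refclk = 100000;       /* kHz */
        uint64_t n = 1, m1 = 2;         /* fixed on CHV, per the search below */
        /* m2 in 22.22 fixed point: integer part 30 plus a half */
        uint64_t m2 = (30ULL << 22) + (1ULL << 21);

        uint64_t m = m1 * m2;
        /* dividing by n << 22 strips the fixed-point scaling again */
        uint64_t vco = div_round_closest_u64(refclk * m, n << 22);

        printf("vco = %llu kHz\n", (unsigned long long)vco);
        /* 100000 * 2 * 30.5 = 6100000 kHz, inside the 4.86..6.7 GHz range */
        return 0;
}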
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -675,56 +503,57 @@ intel_pipe_get_output (struct drm_crtc *crtc)
* the given connectors.
*/
-static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
+static bool intel_PLL_is_valid(struct drm_device *dev,
+ const intel_limit_t *limit,
+ const intel_clock_t *clock)
{
- const intel_limit_t *limit = intel_limit (crtc);
- struct drm_device *dev = crtc->dev;
-
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ INTELPllInvalid("n out of range\n");
if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
- INTELPllInvalid ("p1 out of range\n");
- if (clock->p < limit->p.min || limit->p.max < clock->p)
- INTELPllInvalid ("p out of range\n");
+ INTELPllInvalid("p1 out of range\n");
if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
- INTELPllInvalid ("m2 out of range\n");
+ INTELPllInvalid("m2 out of range\n");
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
- INTELPllInvalid ("m1 out of range\n");
- if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
- INTELPllInvalid ("m1 <= m2\n");
- if (clock->m < limit->m.min || limit->m.max < clock->m)
- INTELPllInvalid ("m out of range\n");
- if (clock->n < limit->n.min || limit->n.max < clock->n)
- INTELPllInvalid ("n out of range\n");
+ INTELPllInvalid("m1 out of range\n");
+
+ if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
+ if (clock->m1 <= clock->m2)
+ INTELPllInvalid("m1 <= m2\n");
+
+ if (!IS_VALLEYVIEW(dev)) {
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ INTELPllInvalid("p out of range\n");
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ INTELPllInvalid("m out of range\n");
+ }
+
if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
- INTELPllInvalid ("vco out of range\n");
+ INTELPllInvalid("vco out of range\n");
/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
* connector, etc., rather than just a single range.
*/
if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
- INTELPllInvalid ("dot out of range\n");
+ INTELPllInvalid("dot out of range\n");
return true;
}
static bool
-intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
-
+i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int err = target;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
- (I915_READ(LVDS)) != 0) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
- * For LVDS, if the panel is on, just rely on its current
- * settings for dual-channel. We haven't figured out how to
- * reliably set up different single/dual channel state, if we
- * even can.
+ * For LVDS just rely on its current settings for dual-channel.
+ * We haven't figured out how to reliably set up different
+ * single/dual channel state, if we even can.
*/
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
+ if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
@@ -735,14 +564,13 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
clock.p2 = limit->p2.p2_fast;
}
- memset (best_clock, 0, sizeof (*best_clock));
+ memset(best_clock, 0, sizeof(*best_clock));
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
clock.m1++) {
for (clock.m2 = limit->m2.min;
clock.m2 <= limit->m2.max; clock.m2++) {
- /* m1 is always 0 in Pineview */
- if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
+ if (clock.m2 >= clock.m1)
break;
for (clock.n = limit->n.min;
clock.n <= limit->n.max; clock.n++) {
@@ -750,9 +578,12 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
- intel_clock(dev, refclk, &clock);
-
- if (!intel_PLL_is_valid(crtc, &clock))
+ i9xx_clock(refclk, &clock);
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
continue;
this_err = abs(clock.dot - target);
@@ -768,68 +599,80 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
return (err != target);
}
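i9xx_find_best_dpll() is a brute-force search: enumerate every legal (m1, m2, n, p1) combination, compute the resulting dot clock, and keep the candidate with the smallest |dot - target|; the final (err != target) merely reports whether any candidate improved on the initial error. The same pattern over a toy one-dimensional space:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int target = 1000;              /* desired clock, arbitrary units */
        int err = target;               /* initial error, as in the driver */
        int best = 0;

        /* stand-in for the nested m1/m2/n/p1 loops */
        for (int div = 1; div <= 128; div++) {
                int dot = 96000 / div;  /* candidate clock */
                int this_err = abs(dot - target);

                if (this_err < err) {
                        err = this_err;
                        best = div;
                }
        }

        /* "found" iff some candidate improved on the initial error */
        printf("best divider %d, err %d, found %d\n",
               best, err, err != target);
        return 0;
}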
-
static bool
-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
-
+pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
int err = target;
- bool found = false;
- memcpy(&clock, best_clock, sizeof(intel_clock_t));
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ /*
+ * For LVDS just rely on its current settings for dual-channel.
+ * We haven't figured out how to reliably set up different
+ * single/dual channel state, if we even can.
+ */
+ if (intel_is_dual_link_lvds(dev))
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
- for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
- /* m1 is always 0 in Pineview */
- if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
- break;
- for (clock.n = limit->n.min; clock.n <= limit->n.max;
- clock.n++) {
- int this_err;
+ memset(best_clock, 0, sizeof(*best_clock));
- intel_clock(dev, refclk, &clock);
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
- if (!intel_PLL_is_valid(crtc, &clock))
- continue;
+ pineview_clock(refclk, &clock);
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- found = true;
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
}
}
}
}
- return found;
+ return (err != target);
}
static bool
-intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
+g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int max_n;
bool found;
- /* approximately equals target * 0.00488 */
- int err_most = (target >> 8) + (target >> 10);
+ /* approximately equals target * 0.00585 */
+ int err_most = (target >> 8) + (target >> 9);
found = false;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- int lvds_reg;
-
- if (IS_IRONLAKE(dev))
- lvds_reg = PCH_LVDS;
- else
- lvds_reg = LVDS;
- if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
+ if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
@@ -842,9 +685,9 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
memset(best_clock, 0, sizeof(*best_clock));
max_n = limit->n.max;
- /* based on hardware requriment prefer smaller n to precision */
+ /* based on hardware requirement, prefer smaller n to precision */
for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- /* based on hardware requirment prefere larger m1,m2 */
+ /* based on hardware requirement, prefer larger m1,m2 */
for (clock.m1 = limit->m1.max;
clock.m1 >= limit->m1.min; clock.m1--) {
for (clock.m2 = limit->m2.max;
@@ -853,10 +696,12 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
clock.p1 >= limit->p1.min; clock.p1--) {
int this_err;
- intel_clock(dev, refclk, &clock);
- if (!intel_PLL_is_valid(crtc, &clock))
+ i9xx_clock(refclk, &clock);
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
continue;
- this_err = abs(clock.dot - target) ;
+
+ this_err = abs(clock.dot - target);
if (this_err < err_most) {
*best_clock = clock;
err_most = this_err;
@@ -871,1125 +716,4110 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
+vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
+ unsigned int bestppm = 1000000;
+ /* min update 19.2 MHz */
+ int max_n = min(limit->n.max, refclk / 19200);
+ bool found = false;
- /* return directly when it is eDP */
- if (HAS_eDP)
- return true;
+ target *= 5; /* fast clock */
- if (target < 200000) {
- clock.n = 1;
- clock.p1 = 2;
- clock.p2 = 10;
- clock.m1 = 12;
- clock.m2 = 9;
- } else {
- clock.n = 2;
- clock.p1 = 1;
- clock.p2 = 10;
- clock.m1 = 14;
- clock.m2 = 8;
- }
- intel_clock(dev, refclk, &clock);
- memcpy(best_clock, &clock, sizeof(intel_clock_t));
- return true;
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ /* based on hardware requirement, prefer smaller n to precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
+ clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ clock.p = clock.p1 * clock.p2;
+ /* based on hardware requirement, prefer bigger m1,m2 values */
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+ unsigned int ppm, diff;
+
+ clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
+ refclk * clock.m1);
+
+ vlv_clock(refclk, &clock);
+
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
+ continue;
+
+ diff = abs(clock.dot - target);
+ ppm = div_u64(1000000ULL * diff, target);
+
+ if (ppm < 100 && clock.p > best_clock->p) {
+ bestppm = 0;
+ *best_clock = clock;
+ found = true;
+ }
+
+ if (bestppm >= 10 && ppm < bestppm - 10) {
+ bestppm = ppm;
+ *best_clock = clock;
+ found = true;
+ }
+ }
+ }
+ }
+ }
+
+ return found;
}
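vlv_find_best_dpll() above scores candidates in parts per million of deviation rather than in absolute error, with a two-tier preference: within 100 ppm it favours the larger post divider p, otherwise it only switches when the deviation improves by more than 10 ppm. A sketch of just the scoring arithmetic (the link rate is made up):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
        /* target and candidate dot clocks in fast-clock kHz (made up) */
        int64_t target = 5 * 162000;    /* 162 MHz pixel clock, times 5 */
        int64_t dot = 810050;

        uint64_t diff = llabs(dot - target);
        uint64_t ppm = 1000000ULL * diff / target;

        printf("diff = %llu kHz, deviation = %llu ppm\n",
               (unsigned long long)diff, (unsigned long long)ppm);
        /* 50 kHz off 810000 kHz is ~61 ppm, i.e. good enough for the
         * "ppm < 100, prefer bigger p" branch of the search above */
        return 0;
}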
-/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
-intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
-{
- intel_clock_t clock;
- if (target < 200000) {
- clock.p1 = 2;
- clock.p2 = 10;
- clock.n = 2;
- clock.m1 = 23;
- clock.m2 = 8;
- } else {
- clock.p1 = 1;
- clock.p2 = 10;
- clock.n = 1;
- clock.m1 = 14;
- clock.m2 = 2;
- }
- clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
- clock.p = (clock.p1 * clock.p2);
- clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
- clock.vco = 0;
- memcpy(best_clock, &clock, sizeof(intel_clock_t));
- return true;
+chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ intel_clock_t clock;
+ uint64_t m2;
+ bool found = false;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ /*
+ * Based on the hardware doc, n is always set to 1 and m1 is always
+ * set to 2. If we ever need to support a 200MHz refclk, we must
+ * revisit this because n may no longer be 1.
+ */
+ clock.n = 1, clock.m1 = 2;
+ target *= 5; /* fast clock */
+
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p2 = limit->p2.p2_fast;
+ clock.p2 >= limit->p2.p2_slow;
+ clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+
+ clock.p = clock.p1 * clock.p2;
+
+ m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
+ clock.n) << 22, refclk * clock.m1);
+
+ if (m2 > INT_MAX/clock.m1)
+ continue;
+
+ clock.m2 = m2;
+
+ chv_clock(refclk, &clock);
+
+ if (!intel_PLL_is_valid(dev, limit, &clock))
+ continue;
+
+ /* based on hardware requirement, prefer bigger p */
+ if (clock.p > best_clock->p) {
+ *best_clock = clock;
+ found = true;
+ }
+ }
+ }
+
+ return found;
}
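chv_find_best_dpll() inverts chv_clock(): it solves for the 22.22 fixed-point m2 that hits the target and rejects candidates whose m2 would overflow the later m1 * m2 multiplication. A sketch of that encode step with illustrative numbers:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

static uint64_t div_round_closest_u64(uint64_t n, uint64_t d)
{
        return (n + d / 2) / d;
}

int main(void)
{
        uint64_t refclk = 100000, n = 1, m1 = 2;  /* kHz; n/m1 fixed on CHV */
        uint64_t target = 5 * 270000;             /* fast clock, made up */
        uint64_t p = 4;                           /* p1 * p2 */

        /* solve vco = target * p for m2, keeping 22 fractional bits */
        uint64_t m2 = div_round_closest_u64((target * p * n) << 22,
                                            refclk * m1);

        if (m2 > INT_MAX / m1) {                  /* overflow guard */
                printf("candidate rejected\n");
                return 0;
        }
        printf("m2 = %llu (integer part %llu)\n",
               (unsigned long long)m2, (unsigned long long)(m2 >> 22));
        /* decode check: refclk * m1 * m2 / (n << 22) gives back target * p */
        return 0;
}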
-void
-intel_wait_for_vblank(struct drm_device *dev)
+bool intel_crtc_active(struct drm_crtc *crtc)
{
- /* Wait for 20ms, i.e. one cycle at 50hz. */
- msleep(20);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* Be paranoid as we can arrive here with only partial
+ * state retrieved from the hardware during setup.
+ *
+ * We can ditch the adjusted_mode.crtc_clock check as soon
+ * as Haswell has gained clock readout/fastboot support.
+ *
+ * We can ditch the crtc->primary->fb check as soon as we can
+ * properly reconstruct framebuffers.
+ */
+ return intel_crtc->active && crtc->primary->fb &&
+ intel_crtc->config.adjusted_mode.crtc_clock;
}
-/* Parameters have changed, update FBC info */
-static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane, i;
- u32 fbc_ctl, fbc_ctl2;
- dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+ return intel_crtc->config.cpu_transcoder;
+}
- if (fb->pitch < dev_priv->cfb_pitch)
- dev_priv->cfb_pitch = fb->pitch;
+static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
- /* FBC_CTL wants 64B units */
- dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj_priv->fence_reg;
- dev_priv->cfb_plane = intel_crtc->plane;
- plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
+ frame = I915_READ(frame_reg);
- /* Clear old tags */
- for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
- I915_WRITE(FBC_TAG + (i * 4), 0);
+ if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
+ WARN(1, "vblank wait timed out\n");
+}
- /* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
- if (obj_priv->tiling_mode != I915_TILING_NONE)
- fbc_ctl2 |= FBC_CTL_CPU_FENCE;
- I915_WRITE(FBC_CONTROL2, fbc_ctl2);
- I915_WRITE(FBC_FENCE_OFF, crtc->y);
+/**
+ * intel_wait_for_vblank - wait for vblank on a given pipe
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * Wait for vblank to occur on a given pipe. Needed for various bits of
+ * mode setting code.
+ */
+void intel_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipestat_reg = PIPESTAT(pipe);
- /* enable it... */
- fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
- fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
- fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
- if (obj_priv->tiling_mode != I915_TILING_NONE)
- fbc_ctl |= dev_priv->cfb_fence;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
+ if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ g4x_wait_for_vblank(dev, pipe);
+ return;
+ }
- DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
- dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
+ /* Clear existing vblank status. Note this will clear any other
+ * sticky status fields as well.
+ *
+ * This races with i915_driver_irq_handler() with the result
+ * that either function could miss a vblank event. Here it is not
+ * fatal, as we will either wait upon the next vblank interrupt or
+ * timeout. Generally speaking intel_wait_for_vblank() is only
+ * called during modeset at which time the GPU should be idle and
+ * should *not* be performing page flips and thus not waiting on
+ * vblanks...
+ * Currently, the result of us stealing a vblank from the irq
+ * handler is that a single frame will be skipped during swapbuffers.
+ */
+ I915_WRITE(pipestat_reg,
+ I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
+
+ /* Wait for vblank interrupt bit to set */
+ if (wait_for(I915_READ(pipestat_reg) &
+ PIPE_VBLANK_INTERRUPT_STATUS,
+ 50))
+ DRM_DEBUG_KMS("vblank wait timed out\n");
}
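The pre-G4X path in intel_wait_for_vblank() is a clear-then-poll pattern: write the sticky status bit back to clear it, then poll until the hardware sets it again (wait_for() in i915 is a poll-with-timeout macro). A rough user-space analogue, with a stubbed register standing in for PIPESTAT:

#include <stdio.h>
#include <stdbool.h>

#define STATUS_BIT (1u << 1)

static unsigned int fake_reg;           /* stand-in for a PIPESTAT register */

static unsigned int read_reg(void) { return fake_reg; }

static void write_reg(unsigned int v)
{
        /* writing 1 to a sticky status bit clears it */
        fake_reg &= ~(v & STATUS_BIT);
}

int main(void)
{
        /* clear any stale event first, as intel_wait_for_vblank() does */
        write_reg(read_reg() | STATUS_BIT);

        fake_reg |= STATUS_BIT;         /* pretend the hardware fires */

        /* poll until the bit sets again; a real loop would also time out */
        bool seen = false;
        for (int i = 0; i < 50 && !seen; i++)
                seen = read_reg() & STATUS_BIT;

        printf("vblank %s\n", seen ? "seen" : "timed out");
        return 0;
}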
-void i8xx_disable_fbc(struct drm_device *dev)
+static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg = PIPEDSL(pipe);
+ u32 line1, line2;
+ u32 line_mask;
+
+ if (IS_GEN2(dev))
+ line_mask = DSL_LINEMASK_GEN2;
+ else
+ line_mask = DSL_LINEMASK_GEN3;
+
+ line1 = I915_READ(reg) & line_mask;
+ mdelay(5);
+ line2 = I915_READ(reg) & line_mask;
+
+ return line1 == line2;
+}
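pipe_dsl_stopped() decides "stopped" by sampling the display line register twice, 5 ms apart: a running pipe scans thousands of lines in that window, so two equal reads mean scanout has halted. The same sample-twice idiom against a stub counter (a monotonic clock here, purely for illustration):

#include <stdio.h>
#include <time.h>

static unsigned int read_scanline(void)
{
        /* stub: the driver reads PIPEDSL; a clock stands in for it here */
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (unsigned int)(ts.tv_nsec / 1000);
}

int main(void)
{
        unsigned int line1 = read_scanline();
        struct timespec d = { 0, 5 * 1000 * 1000 };     /* 5 ms, as mdelay(5) */

        nanosleep(&d, NULL);
        printf("counter %s\n",
               read_scanline() == line1 ? "stopped" : "running");
        return 0;
}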
+
+/*
+ * intel_wait_for_pipe_off - wait for pipe to turn off
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * After disabling a pipe, we can't wait for vblank in the usual way,
+ * spinning on the vblank interrupt status bit, since we won't actually
+ * see an interrupt when the pipe is disabled.
+ *
+ * On Gen4 and above:
+ * wait for the pipe register state bit to turn off
+ *
+ * Otherwise:
+ * wait for the display line value to settle (it usually
+ * ends up stopping at the start of the next frame).
+ *
+ */
+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 fbc_ctl;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ int reg = PIPECONF(cpu_transcoder);
+
+ /* Wait for the Pipe State to go off */
+ if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
+ 100))
+ WARN(1, "pipe_off wait timed out\n");
+ } else {
+ /* Wait for the display line to settle */
+ if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
+ WARN(1, "pipe_off wait timed out\n");
+ }
+}
+
+/*
+ * ibx_digital_port_connected - is the specified port connected?
+ * @dev_priv: i915 private structure
+ * @port: the port to test
+ *
+ * Returns true if @port is connected, false otherwise.
+ */
+bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ u32 bit;
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ switch (port->port) {
+ case PORT_B:
+ bit = SDE_PORTB_HOTPLUG;
+ break;
+ case PORT_C:
+ bit = SDE_PORTC_HOTPLUG;
+ break;
+ case PORT_D:
+ bit = SDE_PORTD_HOTPLUG;
+ break;
+ default:
+ return true;
+ }
+ } else {
+ switch (port->port) {
+ case PORT_B:
+ bit = SDE_PORTB_HOTPLUG_CPT;
+ break;
+ case PORT_C:
+ bit = SDE_PORTC_HOTPLUG_CPT;
+ break;
+ case PORT_D:
+ bit = SDE_PORTD_HOTPLUG_CPT;
+ break;
+ default:
+ return true;
+ }
+ }
+
+ return I915_READ(SDEISR) & bit;
+}
+
+static const char *state_string(bool enabled)
+{
+ return enabled ? "on" : "off";
+}
- if (!I915_HAS_FBC(dev))
+/* Only for pre-ILK configs */
+void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & DPLL_VCO_ENABLE);
+ WARN(cur_state != state,
+ "PLL state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+
+/* XXX: the dsi pll is shared between MIPI DSI ports */
+static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
+{
+ u32 val;
+ bool cur_state;
+
+ mutex_lock(&dev_priv->dpio_lock);
+ val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ cur_state = val & DSI_PLL_VCO_EN;
+ WARN(cur_state != state,
+ "DSI PLL state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
+#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
+
+struct intel_shared_dpll *
+intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+ if (crtc->config.shared_dpll < 0)
+ return NULL;
+
+ return &dev_priv->shared_dplls[crtc->config.shared_dpll];
+}
+
+/* For ILK+ */
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ bool state)
+{
+ bool cur_state;
+ struct intel_dpll_hw_state hw_state;
+
+ if (HAS_PCH_LPT(dev_priv->dev)) {
+ DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
return;
+ }
- /* Disable compression */
- fbc_ctl = I915_READ(FBC_CONTROL);
- fbc_ctl &= ~FBC_CTL_EN;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
+ if (WARN(!pll,
+ "asserting DPLL %s with no DPLL\n", state_string(state)))
+ return;
- /* Wait for compressing bit to clear */
- while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING)
- ; /* nothing */
+ cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
+ WARN(cur_state != state,
+ "%s assertion failure (expected %s, current %s)\n",
+ pll->name, state_string(state), state_string(cur_state));
+}
- intel_wait_for_vblank(dev);
+static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+
+ if (HAS_DDI(dev_priv->dev)) {
+ /* DDI does not have a specific FDI_TX register */
+ reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+ val = I915_READ(reg);
+ cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
+ } else {
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_TX_ENABLE);
+ }
+ WARN(cur_state != state,
+ "FDI TX state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
+#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
- DRM_DEBUG_KMS("disabled FBC\n");
+static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_ENABLE);
+ WARN(cur_state != state,
+ "FDI RX state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
}
+#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
+#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
-static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
+static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ int reg;
+ u32 val;
+
+ /* ILK FDI PLL is always enabled */
+ if (INTEL_INFO(dev_priv->dev)->gen == 5)
+ return;
- return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+ /* On Haswell, DDI ports are responsible for the FDI PLL setup */
+ if (HAS_DDI(dev_priv->dev))
+ return;
+
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
-static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_PLL_ENABLE);
+ WARN(cur_state != state,
+ "FDI RX PLL assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+
+static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int pp_reg, lvds_reg;
+ u32 val;
+ enum pipe panel_pipe = PIPE_A;
+ bool locked = true;
+
+ if (HAS_PCH_SPLIT(dev_priv->dev)) {
+ pp_reg = PCH_PP_CONTROL;
+ lvds_reg = PCH_LVDS;
+ } else {
+ pp_reg = PP_CONTROL;
+ lvds_reg = LVDS;
+ }
+
+ val = I915_READ(pp_reg);
+ if (!(val & PANEL_POWER_ON) ||
+ ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
+ locked = false;
+
+ if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
+ panel_pipe = PIPE_B;
+
+ WARN(panel_pipe == pipe && locked,
+ "panel assertion failure, pipe %c regs locked\n",
+ pipe_name(pipe));
+}
+
+static void assert_cursor(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ struct drm_device *dev = dev_priv->dev;
+ bool cur_state;
+
+ if (IS_845G(dev) || IS_I865G(dev))
+ cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
+ else
+ cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+
+ WARN(cur_state != state,
+ "cursor on pipe %c assertion failure (expected %s, current %s)\n",
+ pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
+#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
+
+void assert_pipe(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+
+ /* if we need the pipe A quirk it must always be on */
+ if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+ state = true;
+
+ if (!intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
+ cur_state = false;
+ } else {
+ reg = PIPECONF(cpu_transcoder);
+ val = I915_READ(reg);
+ cur_state = !!(val & PIPECONF_ENABLE);
+ }
+
+ WARN(cur_state != state,
+ "pipe %c assertion failure (expected %s, current %s)\n",
+ pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+
+static void assert_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ cur_state = !!(val & DISPLAY_PLANE_ENABLE);
+ WARN(cur_state != state,
+ "plane %c assertion failure (expected %s, current %s)\n",
+ plane_name(plane), state_string(state), state_string(cur_state));
+}
+
+#define assert_plane_enabled(d, p) assert_plane(d, p, true)
+#define assert_plane_disabled(d, p) assert_plane(d, p, false)
+
+static void assert_planes_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int reg, i;
+ u32 val;
+ int cur_pipe;
+
+ /* Primary planes are fixed to pipes on gen4+ */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ reg = DSPCNTR(pipe);
+ val = I915_READ(reg);
+ WARN(val & DISPLAY_PLANE_ENABLE,
+ "plane %c assertion failure, should be disabled but not\n",
+ plane_name(pipe));
+ return;
+ }
+
+ /* Need to check both planes against the pipe */
+ for_each_pipe(i) {
+ reg = DSPCNTR(i);
+ val = I915_READ(reg);
+ cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+ DISPPLANE_SEL_PIPE_SHIFT;
+ WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
+ "plane %c assertion failure, should be off on pipe %c but is still active\n",
+ plane_name(i), pipe_name(pipe));
+ }
+}
+
+static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int reg, sprite;
+ u32 val;
+
+ if (IS_VALLEYVIEW(dev)) {
+ for_each_sprite(pipe, sprite) {
+ reg = SPCNTR(pipe, sprite);
+ val = I915_READ(reg);
+ WARN(val & SP_ENABLE,
+ "sprite %c assertion failure, should be off on pipe %c but is still active\n",
+ sprite_name(pipe, sprite), pipe_name(pipe));
+ }
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ reg = SPRCTL(pipe);
+ val = I915_READ(reg);
+ WARN(val & SPRITE_ENABLE,
+ "sprite %c assertion failure, should be off on pipe %c but is still active\n",
+ plane_name(pipe), pipe_name(pipe));
+ } else if (INTEL_INFO(dev)->gen >= 5) {
+ reg = DVSCNTR(pipe);
+ val = I915_READ(reg);
+ WARN(val & DVS_ENABLE,
+ "sprite %c assertion failure, should be off on pipe %c but is still active\n",
+ plane_name(pipe), pipe_name(pipe));
+ }
+}
+
+static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+ bool enabled;
+
+ WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
+
+ val = I915_READ(PCH_DREF_CONTROL);
+ enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
+ DREF_SUPERSPREAD_SOURCE_MASK));
+ WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+}
+
+static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+ bool enabled;
+
+ reg = PCH_TRANSCONF(pipe);
+ val = I915_READ(reg);
+ enabled = !!(val & TRANS_ENABLE);
+ WARN(enabled,
+ "transcoder assertion failed, should be off on pipe %c but is still active\n",
+ pipe_name(pipe));
+}
+
+static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 port_sel, u32 val)
+{
+ if ((val & DP_PORT_EN) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
+ u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
+ if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
+ return false;
+ } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+ if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
+ return false;
+ } else {
+ if ((val & DP_PIPE_MASK) != (pipe << 30))
+ return false;
+ }
+ return true;
+}
+
+static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ if ((val & SDVO_ENABLE) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
+ return false;
+ } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+ if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
+ return false;
+ } else {
+ if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
+ return false;
+ }
+ return true;
+}
+
+static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ if ((val & LVDS_PORT_EN) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
+ return false;
+ }
+ return true;
+}
+
+static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ if ((val & ADPA_DAC_ENABLE) == 0)
+ return false;
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
+ return false;
+ }
+ return true;
+}
+
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg, u32 port_sel)
+{
+ u32 val = I915_READ(reg);
+ WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
+ "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+ reg, pipe_name(pipe));
+
+ WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+ && (val & DP_PIPEB_SELECT),
+ "IBX PCH dp port still using transcoder B\n");
+}
+
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+{
+ u32 val = I915_READ(reg);
+ WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
+ "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
+ reg, pipe_name(pipe));
+
+ WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
+ && (val & SDVO_PIPE_B_SELECT),
+ "IBX PCH hdmi port still using transcoder B\n");
+}
+
+static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
+
+ reg = PCH_ADPA;
+ val = I915_READ(reg);
+ WARN(adpa_pipe_enabled(dev_priv, pipe, val),
+ "PCH VGA enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ reg = PCH_LVDS;
+ val = I915_READ(reg);
+ WARN(lvds_pipe_enabled(dev_priv, pipe, val),
+ "PCH LVDS enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
+}
+
+static void intel_init_dpio(struct drm_device *dev)
{
- struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
- DPFC_CTL_PLANEB);
- unsigned long stall_watermark = 200;
- u32 dpfc_ctl;
-
- dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj_priv->fence_reg;
- dev_priv->cfb_plane = intel_crtc->plane;
-
- dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
- if (obj_priv->tiling_mode != I915_TILING_NONE) {
- dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
- I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+ if (!IS_VALLEYVIEW(dev))
+ return;
+
+ /*
+ * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
+ * CHV x1 PHY (DP/HDMI D)
+ * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
+ */
+ if (IS_CHERRYVIEW(dev)) {
+ DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
+ DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
} else {
- I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
+ DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
}
+}
+
+static void intel_reset_dpio(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_VALLEYVIEW(dev))
+ return;
+
+ if (IS_CHERRYVIEW(dev)) {
+ enum dpio_phy phy;
+ u32 val;
+
+ for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) {
+ /* Poll for phypwrgood signal */
+ if (wait_for(I915_READ(DISPLAY_PHY_STATUS) &
+ PHY_POWERGOOD(phy), 1))
+ DRM_ERROR("Display PHY %d is not power up\n", phy);
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
- I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
- (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
- (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
- I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+ /*
+ * Deassert common lane reset for PHY.
+ *
+ * This should only be done on init and resume from S3
+ * with both PLLs disabled, or we risk losing DPIO and
+ * PLL synchronization.
+ */
+ val = I915_READ(DISPLAY_PHY_CONTROL);
+ I915_WRITE(DISPLAY_PHY_CONTROL,
+ PHY_COM_LANE_RESET_DEASSERT(phy, val));
+ }
+
+ } else {
+ /*
+ * If DPIO has already been reset, e.g. by BIOS, just skip all
+ * this.
+ */
+ if (I915_READ(DPIO_CTL) & DPIO_CMNRST)
+ return;
- /* enable it... */
- I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
+ /*
+ * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+ * Need to assert and de-assert PHY SB reset by gating the
+ * common lane power, then un-gating it.
+ * Simply ungating isn't enough to reset the PHY enough to get
+ * ports and lanes running.
+ */
+ __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
+ false);
+ __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
+ true);
+ }
+}
- DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+static void vlv_enable_pll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int reg = DPLL(crtc->pipe);
+ u32 dpll = crtc->config.dpll_hw_state.dpll;
+
+ assert_pipe_disabled(dev_priv, crtc->pipe);
+
+ /* No really, not for ILK+ */
+ BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
+
+ /* PLL is protected by panel, make sure we can write it */
+ if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
+ assert_panel_unlocked(dev_priv, crtc->pipe);
+
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150);
+
+ if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
+
+ I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
+ POSTING_READ(DPLL_MD(crtc->pipe));
+
+ /* We do this three times for luck */
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
}
-void g4x_disable_fbc(struct drm_device *dev)
+static void chv_enable_pll(struct intel_crtc *crtc)
{
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpfc_ctl;
+ int pipe = crtc->pipe;
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 tmp;
+
+ assert_pipe_disabled(dev_priv, crtc->pipe);
+
+ BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
+
+ mutex_lock(&dev_priv->dpio_lock);
- /* Disable compression */
- dpfc_ctl = I915_READ(DPFC_CONTROL);
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
- intel_wait_for_vblank(dev);
+ /* Enable back the 10bit clock to display controller */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ tmp |= DPIO_DCLKP_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
- DRM_DEBUG_KMS("disabled FBC\n");
+ /*
+ * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
+ */
+ udelay(1);
+
+ /* Enable PLL */
+ I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
+
+ /* Check PLL is locked */
+ if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ DRM_ERROR("PLL %d failed to lock\n", pipe);
+
+ /* not sure when this should be written */
+ I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
+ POSTING_READ(DPLL_MD(pipe));
+
+ mutex_unlock(&dev_priv->dpio_lock);
}
-static bool g4x_fbc_enabled(struct drm_crtc *crtc)
+static void i9xx_enable_pll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ int reg = DPLL(crtc->pipe);
+ u32 dpll = crtc->config.dpll_hw_state.dpll;
+
+ assert_pipe_disabled(dev_priv, crtc->pipe);
- return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+ /* No really, not for ILK+ */
+ BUG_ON(INTEL_INFO(dev)->gen >= 5);
+
+ /* PLL is protected by panel, make sure we can write it */
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ assert_panel_unlocked(dev_priv, crtc->pipe);
+
+ I915_WRITE(reg, dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(reg);
+ udelay(150);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(DPLL_MD(crtc->pipe),
+ crtc->config.dpll_hw_state.dpll_md);
+ } else {
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(reg, dpll);
+ }
+
+ /* We do this three times for luck */
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
}
/**
- * intel_update_fbc - enable/disable FBC as needed
- * @crtc: CRTC to point the compressor at
- * @mode: mode in use
+ * i9xx_disable_pll - disable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to disable
*
- * Set up the framebuffer compression hardware at mode set time. We
- * enable it if possible:
- * - plane A only (on pre-965)
- * - no pixel mulitply/line duplication
- * - no alpha buffer discard
- * - no dual wide
- * - framebuffer <= 2048 in width, 1536 in height
+ * Disable the PLL for @pipe, making sure the pipe is off first.
*
- * We can't assume that any compression will take place (worst case),
- * so the compressed buffer has to be the same size as the uncompressed
- * one. It also must reside (along with the line length buffer) in
- * stolen memory.
+ * Note! This is for pre-ILK only.
+ */
+static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ /* Don't disable the pipe A PLL if the pipe A quirk needs it to stay on */
+ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, pipe);
+
+ I915_WRITE(DPLL(pipe), 0);
+ POSTING_READ(DPLL(pipe));
+}
+
+static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ u32 val = 0;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, pipe);
+
+ /*
+ * Leave integrated clock source and reference clock enabled for pipe B.
+ * The latter is needed for VGA hotplug / manual detection.
+ */
+ if (pipe == PIPE_B)
+ val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
+ I915_WRITE(DPLL(pipe), val);
+ POSTING_READ(DPLL(pipe));
+}
+
+static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 val;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, pipe);
+
+ /* Set PLL en = 0 */
+ val = DPLL_SSC_REF_CLOCK_CHV;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ I915_WRITE(DPLL(pipe), val);
+ POSTING_READ(DPLL(pipe));
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Disable 10bit clock to display controller */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ val &= ~DPIO_DCLKP_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dport)
+{
+ u32 port_mask;
+ int dpll_reg;
+
+ switch (dport->port) {
+ case PORT_B:
+ port_mask = DPLL_PORTB_READY_MASK;
+ dpll_reg = DPLL(0);
+ break;
+ case PORT_C:
+ port_mask = DPLL_PORTC_READY_MASK;
+ dpll_reg = DPLL(0);
+ break;
+ case PORT_D:
+ port_mask = DPLL_PORTD_READY_MASK;
+ dpll_reg = DPIO_PHY_STATUS;
+ break;
+ default:
+ BUG();
+ }
+
+ if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
+ WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
+ port_name(dport->port), I915_READ(dpll_reg));
+}
+
+static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+
+ WARN_ON(!pll->refcount);
+ if (pll->active == 0) {
+ DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
+ WARN_ON(pll->on);
+ assert_shared_dpll_disabled(dev_priv, pll);
+
+ pll->mode_set(dev_priv, pll);
+ }
+}
+
+/**
+ * intel_enable_shared_dpll - enable PCH PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
*
- * We need to enable/disable FBC on a global basis.
+ * The PCH PLL needs to be enabled before the PCH transcoder, since it
+ * drives the transcoder clock.
*/
-static void intel_update_fbc(struct drm_crtc *crtc,
- struct drm_display_mode *mode)
+static void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane;
+ struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
- if (!i915_powersave)
+ if (WARN_ON(pll == NULL))
return;
- if (!dev_priv->display.fbc_enabled ||
- !dev_priv->display.enable_fbc ||
- !dev_priv->display.disable_fbc)
+ if (WARN_ON(pll->refcount == 0))
return;
- if (!crtc->fb)
+ DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
+ pll->name, pll->active, pll->on,
+ crtc->base.base.id);
+
+ if (pll->active++) {
+ WARN_ON(!pll->on);
+ assert_shared_dpll_enabled(dev_priv, pll);
return;
+ }
+ WARN_ON(pll->on);
- intel_fb = to_intel_framebuffer(fb);
- obj_priv = intel_fb->obj->driver_private;
+ DRM_DEBUG_KMS("enabling %s\n", pll->name);
+ pll->enable(dev_priv, pll);
+ pll->on = true;
+}
- /*
- * If FBC is already on, we just have to verify that we can
- * keep it that way...
- * Need to disable if:
- * - changing FBC params (stride, fence, mode)
- * - new fb is too large to fit in compressed buffer
- * - going to an unsupported config (interlace, pixel multiply, etc.)
- */
- if (intel_fb->obj->size > dev_priv->cfb_size) {
- DRM_DEBUG_KMS("framebuffer too large, disabling "
- "compression\n");
- goto out_disable;
- }
- if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
- (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
- DRM_DEBUG_KMS("mode incompatible with compression, "
- "disabling\n");
- goto out_disable;
+static void intel_disable_shared_dpll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+
+ /* PCH only available on ILK+ */
+ BUG_ON(INTEL_INFO(dev)->gen < 5);
+ if (WARN_ON(pll == NULL))
+ return;
+
+ if (WARN_ON(pll->refcount == 0))
+ return;
+
+ DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
+ pll->name, pll->active, pll->on,
+ crtc->base.base.id);
+
+ if (WARN_ON(pll->active == 0)) {
+ assert_shared_dpll_disabled(dev_priv, pll);
+ return;
}
- if ((mode->hdisplay > 2048) ||
- (mode->vdisplay > 1536)) {
- DRM_DEBUG_KMS("mode too large for compression, disabling\n");
- goto out_disable;
+
+ assert_shared_dpll_enabled(dev_priv, pll);
+ WARN_ON(!pll->on);
+ if (--pll->active)
+ return;
+
+ DRM_DEBUG_KMS("disabling %s\n", pll->name);
+ pll->disable(dev_priv, pll);
+ pll->on = false;
+}
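The shared-DPLL helpers above keep two counters: refcount (how many CRTCs are configured to use the PLL) and active (how many enabled CRTCs are currently driving it); the hardware is touched only on the 0 -> 1 and 1 -> 0 transitions of active. A minimal sketch of that pattern, independent of the i915 structures:

#include <stdio.h>
#include <stdbool.h>

struct shared_pll {
        const char *name;
        int refcount;   /* CRTCs configured to use this PLL */
        int active;     /* CRTCs currently running off it */
        bool on;        /* hardware state */
};

static void pll_enable(struct shared_pll *pll)
{
        if (pll->active++)      /* already running for another user */
                return;
        pll->on = true;         /* 0 -> 1: touch the hardware */
        printf("%s: hardware enabled\n", pll->name);
}

static void pll_disable(struct shared_pll *pll)
{
        if (--pll->active)      /* still needed by another user */
                return;
        pll->on = false;        /* 1 -> 0: touch the hardware */
        printf("%s: hardware disabled\n", pll->name);
}

int main(void)
{
        struct shared_pll pll = { .name = "PCH PLL A", .refcount = 2 };

        pll_enable(&pll);       /* first CRTC: hardware enable */
        pll_enable(&pll);       /* second CRTC: counted only */
        pll_disable(&pll);      /* one user left, hardware stays on */
        pll_disable(&pll);      /* last user gone, hardware disable */
        return 0;
}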
+
+static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t reg, val, pipeconf_val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(INTEL_INFO(dev)->gen < 5);
+
+ /* Make sure PCH DPLL is enabled */
+ assert_shared_dpll_enabled(dev_priv,
+ intel_crtc_to_shared_dpll(intel_crtc));
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, pipe);
+ assert_fdi_rx_enabled(dev_priv, pipe);
+
+ if (HAS_PCH_CPT(dev)) {
+ /* Workaround: Set the timing override bit before enabling the
+ * pch transcoder. */
+ reg = TRANS_CHICKEN2(pipe);
+ val = I915_READ(reg);
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(reg, val);
}
- if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
- DRM_DEBUG_KMS("plane not 0, disabling compression\n");
- goto out_disable;
+
+ reg = PCH_TRANSCONF(pipe);
+ val = I915_READ(reg);
+ pipeconf_val = I915_READ(PIPECONF(pipe));
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ /*
+		 * Make the BPC in the transcoder consistent with
+		 * that in the pipeconf reg.
+ */
+ val &= ~PIPECONF_BPC_MASK;
+ val |= pipeconf_val & PIPECONF_BPC_MASK;
}
- if (obj_priv->tiling_mode != I915_TILING_X) {
- DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
- goto out_disable;
+
+ val &= ~TRANS_INTERLACE_MASK;
+	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
+		if (HAS_PCH_IBX(dev_priv->dev) &&
+		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
+			val |= TRANS_LEGACY_INTERLACED_ILK;
+		else
+			val |= TRANS_INTERLACED;
+	} else
+		val |= TRANS_PROGRESSIVE;
+
+ I915_WRITE(reg, val | TRANS_ENABLE);
+ if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+ DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
+}
+
+static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ u32 val, pipeconf_val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5);
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
+ assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
+
+ /* Workaround: set timing override bit. */
+ val = I915_READ(_TRANSA_CHICKEN2);
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(_TRANSA_CHICKEN2, val);
+
+ val = TRANS_ENABLE;
+ pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
+
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
+ PIPECONF_INTERLACED_ILK)
+ val |= TRANS_INTERLACED;
+ else
+ val |= TRANS_PROGRESSIVE;
+
+ I915_WRITE(LPT_TRANSCONF, val);
+ if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
+ DRM_ERROR("Failed to enable PCH transcoder\n");
+}
+
+static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = dev_priv->dev;
+ uint32_t reg, val;
+
+ /* FDI relies on the transcoder */
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+
+ /* Ports must be off as well */
+ assert_pch_ports_disabled(dev_priv, pipe);
+
+ reg = PCH_TRANSCONF(pipe);
+ val = I915_READ(reg);
+ val &= ~TRANS_ENABLE;
+ I915_WRITE(reg, val);
+ /* wait for PCH transcoder off, transcoder state */
+ if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
+ DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
+
+ if (!HAS_PCH_IBX(dev)) {
+ /* Workaround: Clear the timing override chicken bit again. */
+ reg = TRANS_CHICKEN2(pipe);
+ val = I915_READ(reg);
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(reg, val);
}
+}
+
+static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(LPT_TRANSCONF);
+ val &= ~TRANS_ENABLE;
+ I915_WRITE(LPT_TRANSCONF, val);
+ /* wait for PCH transcoder off, transcoder state */
+ if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
+ DRM_ERROR("Failed to disable PCH transcoder\n");
+
+ /* Workaround: clear timing override bit. */
+ val = I915_READ(_TRANSA_CHICKEN2);
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(_TRANSA_CHICKEN2, val);
+}
+
+/**
+ * intel_enable_pipe - enable a pipe, asserting requirements
+ * @crtc: crtc responsible for the pipe
+ *
+ * Enable @crtc's pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
+ */
+static void intel_enable_pipe(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ enum pipe pch_transcoder;
+ int reg;
+ u32 val;
+
+ assert_planes_disabled(dev_priv, pipe);
+ assert_cursor_disabled(dev_priv, pipe);
+ assert_sprites_disabled(dev_priv, pipe);
+
+ if (HAS_PCH_LPT(dev_priv->dev))
+ pch_transcoder = TRANSCODER_A;
+ else
+ pch_transcoder = pipe;
- if (dev_priv->display.fbc_enabled(crtc)) {
- /* We can re-enable it in this case, but need to update pitch */
- if (fb->pitch > dev_priv->cfb_pitch)
- dev_priv->display.disable_fbc(dev);
- if (obj_priv->fence_reg != dev_priv->cfb_fence)
- dev_priv->display.disable_fbc(dev);
- if (plane != dev_priv->cfb_plane)
- dev_priv->display.disable_fbc(dev);
+ /*
+ * A pipe without a PLL won't actually be able to drive bits from
+ * a plane. On ILK+ the pipe PLLs are integrated, so we don't
+ * need the check.
+ */
+	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
+		if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
+			assert_dsi_pll_enabled(dev_priv);
+		else
+			assert_pll_enabled(dev_priv, pipe);
+	} else {
+ if (crtc->config.has_pch_encoder) {
+ /* if driving the PCH, we need FDI enabled */
+ assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+ assert_fdi_tx_pll_enabled(dev_priv,
+ (enum pipe) cpu_transcoder);
+ }
+ /* FIXME: assert CPU port conditions for SNB+ */
}
- if (!dev_priv->display.fbc_enabled(crtc)) {
- /* Now try to turn it back on if possible */
- dev_priv->display.enable_fbc(crtc, 500);
+ reg = PIPECONF(cpu_transcoder);
+ val = I915_READ(reg);
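+	/* Pipe already on: only the pipe A force quirk may leave it enabled. */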
+ if (val & PIPECONF_ENABLE) {
+ WARN_ON(!(pipe == PIPE_A &&
+ dev_priv->quirks & QUIRK_PIPEA_FORCE));
+ return;
}
- return;
+ I915_WRITE(reg, val | PIPECONF_ENABLE);
+ POSTING_READ(reg);
+}
-out_disable:
- DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
- /* Multiple disables should be harmless */
- if (dev_priv->display.fbc_enabled(crtc))
- dev_priv->display.disable_fbc(dev);
+/**
+ * intel_disable_pipe - disable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to disable
+ *
+ * Disable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe has shut down before returning.
+ */
+static void intel_disable_pipe(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ int reg;
+ u32 val;
+
+ /*
+ * Make sure planes won't keep trying to pump pixels to us,
+ * or we might hang the display.
+ */
+ assert_planes_disabled(dev_priv, pipe);
+ assert_cursor_disabled(dev_priv, pipe);
+ assert_sprites_disabled(dev_priv, pipe);
+
+ /* Don't disable pipe A or pipe A PLLs if needed */
+ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ reg = PIPECONF(cpu_transcoder);
+ val = I915_READ(reg);
+ if ((val & PIPECONF_ENABLE) == 0)
+ return;
+
+ I915_WRITE(reg, val & ~PIPECONF_ENABLE);
+ intel_wait_for_pipe_off(dev_priv->dev, pipe);
}
-static int
-intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+/*
+ * Plane regs are double buffered; going from enabled to disabled needs a
+ * trigger in order to latch. The display address reg provides this.
+ */
+void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
+ enum plane plane)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
+
+ I915_WRITE(reg, I915_READ(reg));
+ POSTING_READ(reg);
+}
+
+/**
+ * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
+ * @dev_priv: i915 private structure
+ * @plane: plane to enable
+ * @pipe: pipe being fed
+ *
+ * Enable @plane on @pipe, making sure that @pipe is running first.
+ */
+static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ int reg;
+ u32 val;
+
+ /* If the pipe isn't enabled, we can't pump pixels and may hang */
+ assert_pipe_enabled(dev_priv, pipe);
+
+ if (intel_crtc->primary_enabled)
+ return;
+
+ intel_crtc->primary_enabled = true;
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ WARN_ON(val & DISPLAY_PLANE_ENABLE);
+
+ I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
+ intel_flush_primary_plane(dev_priv, plane);
+
+ /*
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
+ */
+ if (IS_BROADWELL(dev))
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+}
+
+/**
+ * intel_disable_primary_hw_plane - disable the primary hardware plane
+ * @dev_priv: i915 private structure
+ * @plane: plane to disable
+ * @pipe: pipe consuming the data
+ *
+ * Disable @plane; should be an independent operation.
+ */
+static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
+{
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ int reg;
+ u32 val;
+
+ if (!intel_crtc->primary_enabled)
+ return;
+
+ intel_crtc->primary_enabled = false;
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);
+
+ I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_primary_plane(dev_priv, plane);
+}
+
+static bool need_vtd_wa(struct drm_device *dev)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
+ return true;
+#endif
+ return false;
+}
+
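+/* Align fb height to the X-tile height: 16 rows on gen2, 8 rows afterwards. */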
+static int intel_align_height(struct drm_device *dev, int height, bool tiled)
+{
+ int tile_height;
+
+ tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
+ return ALIGN(height, tile_height);
+}
+
+int
+intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *pipelined)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 alignment;
int ret;
- switch (obj_priv->tiling_mode) {
+ switch (obj->tiling_mode) {
case I915_TILING_NONE:
- alignment = 64 * 1024;
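+		/* Untiled scanout: 128K on i965g/gm, 4K on gen4+, 64K before */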
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ alignment = 128 * 1024;
+ else if (INTEL_INFO(dev)->gen >= 4)
+ alignment = 4 * 1024;
+ else
+ alignment = 64 * 1024;
break;
case I915_TILING_X:
/* pin() will align the object as required by fence */
alignment = 0;
break;
case I915_TILING_Y:
- /* FIXME: Is this true? */
- DRM_ERROR("Y tiled not allowed for scan out buffers\n");
+ WARN(1, "Y tiled bo slipped through, driver bug!\n");
return -EINVAL;
default:
BUG();
}
- ret = i915_gem_object_pin(obj, alignment);
- if (ret != 0)
- return ret;
+ /* Note that the w/a also requires 64 PTE of padding following the
+ * bo. We currently fill all unused PTE with the shadow page and so
+ * we should always have valid PTE following the scanout preventing
+ * the VT-d warning.
+ */
+ if (need_vtd_wa(dev) && alignment < 256 * 1024)
+ alignment = 256 * 1024;
+
+ dev_priv->mm.interruptible = false;
+ ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
+ if (ret)
+ goto err_interruptible;
/* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
* framebuffer compression. For simplicity, we always install
* a fence as the cost is not that onerous.
*/
- if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
- obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj);
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- return ret;
- }
- }
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ goto err_unpin;
+ i915_gem_object_pin_fence(obj);
+
+ dev_priv->mm.interruptible = true;
return 0;
+
+err_unpin:
+ i915_gem_object_unpin_from_display_plane(obj);
+err_interruptible:
+ dev_priv->mm.interruptible = true;
+ return ret;
}
-static int
-intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- unsigned long Start, Offset;
- int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
- int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- u32 dspcntr;
- int ret;
+ i915_gem_object_unpin_fence(obj);
+ i915_gem_object_unpin_from_display_plane(obj);
+}
- /* no fb bound */
- if (!crtc->fb) {
- DRM_DEBUG_KMS("No FB bound\n");
- return 0;
+/* Computes the linear offset to the base tile and adjusts x, y. The bytes
+ * per pixel value is assumed to be a power of two. */
+unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+ unsigned int tiling_mode,
+ unsigned int cpp,
+ unsigned int pitch)
+{
+ if (tiling_mode != I915_TILING_NONE) {
+ unsigned int tile_rows, tiles;
+
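+		/* X-major tiles are 512 bytes wide and 8 rows tall (4096 bytes) */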
+ tile_rows = *y / 8;
+ *y %= 8;
+
+ tiles = *x / (512/cpp);
+ *x %= 512/cpp;
+
+ return tile_rows * pitch * 8 + tiles * 4096;
+ } else {
+ unsigned int offset;
+
+ offset = *y * pitch + *x * cpp;
+ *y = 0;
+ *x = (offset & 4095) / cpp;
+ return offset & -4096;
}
+}
- switch (plane) {
- case 0:
- case 1:
- break;
+int intel_format_to_fourcc(int format)
+{
+ switch (format) {
+ case DISPPLANE_8BPP:
+ return DRM_FORMAT_C8;
+ case DISPPLANE_BGRX555:
+ return DRM_FORMAT_XRGB1555;
+ case DISPPLANE_BGRX565:
+ return DRM_FORMAT_RGB565;
default:
- DRM_ERROR("Can't update plane %d in SAREA\n", plane);
- return -EINVAL;
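+	/* unknown plane formats map to XRGB8888 */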
+ case DISPPLANE_BGRX888:
+ return DRM_FORMAT_XRGB8888;
+ case DISPPLANE_RGBX888:
+ return DRM_FORMAT_XBGR8888;
+ case DISPPLANE_BGRX101010:
+ return DRM_FORMAT_XRGB2101010;
+ case DISPPLANE_RGBX101010:
+ return DRM_FORMAT_XBGR2101010;
}
+}
- intel_fb = to_intel_framebuffer(crtc->fb);
- obj = intel_fb->obj;
- obj_priv = obj->driver_private;
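+/*
+ * Wrap the firmware-programmed framebuffer in stolen memory in a GEM object
+ * so that the boot image can be taken over without an extra modeset.
+ */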
+static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
+ struct intel_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_gem_object *obj = NULL;
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ u32 base = plane_config->base;
+
+ if (plane_config->size == 0)
+ return false;
+
+ obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
+ plane_config->size);
+ if (!obj)
+ return false;
+
+ if (plane_config->tiled) {
+ obj->tiling_mode = I915_TILING_X;
+ obj->stride = crtc->base.primary->fb->pitches[0];
+ }
+
+ mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
+ mode_cmd.width = crtc->base.primary->fb->width;
+ mode_cmd.height = crtc->base.primary->fb->height;
+ mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj);
- if (ret != 0) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
+
+ if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
+ &mode_cmd, obj)) {
+ DRM_DEBUG_KMS("intel fb init failed\n");
+ goto out_unref_obj;
}
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ mutex_unlock(&dev->struct_mutex);
+
+ DRM_DEBUG_KMS("plane fb obj %p\n", obj);
+ return true;
+
+out_unref_obj:
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+ return false;
+}
+
+static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
+ struct intel_plane_config *plane_config)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_crtc *c;
+ struct intel_crtc *i;
+ struct intel_framebuffer *fb;
+
+ if (!intel_crtc->base.primary->fb)
+ return;
+
+ if (intel_alloc_plane_obj(intel_crtc, plane_config))
+ return;
+
+ kfree(intel_crtc->base.primary->fb);
+ intel_crtc->base.primary->fb = NULL;
+
+ /*
+ * Failed to alloc the obj, check to see if we should share
+ * an fb with another CRTC instead
+ */
+ for_each_crtc(dev, c) {
+ i = to_intel_crtc(c);
+
+ if (c == &intel_crtc->base)
+ continue;
+
+ if (!i->active || !c->primary->fb)
+ continue;
+
+ fb = to_intel_framebuffer(c->primary->fb);
+ if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
+ drm_framebuffer_reference(c->primary->fb);
+ intel_crtc->base.primary->fb = c->primary->fb;
+ break;
+ }
}
+}
- dspcntr = I915_READ(dspcntr_reg);
+static void i9xx_update_primary_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int plane = intel_crtc->plane;
+ unsigned long linear_offset;
+ u32 dspcntr;
+ u32 reg;
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ reg = DSPCNTR(plane);
+ dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (crtc->fb->bits_per_pixel) {
- case 8:
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
break;
- case 16:
- if (crtc->fb->depth == 15)
- dspcntr |= DISPPLANE_15_16BPP;
- else
- dspcntr |= DISPPLANE_16BPP;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ dspcntr |= DISPPLANE_BGRX555;
break;
- case 24:
- case 32:
- if (crtc->fb->depth == 30)
- dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
- else
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
break;
default:
- DRM_ERROR("Unknown color depth\n");
- i915_gem_object_unpin(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ BUG();
}
- if (IS_I965G(dev)) {
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
else
dspcntr &= ~DISPPLANE_TILED;
}
- if (IS_IRONLAKE(dev))
- /* must disable */
+ if (IS_G4X(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
- I915_WRITE(dspcntr_reg, dspcntr);
+ I915_WRITE(reg, dspcntr);
- Start = obj_priv->gtt_offset;
- Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+ linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
- I915_WRITE(dspstride, crtc->fb->pitch);
- if (IS_I965G(dev)) {
- I915_WRITE(dspbase, Offset);
- I915_READ(dspbase);
- I915_WRITE(dspsurf, Start);
- I915_READ(dspsurf);
- I915_WRITE(dsptileoff, (y << 16) | x);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ intel_crtc->dspaddr_offset =
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
+ linear_offset -= intel_crtc->dspaddr_offset;
} else {
- I915_WRITE(dspbase, Start + Offset);
- I915_READ(dspbase);
+ intel_crtc->dspaddr_offset = linear_offset;
}
- if ((IS_I965G(dev) || plane == 0))
- intel_update_fbc(crtc, &crtc->mode);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
+ fb->pitches[0]);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(DSPSURF(plane),
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPLINOFF(plane), linear_offset);
+ } else
+ I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
+ POSTING_READ(reg);
+}
- intel_wait_for_vblank(dev);
+static void ironlake_update_primary_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int plane = intel_crtc->plane;
+ unsigned long linear_offset;
+ u32 dspcntr;
+ u32 reg;
- if (old_fb) {
- intel_fb = to_intel_framebuffer(old_fb);
- obj_priv = intel_fb->obj->driver_private;
- i915_gem_object_unpin(intel_fb->obj);
- }
- intel_increase_pllclock(crtc, true);
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
- mutex_unlock(&dev->struct_mutex);
+ reg = DSPCNTR(plane);
+ dspcntr = I915_READ(reg);
+ /* Mask out pixel format bits in case we change it */
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
+ break;
+ default:
+ BUG();
+ }
- if (!dev->primary->master)
- return 0;
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
- master_priv = dev->primary->master->driver_priv;
- if (!master_priv->sarea_priv)
- return 0;
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
+ else
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
- if (pipe) {
- master_priv->sarea_priv->pipeB_x = x;
- master_priv->sarea_priv->pipeB_y = y;
+ I915_WRITE(reg, dspcntr);
+
+ linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ intel_crtc->dspaddr_offset =
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
+ linear_offset -= intel_crtc->dspaddr_offset;
+
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
+ fb->pitches[0]);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+ I915_WRITE(DSPSURF(plane),
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
- master_priv->sarea_priv->pipeA_x = x;
- master_priv->sarea_priv->pipeA_y = y;
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPLINOFF(plane), linear_offset);
}
+ POSTING_READ(reg);
+}
+
+/* Assume fb object is pinned & idle & fenced and just update base pointers */
+static int
+intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
+ intel_increase_pllclock(crtc);
+
+ dev_priv->display.update_primary_plane(crtc, fb, x, y);
return 0;
}
-/* Disable the VGA plane that we never use */
-static void i915_disable_vga (struct drm_device *dev)
+void intel_display_handle_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u8 sr1;
- u32 vga_reg;
+ struct drm_crtc *crtc;
- if (IS_IRONLAKE(dev))
- vga_reg = CPU_VGACNTRL;
- else
- vga_reg = VGACNTRL;
+ /*
+ * Flips in the rings have been nuked by the reset,
+ * so complete all pending flips so that user space
+ * will get its events and not get stuck.
+ *
+ * Also update the base address of all primary
+	 * planes to the last fb to make sure we're
+ * showing the correct fb after a reset.
+ *
+ * Need to make two loops over the crtcs so that we
+ * don't try to grab a crtc mutex before the
+ * pending_flip_queue really got woken up.
+ */
- if (I915_READ(vga_reg) & VGA_DISP_DISABLE)
- return;
+ for_each_crtc(dev, crtc) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum plane plane = intel_crtc->plane;
- I915_WRITE8(VGA_SR_INDEX, 1);
- sr1 = I915_READ8(VGA_SR_DATA);
- I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5));
- udelay(100);
+ intel_prepare_page_flip(dev, plane);
+ intel_finish_page_flip_plane(dev, plane);
+ }
- I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ for_each_crtc(dev, crtc) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ drm_modeset_lock(&crtc->mutex, NULL);
+ /*
+ * FIXME: Once we have proper support for primary planes (and
+ * disabling them without disabling the entire crtc) allow again
+ * a NULL crtc->primary->fb.
+ */
+ if (intel_crtc->active && crtc->primary->fb)
+ dev_priv->display.update_primary_plane(crtc,
+ crtc->primary->fb,
+ crtc->x,
+ crtc->y);
+ drm_modeset_unlock(&crtc->mutex);
+ }
}
-static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
+static int
+intel_finish_fb(struct drm_framebuffer *old_fb)
+{
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ bool was_interruptible = dev_priv->mm.interruptible;
+ int ret;
+
+ /* Big Hammer, we also need to ensure that any pending
+ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+ * current scanout is retired before unpinning the old
+ * framebuffer.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
+ */
+ dev_priv->mm.interruptible = false;
+ ret = i915_gem_object_finish_gpu(obj);
+ dev_priv->mm.interruptible = was_interruptible;
+
+ return ret;
+}
+
+static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpa_ctl;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long flags;
+ bool pending;
+
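+	/* A GPU reset nukes pending flips, so there is nothing to wait for. */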
+ if (i915_reset_in_progress(&dev_priv->gpu_error) ||
+ intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ return false;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ pending = to_intel_crtc(crtc)->unpin_work != NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
- DRM_DEBUG_KMS("\n");
- dpa_ctl = I915_READ(DP_A);
- dpa_ctl &= ~DP_PLL_ENABLE;
- I915_WRITE(DP_A, dpa_ctl);
+ return pending;
}
-static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
+static int
+intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpa_ctl;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_framebuffer *old_fb;
+ int ret;
- dpa_ctl = I915_READ(DP_A);
- dpa_ctl |= DP_PLL_ENABLE;
- I915_WRITE(DP_A, dpa_ctl);
- udelay(200);
-}
+ if (intel_crtc_has_pending_flip(crtc)) {
+ DRM_ERROR("pipe is still busy with an old pageflip\n");
+ return -EBUSY;
+ }
+
+ /* no fb bound */
+ if (!fb) {
+ DRM_ERROR("No FB bound\n");
+ return 0;
+ }
+ if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
+ DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
+ plane_name(intel_crtc->plane),
+ INTEL_INFO(dev)->num_pipes);
+ return -EINVAL;
+ }
-static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
+ mutex_lock(&dev->struct_mutex);
+ ret = intel_pin_and_fence_fb_obj(dev,
+ to_intel_framebuffer(fb)->obj,
+ NULL);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret != 0) {
+ DRM_ERROR("pin & fence failed\n");
+ return ret;
+ }
+
+ /*
+ * Update pipe size and adjust fitter if needed: the reason for this is
+ * that in compute_mode_changes we check the native mode (not the pfit
+ * mode) to see if we can flip rather than do a full mode set. In the
+ * fastboot case, we'll flip, but if we don't update the pipesrc and
+ * pfit state, we'll end up with a big fb scanned out into the wrong
+ * sized surface.
+ *
+ * To fix this properly, we need to hoist the checks up into
+ * compute_mode_changes (or above), check the actual pfit state and
+ * whether the platform allows pfit disable with pipe active, and only
+ * then update the pipesrc and pfit state, even on the flip path.
+ */
+ if (i915.fastboot) {
+ const struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+
+ I915_WRITE(PIPESRC(intel_crtc->pipe),
+ ((adjusted_mode->crtc_hdisplay - 1) << 16) |
+ (adjusted_mode->crtc_vdisplay - 1));
+ if (!intel_crtc->config.pch_pfit.enabled &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+ I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
+ I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
+ I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
+ }
+ intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
+ intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
+ }
+
+ dev_priv->display.update_primary_plane(crtc, fb, x, y);
+
+ old_fb = crtc->primary->fb;
+ crtc->primary->fb = fb;
+ crtc->x = x;
+ crtc->y = y;
+
+ if (old_fb) {
+ if (intel_crtc->active && old_fb != fb)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ intel_edp_psr_update(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpa_ctl;
-
- DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
- dpa_ctl = I915_READ(DP_A);
- dpa_ctl &= ~DP_PLL_FREQ_MASK;
-
- if (clock < 200000) {
- u32 temp;
- dpa_ctl |= DP_PLL_FREQ_160MHZ;
- /* workaround for 160Mhz:
- 1) program 0x4600c bits 15:0 = 0x8124
- 2) program 0x46010 bit 0 = 1
- 3) program 0x46034 bit 24 = 1
- 4) program 0x64000 bit 14 = 1
- */
- temp = I915_READ(0x4600c);
- temp &= 0xffff0000;
- I915_WRITE(0x4600c, temp | 0x8124);
-
- temp = I915_READ(0x46010);
- I915_WRITE(0x46010, temp | 1);
-
- temp = I915_READ(0x46034);
- I915_WRITE(0x46034, temp | (1 << 24));
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* enable normal train */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (IS_IVYBRIDGE(dev)) {
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
} else {
- dpa_ctl |= DP_PLL_FREQ_270MHZ;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
}
- I915_WRITE(DP_A, dpa_ctl);
+ I915_WRITE(reg, temp);
- udelay(500);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+ /* wait one idle pattern time */
+ POSTING_READ(reg);
+ udelay(1000);
+
+ /* IVB wants error correction enabled */
+ if (IS_IVYBRIDGE(dev))
+ I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
+ FDI_FE_ERRC_ENABLE);
}
-static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
+static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
+{
+ return crtc->base.enabled && crtc->active &&
+ crtc->config.has_pch_encoder;
+}
+
+static void ivb_modeset_global_resources(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *pipe_B_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+ struct intel_crtc *pipe_C_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
+ uint32_t temp;
+
+ /*
+	 * When everything is off, disable fdi C so that we can enable fdi B
+ * with all lanes. Note that we don't care about enabled pipes without
+ * an enabled pch encoder.
+ */
+ if (!pipe_has_enabled_pch(pipe_B_crtc) &&
+ !pipe_has_enabled_pch(pipe_C_crtc)) {
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+ temp = I915_READ(SOUTH_CHICKEN1);
+ temp &= ~FDI_BC_BIFURCATION_SELECT;
+ DRM_DEBUG_KMS("disabling fdi C rx\n");
+ I915_WRITE(SOUTH_CHICKEN1, temp);
+ }
+}
+
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
- int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
- int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
- int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
- int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
- int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B;
- int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B;
- int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B;
- int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
- int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
- int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
- u32 temp;
- int tries = 5, j, n;
- u32 pipe_bpc;
+ u32 reg, temp, tries;
+
+ /* FDI needs bits from pipe first */
+ assert_pipe_enabled(dev_priv, pipe);
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+	   for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(reg, temp);
+ I915_READ(reg);
+ udelay(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(150);
+
+	/* Ironlake workaround, enable clock pointer after FDI enable */
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+ FDI_RX_PHASE_SYNC_POINTER_EN);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if ((temp & FDI_RX_BIT_LOCK)) {
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ break;
+ }
+ }
+ if (tries == 5)
+ DRM_ERROR("FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(150);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (tries == 5)
+ DRM_ERROR("FDI train 2 fail!\n");
- temp = I915_READ(pipeconf_reg);
- pipe_bpc = temp & PIPE_BPC_MASK;
+ DRM_DEBUG_KMS("FDI train done\n");
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
- if ((temp & LVDS_PORT_EN) == 0) {
- I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
- POSTING_READ(PCH_LVDS);
- }
- }
+}
- if (HAS_eDP) {
- /* enable eDP PLL */
- ironlake_enable_pll_edp(crtc);
- } else {
- /* enable PCH DPLL */
- temp = I915_READ(pch_dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
- }
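+/* Voltage swing/pre-emphasis settings tried during SNB FDI link training. */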
+static const int snb_b_fdi_train_param[] = {
+ FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+ FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+ FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
- /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
- temp = I915_READ(fdi_rx_reg);
- /*
- * make the BPC in FDI Rx be consistent with that in
- * pipeconf reg.
- */
- temp &= ~(0x7 << 16);
- temp |= (pipe_bpc << 11);
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
- FDI_SEL_PCDCLK |
- FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
- I915_READ(fdi_rx_reg);
- udelay(200);
-
- /* Enable CPU FDI TX PLL, always on for Ironlake */
- temp = I915_READ(fdi_tx_reg);
- if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
- I915_READ(fdi_tx_reg);
- udelay(100);
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp, i, retry;
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+	   for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+ I915_WRITE(FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(500);
+
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_BIT_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ break;
}
+ udelay(50);
}
+ if (retry < 5)
+ break;
+ }
+ if (i == 4)
+ DRM_ERROR("FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ if (IS_GEN6(dev)) {
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+ I915_WRITE(reg, temp);
- /* Enable panel fitting for LVDS */
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(pf_ctl_reg);
- I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ }
+ I915_WRITE(reg, temp);
- /* currently full aspect */
- I915_WRITE(pf_win_pos, 0);
+ POSTING_READ(reg);
+ udelay(150);
- I915_WRITE(pf_win_size,
- (dev_priv->panel_fixed_mode->hdisplay << 16) |
- (dev_priv->panel_fixed_mode->vdisplay));
- }
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(reg, temp);
- /* Enable CPU pipe */
- temp = I915_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) == 0) {
- I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
- I915_READ(pipeconf_reg);
- udelay(100);
- }
+ POSTING_READ(reg);
+ udelay(500);
- /* configure and enable CPU plane */
- temp = I915_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ udelay(50);
}
+ if (retry < 5)
+ break;
+ }
+ if (i == 4)
+ DRM_ERROR("FDI train 2 fail!\n");
- if (!HAS_eDP) {
- /* enable CPU FDI TX and PCH FDI RX */
- temp = I915_READ(fdi_tx_reg);
- temp |= FDI_TX_ENABLE;
- temp |= FDI_DP_PORT_WIDTH_X4; /* default */
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_tx_reg, temp);
- I915_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
- I915_READ(fdi_rx_reg);
-
- udelay(150);
-
- /* Train FDI. */
- /* umask FDI RX Interrupt symbol_lock and bit_lock bit
- for train result */
- temp = I915_READ(fdi_rx_imr_reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(fdi_rx_imr_reg, temp);
- I915_READ(fdi_rx_imr_reg);
- udelay(150);
-
- temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+/* Manual link training for Ivy Bridge A0 parts */
+static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp, i, j;
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+	   for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(150);
+
+ DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
+ I915_READ(FDI_RX_IIR(pipe)));
+
+ /* Try each vswing and preemphasis setting twice before moving on */
+ for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
+ /* disable first in case we need to retry */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+ temp &= ~FDI_TX_ENABLE;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_AUTO;
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp &= ~FDI_RX_ENABLE;
+ I915_WRITE(reg, temp);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[j/2];
+ temp |= FDI_COMPOSITE_SYNC;
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+ I915_WRITE(FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ temp |= FDI_COMPOSITE_SYNC;
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(1); /* should be 0.5us */
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
- if ((temp & FDI_RX_BIT_LOCK) == 0) {
- for (j = 0; j < tries; j++) {
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
- temp);
- if (temp & FDI_RX_BIT_LOCK)
- break;
- udelay(200);
- }
- if (j != tries)
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
- else
- DRM_DEBUG_KMS("train 1 fail\n");
- } else {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("train 1 ok 2!\n");
+ if (temp & FDI_RX_BIT_LOCK ||
+ (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
+ i);
+ break;
}
- temp = I915_READ(fdi_tx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_tx_reg, temp);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_rx_reg, temp);
-
- udelay(150);
+ udelay(1); /* should be 0.5us */
+ }
+ if (i == 4) {
+ DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
+ continue;
+ }
- temp = I915_READ(fdi_rx_iir_reg);
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(2); /* should be 1.5us */
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
- if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
- for (j = 0; j < tries; j++) {
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
- temp);
- if (temp & FDI_RX_SYMBOL_LOCK)
- break;
- udelay(200);
- }
- if (j != tries) {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("train 2 ok 1!\n");
- } else
- DRM_DEBUG_KMS("train 2 fail\n");
- } else {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("train 2 ok 2!\n");
+ if (temp & FDI_RX_SYMBOL_LOCK ||
+ (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
+ i);
+ goto train_done;
}
- DRM_DEBUG_KMS("train done\n");
+ udelay(2); /* should be 1.5us */
+ }
+ if (i == 4)
+ DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
+ }
- /* set transcoder timing */
- I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
- I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
- I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));
+train_done:
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
- I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
- I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
- I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
+static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
- /* enable PCH transcoder */
- temp = I915_READ(transconf_reg);
- /*
- * make the BPC in transcoder be consistent with
- * that in pipeconf reg.
- */
- temp &= ~PIPE_BPC_MASK;
- temp |= pipe_bpc;
- I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
- I915_READ(transconf_reg);
- while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
- ;
+ /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
- /* enable normal */
+ POSTING_READ(reg);
+ udelay(200);
- temp = I915_READ(fdi_tx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
- FDI_TX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_tx_reg);
+ /* Switch from Rawclk to PCDclk */
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp | FDI_PCDCLK);
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE |
- FDI_RX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_rx_reg);
+ POSTING_READ(reg);
+ udelay(200);
- /* wait one idle pattern time */
- udelay(100);
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
- }
+ POSTING_READ(reg);
+ udelay(100);
+ }
+}
- intel_crtc_load_lut(crtc);
+static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
- break;
- case DRM_MODE_DPMS_OFF:
- DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
+ /* Switch from PCDclk to Rawclk */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_PCDCLK);
- /* Disable display plane */
- temp = I915_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- I915_READ(dspbase_reg);
- }
+ /* Disable CPU FDI TX PLL */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
- i915_disable_vga(dev);
+ POSTING_READ(reg);
+ udelay(100);
- /* disable cpu pipe, disable after all planes disabled */
- temp = I915_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- I915_READ(pipeconf_reg);
- n = 0;
- /* wait for cpu pipe off, pipe state */
- while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) {
- n++;
- if (n < 60) {
- udelay(500);
- continue;
- } else {
- DRM_DEBUG_KMS("pipe %d off delay\n",
- pipe);
- break;
- }
- }
- } else
- DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
- udelay(100);
+ /* Wait for the clocks to turn off. */
+ POSTING_READ(reg);
+ udelay(100);
+}
- /* Disable PF */
- temp = I915_READ(pf_ctl_reg);
- if ((temp & PF_ENABLE) != 0) {
- I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
- I915_READ(pf_ctl_reg);
- }
- I915_WRITE(pf_win_size, 0);
+static void ironlake_fdi_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
- /* disable CPU FDI tx and PCH FDI rx */
- temp = I915_READ(fdi_tx_reg);
- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE);
- I915_READ(fdi_tx_reg);
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+ POSTING_READ(reg);
- temp = I915_READ(fdi_rx_reg);
- /* BPC in FDI rx is consistent with that in pipeconf */
- temp &= ~(0x07 << 16);
- temp |= (pipe_bpc << 11);
- I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
- I915_READ(fdi_rx_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(0x7 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
- udelay(100);
+ POSTING_READ(reg);
+ udelay(100);
- /* still set train pattern 1 */
- temp = I915_READ(fdi_tx_reg);
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev))
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_tx_reg, temp);
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ I915_WRITE(reg, temp);
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_rx_reg, temp);
+ POSTING_READ(reg);
+ udelay(100);
+}
- udelay(100);
+bool intel_has_pending_fb_unpin(struct drm_device *dev)
+{
+ struct intel_crtc *crtc;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
- I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
- I915_READ(PCH_LVDS);
- udelay(100);
- }
+ /* Note that we don't need to be called with mode_config.lock here
+ * as our list of CRTC objects is static for the lifetime of the
+ * device and so cannot disappear as we iterate. Similarly, we can
+ * happily treat the predicates as racy, atomic checks as userspace
+	 * cannot claim and pin a new fb without at least acquiring the
+ * struct_mutex and so serialising with us.
+ */
+ for_each_intel_crtc(dev, crtc) {
+ if (atomic_read(&crtc->unpin_work_count) == 0)
+ continue;
- /* disable PCH transcoder */
- temp = I915_READ(transconf_reg);
- if ((temp & TRANS_ENABLE) != 0) {
- I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
- I915_READ(transconf_reg);
- n = 0;
- /* wait for PCH transcoder off, transcoder state */
- while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) {
- n++;
- if (n < 60) {
- udelay(500);
- continue;
- } else {
- DRM_DEBUG_KMS("transcoder %d off "
- "delay\n", pipe);
- break;
- }
- }
- }
- temp = I915_READ(transconf_reg);
- /* BPC in transcoder is consistent with that in pipeconf */
- temp &= ~PIPE_BPC_MASK;
- temp |= pipe_bpc;
- I915_WRITE(transconf_reg, temp);
- I915_READ(transconf_reg);
- udelay(100);
+ if (crtc->unpin_work)
+ intel_wait_for_vblank(dev, crtc->pipe);
- /* disable PCH DPLL */
- temp = I915_READ(pch_dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) != 0) {
- I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
+ return true;
+ }
+
+ return false;
+}
+
+void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (crtc->primary->fb == NULL)
+ return;
+
+ WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
+
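+	/* Wait up to 60 seconds for any outstanding flip to complete. */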
+ WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
+ !intel_crtc_has_pending_flip(crtc),
+ 60*HZ) == 0);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_finish_fb(crtc->primary->fb);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/* Program iCLKIP clock to the desired frequency */
+static void lpt_program_iclkip(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
+ u32 divsel, phaseinc, auxdiv, phasedir = 0;
+ u32 temp;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* It is necessary to ungate the pixclk gate prior to programming
+ * the divisors, and gate it back when it is done.
+ */
+ I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
+
+ /* Disable SSCCTL */
+ intel_sbi_write(dev_priv, SBI_SSCCTL6,
+ intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
+ SBI_SSCCTL_DISABLE,
+ SBI_ICLK);
+
+ /* 20MHz is a corner case which is out of range for the 7-bit divisor */
+ if (clock == 20000) {
+ auxdiv = 1;
+ divsel = 0x41;
+ phaseinc = 0x20;
+ } else {
+ /* The iCLK virtual clock root frequency is in MHz,
+	 * but the adjusted_mode->crtc_clock is in KHz. To get the
+ * divisors, it is necessary to divide one by another, so we
+ * convert the virtual clock precision to KHz here for higher
+ * precision.
+ */
+ u32 iclk_virtual_root_freq = 172800 * 1000;
+ u32 iclk_pi_range = 64;
+ u32 desired_divisor, msb_divisor_value, pi_value;
+
+ desired_divisor = (iclk_virtual_root_freq / clock);
+ msb_divisor_value = desired_divisor / iclk_pi_range;
+ pi_value = desired_divisor % iclk_pi_range;
+
+ auxdiv = 0;
+ divsel = msb_divisor_value - 2;
+ phaseinc = pi_value;
+ }
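+	/* e.g. a 108000 KHz clock gives desired_divisor = 172800000 / 108000
+	 * = 1600, so divsel = 1600/64 - 2 = 23 and phaseinc = 1600 % 64 = 0.
+	 */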
+
+ /* This should not happen with any sane values */
+ WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+ ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+ WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
+ ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+
+ DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+ clock,
+ auxdiv,
+ divsel,
+ phasedir,
+ phaseinc);
+
+ /* Program SSCDIVINTPHASE6 */
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+ temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
+ temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
+ temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
+ temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
+ intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
+
+ /* Program SSCAUXDIV */
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+ temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
+ temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
+ intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
+
+ /* Enable modulator and associated divider */
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp &= ~SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+
+ /* Wait for initialization time */
+ udelay(24);
+
+ I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
+ enum pipe pch_transcoder)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+
+ I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
+ I915_READ(HTOTAL(cpu_transcoder)));
+ I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
+ I915_READ(HBLANK(cpu_transcoder)));
+ I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
+ I915_READ(HSYNC(cpu_transcoder)));
+
+ I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
+ I915_READ(VTOTAL(cpu_transcoder)));
+ I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
+ I915_READ(VBLANK(cpu_transcoder)));
+ I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
+ I915_READ(VSYNC(cpu_transcoder)));
+ I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
+ I915_READ(VSYNCSHIFT(cpu_transcoder)));
+}
+
+static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t temp;
+
+ temp = I915_READ(SOUTH_CHICKEN1);
+ if (temp & FDI_BC_BIFURCATION_SELECT)
+ return;
+
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+ temp |= FDI_BC_BIFURCATION_SELECT;
+ DRM_DEBUG_KMS("enabling fdi C rx\n");
+ I915_WRITE(SOUTH_CHICKEN1, temp);
+ POSTING_READ(SOUTH_CHICKEN1);
+}
+
+static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ switch (intel_crtc->pipe) {
+ case PIPE_A:
+ break;
+ case PIPE_B:
+ if (intel_crtc->config.fdi_lanes > 2)
+ WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+ else
+ cpt_enable_fdi_bc_bifurcation(dev);
+
+ break;
+ case PIPE_C:
+ cpt_enable_fdi_bc_bifurcation(dev);
+
+ break;
+ default:
+ BUG();
+ }
+}
+
+/*
+ * Enable PCH resources required for PCH ports:
+ * - PCH PLLs
+ * - FDI training & RX/TX
+ * - update transcoder timings
+ * - DP transcoding bits
+ * - transcoder
+ */
+static void ironlake_pch_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ assert_pch_transcoder_disabled(dev_priv, pipe);
+
+ if (IS_IVYBRIDGE(dev))
+ ivybridge_update_fdi_bc_bifurcation(intel_crtc);
+
+ /* Write the TU size bits before fdi link training, so that error
+ * detection works. */
+ I915_WRITE(FDI_RX_TUSIZE1(pipe),
+ I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
+ /* For PCH output, training FDI link */
+ dev_priv->display.fdi_link_train(crtc);
+
+ /* We need to program the right clock selection before writing the pixel
+	 * multiplier into the DPLL. */
+ if (HAS_PCH_CPT(dev)) {
+ u32 sel;
+
+ temp = I915_READ(PCH_DPLL_SEL);
+ temp |= TRANS_DPLL_ENABLE(pipe);
+ sel = TRANS_DPLLB_SEL(pipe);
+ if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
+ temp |= sel;
+ else
+ temp &= ~sel;
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ }
+
+ /* XXX: pch pll's can be enabled any time before we enable the PCH
+ * transcoder, and we actually should do this to not upset any PCH
+	 * transcoder that already uses the clock when we share it.
+ *
+ * Note that enable_shared_dpll tries to do the right thing, but
+ * get_shared_dpll unconditionally resets the pll - we need that to have
+ * the right LVDS enable sequence. */
+ intel_enable_shared_dpll(intel_crtc);
+
+ /* set transcoder timing, panel must allow it */
+ assert_panel_unlocked(dev_priv, pipe);
+ ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
+
+ intel_fdi_normal_train(crtc);
+
+ /* For PCH DP, enable TRANS_DP_CTL */
+ if (HAS_PCH_CPT(dev) &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+ u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
+ reg = TRANS_DP_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(TRANS_DP_PORT_SEL_MASK |
+ TRANS_DP_SYNC_MASK |
+ TRANS_DP_BPC_MASK);
+ temp |= (TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_ENH_FRAMING);
+ temp |= bpc << 9; /* same format but at 11:9 */
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
+
+ switch (intel_trans_dp_port_sel(crtc)) {
+ case PCH_DP_B:
+ temp |= TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ temp |= TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ temp |= TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ BUG();
}
- if (HAS_eDP) {
- ironlake_disable_pll_edp(crtc);
+ I915_WRITE(reg, temp);
+ }
+
+ ironlake_enable_pch_transcoder(dev_priv, pipe);
+}
+
+static void lpt_pch_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+
+ assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
+
+ lpt_program_iclkip(crtc);
+
+ /* Set transcoder timing. */
+ ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
+
+ lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
+}
+
+static void intel_put_shared_dpll(struct intel_crtc *crtc)
+{
+ struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+
+ if (pll == NULL)
+ return;
+
+ if (pll->refcount == 0) {
+ WARN(1, "bad %s refcount\n", pll->name);
+ return;
+ }
+
+ if (--pll->refcount == 0) {
+ WARN_ON(pll->on);
+ WARN_ON(pll->active);
+ }
+
+ crtc->config.shared_dpll = DPLL_ID_PRIVATE;
+}
+
+static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+ enum intel_dpll_id i;
+
+ if (pll) {
+ DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
+ crtc->base.base.id, pll->name);
+ intel_put_shared_dpll(crtc);
+ }
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
+ i = (enum intel_dpll_id) crtc->pipe;
+ pll = &dev_priv->shared_dplls[i];
+
+ DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
+ crtc->base.base.id, pll->name);
+
+ WARN_ON(pll->refcount);
+
+ goto found;
+ }
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ pll = &dev_priv->shared_dplls[i];
+
+		/* In this first pass, only consider DPLLs that are already in use */
+ if (pll->refcount == 0)
+ continue;
+
+ if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
+ sizeof(pll->hw_state)) == 0) {
+ DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
+ crtc->base.base.id,
+ pll->name, pll->refcount, pll->active);
+
+ goto found;
}
+ }
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_SEL_PCDCLK;
- I915_WRITE(fdi_rx_reg, temp);
- I915_READ(fdi_rx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_RX_PLL_ENABLE;
- I915_WRITE(fdi_rx_reg, temp);
- I915_READ(fdi_rx_reg);
-
- /* Disable CPU FDI TX PLL */
- temp = I915_READ(fdi_tx_reg);
- if ((temp & FDI_TX_PLL_ENABLE) != 0) {
- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
- I915_READ(fdi_tx_reg);
- udelay(100);
+ /* Ok no matching timings, maybe there's a free one? */
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ pll = &dev_priv->shared_dplls[i];
+ if (pll->refcount == 0) {
+ DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
+ crtc->base.base.id, pll->name);
+ goto found;
}
+ }
- /* Wait for the clocks to turn off. */
- udelay(100);
- break;
+ return NULL;
+
+found:
+ if (pll->refcount == 0)
+ pll->hw_state = crtc->config.dpll_hw_state;
+
+ crtc->config.shared_dpll = i;
+ DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
+ pipe_name(crtc->pipe));
+
+ pll->refcount++;
+
+ return pll;
+}
+
+static void cpt_verify_modeset(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int dslreg = PIPEDSL(pipe);
+ u32 temp;
+
+ temp = I915_READ(dslreg);
+ udelay(500);
+ if (wait_for(I915_READ(dslreg) != temp, 5)) {
+ if (wait_for(I915_READ(dslreg) != temp, 5))
+ DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
}
}
-static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
+static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
- struct intel_overlay *overlay;
- int ret;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+
+ if (crtc->config.pch_pfit.enabled) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+ PF_PIPE_SEL_IVB(pipe));
+ else
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
+ I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
+ }
+}
+
+static void intel_enable_planes(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+
+ drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+ intel_plane = to_intel_plane(plane);
+ if (intel_plane->pipe == pipe)
+ intel_plane_restore(&intel_plane->base);
+ }
+}
+
+static void intel_disable_planes(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+
+ drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+ intel_plane = to_intel_plane(plane);
+ if (intel_plane->pipe == pipe)
+ intel_plane_disable(&intel_plane->base);
+ }
+}
+
+void hsw_enable_ips(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!crtc->config.ips_enabled)
+ return;
+
+ /* We can only enable IPS after we enable a plane and wait for a vblank */
+ intel_wait_for_vblank(dev, crtc->pipe);
+
+ assert_plane_enabled(dev_priv, crtc->plane);
+ if (IS_BROADWELL(dev)) {
+ mutex_lock(&dev_priv->rps.hw_lock);
+ WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
+ mutex_unlock(&dev_priv->rps.hw_lock);
+		/* Quoting Art Runyan: "it's not safe to expect any particular
+ * value in IPS_CTL bit 31 after enabling IPS through the
+ * mailbox." Moreover, the mailbox may return a bogus state,
+ * so we need to just enable it and continue on.
+ */
+ } else {
+ I915_WRITE(IPS_CTL, IPS_ENABLE);
+ /* The bit only becomes 1 in the next vblank, so this wait here
+ * is essentially intel_wait_for_vblank. If we don't have this
+ * and don't wait for vblanks until the end of crtc_enable, then
+ * the HW state readout code will complain that the expected
+ * IPS_CTL value is not the one we read. */
+ if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
+ DRM_ERROR("Timed out waiting for IPS enable\n");
+ }
+}
+
+void hsw_disable_ips(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!crtc->config.ips_enabled)
+ return;
+
+ assert_plane_enabled(dev_priv, crtc->plane);
+ if (IS_BROADWELL(dev)) {
+ mutex_lock(&dev_priv->rps.hw_lock);
+ WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ /* wait for pcode to finish disabling IPS, which may take up to 42ms */
+ if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
+ DRM_ERROR("Timed out waiting for IPS disable\n");
+ } else {
+ I915_WRITE(IPS_CTL, 0);
+ POSTING_READ(IPS_CTL);
+ }
+
+ /* We need to wait for a vblank before we can disable the plane. */
+ intel_wait_for_vblank(dev, crtc->pipe);
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+static void intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ int palreg = PALETTE(pipe);
+ int i;
+ bool reenable_ips = false;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled || !intel_crtc->active)
+ return;
+
+ if (!HAS_PCH_SPLIT(dev_priv->dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+ assert_dsi_pll_enabled(dev_priv);
+ else
+ assert_pll_enabled(dev_priv, pipe);
+ }
+
+ /* use legacy palette for Ironlake */
+ if (HAS_PCH_SPLIT(dev))
+ palreg = LGC_PALETTE(pipe);
+
+	/* Workaround: Do not read or write the pipe palette/gamma data while
+ * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+ */
+ if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
+ ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
+ GAMMA_MODE_MODE_SPLIT)) {
+ hsw_disable_ips(intel_crtc);
+ reenable_ips = true;
+ }
+
+ for (i = 0; i < 256; i++) {
+ I915_WRITE(palreg + 4 * i,
+ (intel_crtc->lut_r[i] << 16) |
+ (intel_crtc->lut_g[i] << 8) |
+ intel_crtc->lut_b[i]);
+ }
+
+ if (reenable_ips)
+ hsw_enable_ips(intel_crtc);
+}
+static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
+{
if (!enable && intel_crtc->overlay) {
- overlay = intel_crtc->overlay;
- mutex_lock(&overlay->dev->struct_mutex);
- for (;;) {
- ret = intel_overlay_switch_off(overlay);
- if (ret == 0)
- break;
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- ret = intel_overlay_recover_from_interrupt(overlay, 0);
- if (ret != 0) {
- /* overlay doesn't react anymore. Usually
- * results in a black screen and an unkillable
- * X server. */
- BUG();
- overlay->hw_wedged = HW_WEDGED;
- break;
- }
- }
- mutex_unlock(&overlay->dev->struct_mutex);
+ mutex_lock(&dev->struct_mutex);
+ dev_priv->mm.interruptible = false;
+ (void) intel_overlay_switch_off(intel_crtc->overlay);
+ dev_priv->mm.interruptible = true;
+ mutex_unlock(&dev->struct_mutex);
}
+
/* Let userspace switch the overlay on again. In most cases userspace
- * has to recompute where to put it anyway. */
+ * has to recompute where to put it anyway.
+ */
+}
+
+/**
+ * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
+ * cursor plane briefly if not already running after enabling the display
+ * plane.
+ * This workaround avoids occasional blank screens when self refresh is
+ * enabled.
+ */
+static void
+g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ u32 cntl = I915_READ(CURCNTR(pipe));
+
+ if ((cntl & CURSOR_MODE) == 0) {
+ u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
- return;
+ I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
+ I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+ I915_WRITE(CURCNTR(pipe), cntl);
+ I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+ I915_WRITE(FW_BLC_SELF, fw_bcl_self);
+ }
}
-static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void intel_crtc_enable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- u32 temp;
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ intel_enable_primary_hw_plane(dev_priv, plane, pipe);
+ intel_enable_planes(crtc);
+ /* The fixup needs to happen before cursor is enabled */
+ if (IS_G4X(dev))
+ g4x_fixup_plane(dev_priv, pipe);
+ intel_crtc_update_cursor(crtc, true);
+ intel_crtc_dpms_overlay(intel_crtc, true);
+
+ hsw_enable_ips(intel_crtc);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ intel_edp_psr_update(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void intel_crtc_disable_planes(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_crtc_vblank_off(crtc);
+
+ if (dev_priv->fbc.plane == plane)
+ intel_disable_fbc(dev);
+
+ hsw_disable_ips(intel_crtc);
+
+ intel_crtc_dpms_overlay(intel_crtc, false);
+ intel_crtc_update_cursor(crtc, false);
+ intel_disable_planes(crtc);
+ intel_disable_primary_hw_plane(dev_priv, plane, pipe);
+}
+
+static void ironlake_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ enum plane plane = intel_crtc->plane;
+
+ WARN_ON(!crtc->enabled);
+
+ if (intel_crtc->active)
+ return;
+
+ if (intel_crtc->config.has_pch_encoder)
+ intel_prepare_shared_dpll(intel_crtc);
+
+ if (intel_crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(intel_crtc);
+
+ intel_set_pipe_timings(intel_crtc);
+
+ if (intel_crtc->config.has_pch_encoder) {
+ intel_cpu_transcoder_set_m_n(intel_crtc,
+ &intel_crtc->config.fdi_m_n);
+ }
+
+ ironlake_set_pipeconf(crtc);
+
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+ POSTING_READ(DSPCNTR(plane));
+
+ dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
+ crtc->x, crtc->y);
+
+ intel_crtc->active = true;
+
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
+ if (intel_crtc->config.has_pch_encoder) {
+ /* Note: FDI PLL enabling _must_ be done before we enable the
+ * cpu pipes, hence this is separate from all the other fdi/pch
+ * enabling. */
+ ironlake_fdi_pll_enable(intel_crtc);
+ } else {
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+ }
+
+ ironlake_pfit_enable(intel_crtc);
+
+ /*
+ * On ILK+ LUT must be loaded before the pipe is running but with
+ * clocks enabled
*/
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- intel_update_watermarks(dev);
-
- /* Enable the DPLL */
- temp = I915_READ(dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(dpll_reg, temp);
- I915_READ(dpll_reg);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(dpll_reg);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(dpll_reg);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- }
+ intel_crtc_load_lut(crtc);
- /* Enable the pipe */
- temp = I915_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) == 0)
- I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
-
- /* Enable the plane */
- temp = I915_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- }
+ intel_update_watermarks(crtc);
+ intel_enable_pipe(intel_crtc);
- intel_crtc_load_lut(crtc);
+ if (intel_crtc->config.has_pch_encoder)
+ ironlake_pch_enable(crtc);
- if ((IS_I965G(dev) || plane == 0))
- intel_update_fbc(crtc, &crtc->mode);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
- /* Give the overlay scaler a chance to enable if it's on this pipe */
- intel_crtc_dpms_overlay(intel_crtc, true);
- break;
- case DRM_MODE_DPMS_OFF:
- intel_update_watermarks(dev);
+ if (HAS_PCH_CPT(dev))
+ cpt_verify_modeset(dev, intel_crtc->pipe);
- /* Give the overlay scaler a chance to disable if it's on this pipe */
- intel_crtc_dpms_overlay(intel_crtc, false);
- drm_vblank_off(dev, pipe);
+ intel_crtc_enable_planes(crtc);
- if (dev_priv->cfb_plane == plane &&
- dev_priv->display.disable_fbc)
- dev_priv->display.disable_fbc(dev);
+ drm_crtc_vblank_on(crtc);
+}
- /* Disable the VGA plane that we never use */
- i915_disable_vga(dev);
+/* IPS only exists on ULT machines and is tied to pipe A. */
+static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
+{
+ return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
+}
- /* Disable display plane */
- temp = I915_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- I915_READ(dspbase_reg);
- }
+/*
+ * This implements the workaround described in the "notes" section of the mode
+ * set sequence documentation. When going from no pipes or single pipe to
+ * multiple pipes, and planes are enabled after the pipe, we need to wait at
+ * least 2 vblanks on the first pipe before enabling planes on the second pipe.
+ */
+static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_crtc *crtc_it, *other_active_crtc = NULL;
- if (!IS_I9XX(dev)) {
- /* Wait for vblank for the disable to take effect */
- intel_wait_for_vblank(dev);
- }
+ /* We want to get the other_active_crtc only if there's only 1 other
+ * active crtc. */
+ for_each_intel_crtc(dev, crtc_it) {
+ if (!crtc_it->active || crtc_it == crtc)
+ continue;
+
+ if (other_active_crtc)
+ return;
+
+ other_active_crtc = crtc_it;
+ }
+ if (!other_active_crtc)
+ return;
+
+ intel_wait_for_vblank(dev, other_active_crtc->pipe);
+ intel_wait_for_vblank(dev, other_active_crtc->pipe);
+}
+
+static void haswell_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ enum plane plane = intel_crtc->plane;
+
+ WARN_ON(!crtc->enabled);
+
+ if (intel_crtc->active)
+ return;
+
+ if (intel_crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(intel_crtc);
+
+ intel_set_pipe_timings(intel_crtc);
+
+ if (intel_crtc->config.has_pch_encoder) {
+ intel_cpu_transcoder_set_m_n(intel_crtc,
+ &intel_crtc->config.fdi_m_n);
+ }
+
+ haswell_set_pipeconf(crtc);
+
+ intel_set_pipe_csc(crtc);
+
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
+ POSTING_READ(DSPCNTR(plane));
+
+ dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
+ crtc->x, crtc->y);
+
+ intel_crtc->active = true;
+
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ if (intel_crtc->config.has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
+
+ if (intel_crtc->config.has_pch_encoder)
+ dev_priv->display.fdi_link_train(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
+ intel_ddi_enable_pipe_clock(intel_crtc);
+
+ ironlake_pfit_enable(intel_crtc);
+
+ /*
+ * On ILK+ LUT must be loaded before the pipe is running but with
+ * clocks enabled
+ */
+ intel_crtc_load_lut(crtc);
+
+ intel_ddi_set_pipe_settings(crtc);
+ intel_ddi_enable_transcoder_func(crtc);
+
+ intel_update_watermarks(crtc);
+ intel_enable_pipe(intel_crtc);
+
+ if (intel_crtc->config.has_pch_encoder)
+ lpt_pch_enable(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ encoder->enable(encoder);
+ intel_opregion_notify_encoder(encoder, true);
+ }
+
+ /* If we change the relative order between pipe/planes enabling, we need
+ * to change the workaround. */
+ haswell_mode_set_planes_workaround(intel_crtc);
+ intel_crtc_enable_planes(crtc);
+
+ drm_crtc_vblank_on(crtc);
+}
- /* Next, disable display pipes */
- temp = I915_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- I915_READ(pipeconf_reg);
+static void ironlake_pfit_disable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+
+	/* To avoid upsetting the power well on haswell, only disable the pfit
+	 * if it's in use. The hw state code will make sure we get this right. */
+ if (crtc->config.pch_pfit.enabled) {
+ I915_WRITE(PF_CTL(pipe), 0);
+ I915_WRITE(PF_WIN_POS(pipe), 0);
+ I915_WRITE(PF_WIN_SZ(pipe), 0);
+ }
+}
+
+static void ironlake_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ if (!intel_crtc->active)
+ return;
+
+ intel_crtc_disable_planes(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
+ if (intel_crtc->config.has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
+
+ intel_disable_pipe(dev_priv, pipe);
+
+ ironlake_pfit_disable(intel_crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
+
+ if (intel_crtc->config.has_pch_encoder) {
+ ironlake_fdi_disable(crtc);
+
+ ironlake_disable_pch_transcoder(dev_priv, pipe);
+ intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
+
+ if (HAS_PCH_CPT(dev)) {
+ /* disable TRANS_DP_CTL */
+ reg = TRANS_DP_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_PORT_SEL_MASK);
+ temp |= TRANS_DP_PORT_SEL_NONE;
+ I915_WRITE(reg, temp);
+
+ /* disable DPLL_SEL */
+ temp = I915_READ(PCH_DPLL_SEL);
+ temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
+ I915_WRITE(PCH_DPLL_SEL, temp);
}
- /* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev);
+ /* disable PCH DPLL */
+ intel_disable_shared_dpll(intel_crtc);
+
+ ironlake_fdi_pll_disable(intel_crtc);
+ }
+
+ intel_crtc->active = false;
+ intel_update_watermarks(crtc);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ intel_edp_psr_update(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void haswell_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+
+ if (!intel_crtc->active)
+ return;
+
+ intel_crtc_disable_planes(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ intel_opregion_notify_encoder(encoder, false);
+ encoder->disable(encoder);
+ }
+
+ if (intel_crtc->config.has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
+ intel_disable_pipe(dev_priv, pipe);
+
+ intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+
+ ironlake_pfit_disable(intel_crtc);
+
+ intel_ddi_disable_pipe_clock(intel_crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
+
+ if (intel_crtc->config.has_pch_encoder) {
+ lpt_disable_pch_transcoder(dev_priv);
+ intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
+ intel_ddi_fdi_disable(crtc);
+ }
+
+ intel_crtc->active = false;
+ intel_update_watermarks(crtc);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ intel_edp_psr_update(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void ironlake_crtc_off(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ intel_put_shared_dpll(intel_crtc);
+}
+
+static void haswell_crtc_off(struct drm_crtc *crtc)
+{
+ intel_ddi_put_crtc_pll(crtc);
+}
+
+static void i9xx_pfit_enable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc_config *pipe_config = &crtc->config;
- temp = I915_READ(dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) != 0) {
- I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
- I915_READ(dpll_reg);
+ if (!crtc->config.gmch_pfit.control)
+ return;
+
+ /*
+ * The panel fitter should only be adjusted whilst the pipe is disabled,
+ * according to register description and PRM.
+ */
+ WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
+ assert_pipe_disabled(dev_priv, crtc->pipe);
+
+ I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
+
+ /* Border color in case we don't scale up to the full screen. Black by
+ * default, change to something else for debugging. */
+ I915_WRITE(BCLRPAT(crtc->pipe), 0);
+}
+
+#define for_each_power_domain(domain, mask) \
+ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+ if ((1 << (domain)) & (mask))
+
+enum intel_display_power_domain
+intel_display_port_power_domain(struct intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct intel_digital_port *intel_dig_port;
+
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_UNKNOWN:
+ /* Only DDI platforms should ever use this output type */
+ WARN_ON_ONCE(!HAS_DDI(dev));
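+		/* fall through */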
+ case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_HDMI:
+ case INTEL_OUTPUT_EDP:
+ intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+ switch (intel_dig_port->port) {
+ case PORT_A:
+ return POWER_DOMAIN_PORT_DDI_A_4_LANES;
+ case PORT_B:
+ return POWER_DOMAIN_PORT_DDI_B_4_LANES;
+ case PORT_C:
+ return POWER_DOMAIN_PORT_DDI_C_4_LANES;
+ case PORT_D:
+ return POWER_DOMAIN_PORT_DDI_D_4_LANES;
+ default:
+ WARN_ON_ONCE(1);
+ return POWER_DOMAIN_PORT_OTHER;
}
+ case INTEL_OUTPUT_ANALOG:
+ return POWER_DOMAIN_PORT_CRT;
+ case INTEL_OUTPUT_DSI:
+ return POWER_DOMAIN_PORT_DSI;
+ default:
+ return POWER_DOMAIN_PORT_OTHER;
+ }
+}
- /* Wait for the clocks to turn off. */
- udelay(150);
- break;
+static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *intel_encoder;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
+ unsigned long mask;
+ enum transcoder transcoder;
+
+ transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
+
+ mask = BIT(POWER_DOMAIN_PIPE(pipe));
+ mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
+ if (pfit_enabled)
+ mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+ mask |= BIT(intel_display_port_power_domain(intel_encoder));
+
+ return mask;
+}
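+
+/*
+ * Example (hypothetical setup): pipe A driving an eDP panel with the panel
+ * fitter in use collects BIT(POWER_DOMAIN_PIPE(PIPE_A)), the matching
+ * transcoder bit, BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)) and the port
+ * domain returned by intel_display_port_power_domain() into a single mask.
+ */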
+
+void intel_display_set_init_power(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ if (dev_priv->power_domains.init_power_on == enable)
+ return;
+
+ if (enable)
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ else
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+ dev_priv->power_domains.init_power_on = enable;
+}
+
+static void modeset_update_crtc_power_domains(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
+ struct intel_crtc *crtc;
+
+ /*
+ * First get all needed power domains, then put all unneeded, to avoid
+ * any unnecessary toggling of the power wells.
+ */
+ for_each_intel_crtc(dev, crtc) {
+ enum intel_display_power_domain domain;
+
+ if (!crtc->base.enabled)
+ continue;
+
+ pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
+
+ for_each_power_domain(domain, pipe_domains[crtc->pipe])
+ intel_display_power_get(dev_priv, domain);
+ }
+
+ for_each_intel_crtc(dev, crtc) {
+ enum intel_display_power_domain domain;
+
+ for_each_power_domain(domain, crtc->enabled_power_domains)
+ intel_display_power_put(dev_priv, domain);
+
+ crtc->enabled_power_domains = pipe_domains[crtc->pipe];
}
+
+ intel_display_set_init_power(dev_priv, false);
}
-/**
- * Sets the power management mode of the pipe and plane.
- *
- * This code should probably grow support for turning the cursor off and back
- * on appropriately at the same time as we're turning the pipe off/on.
- */
-static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+int valleyview_get_vco(struct drm_i915_private *dev_priv)
+{
+ int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
+
+ /* Obtain SKU information */
+ mutex_lock(&dev_priv->dpio_lock);
+ hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
+ CCK_FUSE_HPLL_FREQ_MASK;
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ return vco_freq[hpll_freq];
+}
+
+/* Adjust CDclk dividers to allow high res or save power if possible */
+static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, cmd;
+
+ WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq);
+ dev_priv->vlv_cdclk_freq = cdclk;
+
+ if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
+ cmd = 2;
+ else if (cdclk == 266)
+ cmd = 1;
+ else
+ cmd = 0;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val &= ~DSPFREQGUAR_MASK;
+ val |= (cmd << DSPFREQGUAR_SHIFT);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+ DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
+ 50)) {
+ DRM_ERROR("timed out waiting for CDclk change\n");
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ if (cdclk == 400) {
+ u32 divider, vco;
+
+ vco = valleyview_get_vco(dev_priv);
+ divider = ((vco << 1) / cdclk) - 1;
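+		/* e.g. a SKU with vco = 800 gives divider = (1600 / 400) - 1 = 3 */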
+
+ mutex_lock(&dev_priv->dpio_lock);
+ /* adjust cdclk divider */
+ val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+ val &= ~0xf;
+ val |= divider;
+ vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+ mutex_unlock(&dev_priv->dpio_lock);
+ }
+
+ mutex_lock(&dev_priv->dpio_lock);
+ /* adjust self-refresh exit latency value */
+ val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
+ val &= ~0x7f;
+
+ /*
+ * For high bandwidth configs, we set a higher latency in the bunit
+ * so that the core display fetch happens in time to avoid underruns.
+ */
+ if (cdclk == 400)
+ val |= 4500 / 250; /* 4.5 usec */
+ else
+ val |= 3000 / 250; /* 3.0 usec */
+ vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ /* Since we changed the CDclk, we need to update the GMBUSFREQ too */
+ intel_i2c_reset(dev);
+}
+
+int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
+{
+ int cur_cdclk, vco;
+ int divider;
+
+ vco = valleyview_get_vco(dev_priv);
+
+ mutex_lock(&dev_priv->dpio_lock);
+ divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ divider &= 0xf;
+
+ cur_cdclk = (vco << 1) / (divider + 1);
+
+ return cur_cdclk;
+}
+
+static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
+ int max_pixclk)
+{
+ /*
+ * Really only a few cases to deal with, as only 4 CDclks are supported:
+ * 200MHz
+ * 267MHz
+ * 320MHz
+ * 400MHz
+ * So we check to see whether we're above 90% of the lower bin and
+ * adjust if needed.
+ */
+	/* Looks like the 200MHz CDclk freq doesn't work on some configs */
+	if (max_pixclk > 288000)
+		return 400;
+	else if (max_pixclk > 240000)
+		return 320;
+	else
+		return 266;
+}
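+
+/*
+ * Example (hypothetical pixel clocks): max_pixclk = 250000 KHz is above the
+ * 240000 cut-off (roughly 90% of the 267MHz bin) and selects 320, while
+ * 230000 KHz stays on 266.
+ */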
+
+/* compute the max pixel clock for new configuration */
+static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_crtc *intel_crtc;
+ int max_pixclk = 0;
+
+ for_each_intel_crtc(dev, intel_crtc) {
+ if (intel_crtc->new_enabled)
+ max_pixclk = max(max_pixclk,
+ intel_crtc->new_config->adjusted_mode.crtc_clock);
+ }
+
+ return max_pixclk;
+}
+
+static void valleyview_modeset_global_pipes(struct drm_device *dev,
+ unsigned *prepare_pipes)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc;
+ int max_pixclk = intel_mode_max_pixclk(dev_priv);
+
+ if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
+ dev_priv->vlv_cdclk_freq)
+ return;
+
+ /* disable/enable all currently active pipes while we change cdclk */
+ for_each_intel_crtc(dev, intel_crtc)
+ if (intel_crtc->base.enabled)
+ *prepare_pipes |= (1 << intel_crtc->pipe);
+}
+
+static void valleyview_modeset_global_resources(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int max_pixclk = intel_mode_max_pixclk(dev_priv);
+ int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
+
+ if (req_cdclk != dev_priv->vlv_cdclk_freq)
+ valleyview_set_cdclk(dev, req_cdclk);
+ modeset_update_crtc_power_domains(dev);
+}
+
+static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- bool enabled;
+ int plane = intel_crtc->plane;
+ bool is_dsi;
+ u32 dspcntr;
+
+ WARN_ON(!crtc->enabled);
+
+ if (intel_crtc->active)
+ return;
+
+ is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
+
+ if (!is_dsi && !IS_CHERRYVIEW(dev))
+ vlv_prepare_pll(intel_crtc);
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (intel_crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(intel_crtc);
+
+ intel_set_pipe_timings(intel_crtc);
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ I915_WRITE(DSPSIZE(plane),
+ ((intel_crtc->config.pipe_src_h - 1) << 16) |
+ (intel_crtc->config.pipe_src_w - 1));
+ I915_WRITE(DSPPOS(plane), 0);
+
+ i9xx_set_pipeconf(intel_crtc);
+
+ I915_WRITE(DSPCNTR(plane), dspcntr);
+ POSTING_READ(DSPCNTR(plane));
+
+ dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
+ crtc->x, crtc->y);
+
+ intel_crtc->active = true;
+
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
+
+ if (!is_dsi) {
+ if (IS_CHERRYVIEW(dev))
+ chv_enable_pll(intel_crtc);
+ else
+ vlv_enable_pll(intel_crtc);
+ }
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
+ i9xx_pfit_enable(intel_crtc);
+
+ intel_crtc_load_lut(crtc);
+
+ intel_update_watermarks(crtc);
+ intel_enable_pipe(intel_crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
+ intel_crtc_enable_planes(crtc);
+
+ drm_crtc_vblank_on(crtc);
+
+ /* Underruns don't raise interrupts, so check manually. */
+ i9xx_check_fifo_underruns(dev);
+}
+
+static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
+ I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
+}
+
+static void i9xx_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 dspcntr;
+
+ WARN_ON(!crtc->enabled);
+
+ if (intel_crtc->active)
+ return;
+
+ i9xx_set_pll_dividers(intel_crtc);
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (pipe == 0)
+ dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ if (intel_crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(intel_crtc);
+
+ intel_set_pipe_timings(intel_crtc);
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ I915_WRITE(DSPSIZE(plane),
+ ((intel_crtc->config.pipe_src_h - 1) << 16) |
+ (intel_crtc->config.pipe_src_w - 1));
+ I915_WRITE(DSPPOS(plane), 0);
+
+ i9xx_set_pipeconf(intel_crtc);
+
+ I915_WRITE(DSPCNTR(plane), dspcntr);
+ POSTING_READ(DSPCNTR(plane));
+
+ dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
+ crtc->x, crtc->y);
+
+ intel_crtc->active = true;
+
+ if (!IS_GEN2(dev))
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
+ i9xx_enable_pll(intel_crtc);
+
+ i9xx_pfit_enable(intel_crtc);
+
+ intel_crtc_load_lut(crtc);
+
+ intel_update_watermarks(crtc);
+ intel_enable_pipe(intel_crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
+ intel_crtc_enable_planes(crtc);
+
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So don't enable underrun reporting before at least some planes
+ * are enabled.
+ * FIXME: Need to fix the logic to work when we turn off all planes
+ * but leave the pipe running.
+ */
+ if (IS_GEN2(dev))
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+
+ drm_crtc_vblank_on(crtc);
+
+ /* Underruns don't raise interrupts, so check manually. */
+ i9xx_check_fifo_underruns(dev);
+}
+
+static void i9xx_pfit_disable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!crtc->config.gmch_pfit.control)
+ return;
+
+ assert_pipe_disabled(dev_priv, crtc->pipe);
+
+ DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
+ I915_READ(PFIT_CONTROL));
+ I915_WRITE(PFIT_CONTROL, 0);
+}
+
+static void i9xx_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+
+ if (!intel_crtc->active)
+ return;
+
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+	 * So disable underrun reporting before all the planes get disabled.
+ * FIXME: Need to fix the logic to work when we turn off all planes
+ * but leave the pipe running.
+ */
+ if (IS_GEN2(dev))
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+
+ intel_crtc_disable_planes(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
+ /*
+ * On gen2 planes are double buffered but the pipe isn't, so we must
+ * wait for planes to fully turn off before disabling the pipe.
+ */
+ if (IS_GEN2(dev))
+ intel_wait_for_vblank(dev, pipe);
+
+ intel_disable_pipe(dev_priv, pipe);
+
+ i9xx_pfit_disable(intel_crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
- dev_priv->display.dpms(crtc, mode);
+ if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
+ if (IS_CHERRYVIEW(dev))
+ chv_disable_pll(dev_priv, pipe);
+ else if (IS_VALLEYVIEW(dev))
+ vlv_disable_pll(dev_priv, pipe);
+ else
+ i9xx_disable_pll(dev_priv, pipe);
+ }
- intel_crtc->dpms_mode = mode;
+ if (!IS_GEN2(dev))
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+
+ intel_crtc->active = false;
+ intel_update_watermarks(crtc);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ intel_edp_psr_update(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void i9xx_crtc_off(struct drm_crtc *crtc)
+{
+}
+
+static void intel_crtc_update_sarea(struct drm_crtc *crtc,
+ bool enabled)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_master_private *master_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
if (!dev->primary->master)
return;
@@ -1998,8 +4828,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
if (!master_priv->sarea_priv)
return;
- enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
-
switch (pipe) {
case 0:
master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
@@ -2010,48 +4838,343 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
break;
default:
- DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+ DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
break;
}
}
-static void intel_crtc_prepare (struct drm_crtc *crtc)
+/**
+ * Sets the power management mode of the pipe and plane.
+ */
+void intel_crtc_update_dpms(struct drm_crtc *crtc)
{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ bool enable = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+ enable |= intel_encoder->connectors_active;
+
+ if (enable)
+ dev_priv->display.crtc_enable(crtc);
+ else
+ dev_priv->display.crtc_disable(crtc);
+
+ intel_crtc_update_sarea(crtc, enable);
}
-static void intel_crtc_commit (struct drm_crtc *crtc)
+static void intel_crtc_disable(struct drm_crtc *crtc)
{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* crtc should still be enabled when we disable it. */
+ WARN_ON(!crtc->enabled);
+
+ dev_priv->display.crtc_disable(crtc);
+ intel_crtc_update_sarea(crtc, false);
+ dev_priv->display.off(crtc);
+
+ assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
+ assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
+ assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
+
+ if (crtc->primary->fb) {
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj);
+ mutex_unlock(&dev->struct_mutex);
+ crtc->primary->fb = NULL;
+ }
+
+ /* Update computed state. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder || !connector->encoder->crtc)
+ continue;
+
+ if (connector->encoder->crtc != crtc)
+ continue;
+
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ to_intel_encoder(connector->encoder)->connectors_active = false;
+ }
}
-void intel_encoder_prepare (struct drm_encoder *encoder)
+void intel_encoder_destroy(struct drm_encoder *encoder)
{
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- /* lvds has its own version of prepare see intel_lvds_prepare */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
-void intel_encoder_commit (struct drm_encoder *encoder)
+/* Simple dpms helper for encoders with just one connector, no cloning and only
+ * one kind of off state. It clamps all !ON modes to fully OFF and changes the
+ * state of the entire output pipe. */
+static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- /* lvds has its own version of commit see intel_lvds_commit */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+ if (mode == DRM_MODE_DPMS_ON) {
+ encoder->connectors_active = true;
+
+ intel_crtc_update_dpms(encoder->base.crtc);
+ } else {
+ encoder->connectors_active = false;
+
+ intel_crtc_update_dpms(encoder->base.crtc);
+ }
}
-static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+/* Cross check the actual hw state with our own modeset state tracking (and its
+ * internal consistency). */
+static void intel_connector_check_state(struct intel_connector *connector)
{
- struct drm_device *dev = crtc->dev;
- if (IS_IRONLAKE(dev)) {
- /* FDI link clock is fixed at 2.7G */
- if (mode->clock * 3 > 27000 * 4)
- return MODE_CLOCK_HIGH;
+ if (connector->get_hw_state(connector)) {
+ struct intel_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc;
+ bool encoder_enabled;
+ enum pipe pipe;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.base.id,
+ connector->base.name);
+
+ WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
+ "wrong connector dpms state\n");
+ WARN(connector->base.encoder != &encoder->base,
+ "active connector not linked to encoder\n");
+ WARN(!encoder->connectors_active,
+ "encoder->connectors_active not set\n");
+
+ encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+ WARN(!encoder_enabled, "encoder not enabled\n");
+ if (WARN_ON(!encoder->base.crtc))
+ return;
+
+ crtc = encoder->base.crtc;
+
+ WARN(!crtc->enabled, "crtc not enabled\n");
+ WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+ WARN(pipe != to_intel_crtc(crtc)->pipe,
+ "encoder active on the wrong pipe\n");
}
- return true;
+}
+
+/* Even simpler default implementation, if there's really no special case to
+ * consider. */
+void intel_connector_dpms(struct drm_connector *connector, int mode)
+{
+ /* All the simple cases only support two dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ if (connector->encoder)
+ intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
+
+ intel_modeset_check_state(connector->dev);
+}
+
+/* Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning; the encoder state therefore determines the
+ * state of the connector. */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
+{
+ enum pipe pipe = 0;
+ struct intel_encoder *encoder = connector->encoder;
+
+ return encoder->get_hw_state(encoder, &pipe);
+}
+
+static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *pipe_B_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+
+ DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ if (pipe_config->fdi_lanes > 4) {
+ DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return false;
+ }
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (pipe_config->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
+ pipe_config->fdi_lanes);
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ if (INTEL_INFO(dev)->num_pipes == 2)
+ return true;
+
+ /* Ivybridge 3 pipe is really complicated */
+ switch (pipe) {
+ case PIPE_A:
+ return true;
+ case PIPE_B:
+ if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
+ pipe_config->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return false;
+ }
+ return true;
+ case PIPE_C:
+ if (!pipe_has_enabled_pch(pipe_B_crtc) ||
+ pipe_B_crtc->config.fdi_lanes <= 2) {
+ if (pipe_config->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return false;
+ }
+ } else {
+ DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+ return false;
+ }
+ return true;
+ default:
+ BUG();
+ }
+}
+
+#define RETRY 1
+static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ int lane, link_bw, fdi_dotclock;
+ bool setup_ok, needs_recompute = false;
+
+retry:
+ /* FDI is a binary signal running at ~2.7GHz, encoding
+ * each output octet as 10 bits. The actual frequency
+ * is stored as a divider into a 100MHz clock, and the
+ * mode pixel clock is stored in units of 1KHz.
+ * Hence the bw of each lane in terms of the mode signal
+ * is:
+ */
+ link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
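+	/* e.g. an intel_fdi_link_freq() of 27 gives link_bw = 27 * 10000 = 270000 */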
+
+ fdi_dotclock = adjusted_mode->crtc_clock;
+
+ lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
+ pipe_config->pipe_bpp);
+
+ pipe_config->fdi_lanes = lane;
+
+ intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
+ link_bw, &pipe_config->fdi_m_n);
+
+ setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
+ intel_crtc->pipe, pipe_config);
+ if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
+ pipe_config->pipe_bpp -= 2*3;
+ DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
+ pipe_config->pipe_bpp);
+ needs_recompute = true;
+ pipe_config->bw_constrained = true;
+
+ goto retry;
+ }
+
+ if (needs_recompute)
+ return RETRY;
+
+ return setup_ok ? 0 : -EINVAL;
+}
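+
+/*
+ * Retry example (hypothetical mode): a config failing the lane check at
+ * pipe_bpp = 24 is reduced to 18 and rechecked; any reduction makes the
+ * function return RETRY so the caller recomputes with the lower bpp, and
+ * -EINVAL is only returned when the check fails with pipe_bpp already at
+ * the 6*3 floor.
+ */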
+
+static void hsw_compute_ips_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ pipe_config->ips_enabled = i915.enable_ips &&
+ hsw_crtc_supports_ips(crtc) &&
+ pipe_config->pipe_bpp <= 24;
+}
+
+static int intel_crtc_compute_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+
+ /* FIXME should check pixel clock limits on all platforms */
+ if (INTEL_INFO(dev)->gen < 4) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int clock_limit =
+ dev_priv->display.get_display_clock_speed(dev);
+
+ /*
+ * Enable pixel doubling when the dot clock
+ * is > 90% of the (display) core speed.
+ *
+ * GDG double wide on either pipe,
+ * otherwise pipe A only.
+ */
+ if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
+ adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
+ clock_limit *= 2;
+ pipe_config->double_wide = true;
+ }
+
+ if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
+ return -EINVAL;
+ }
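+	/* Example (hypothetical clocks): with clock_limit = 200000 KHz, a
+	 * 190000 KHz mode exceeds the 180000 (90%) cut-off, so double wide
+	 * is enabled and the effective limit doubles to 400000.
+	 */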
+
+ /*
+ * Pipe horizontal size must be even in:
+ * - DVO ganged mode
+ * - LVDS dual channel mode
+ * - Double wide pipe
+ */
+ if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
+ pipe_config->pipe_src_w &= ~1;
+
+	/* Cantiga+ cannot handle modes with an hsync front porch of 0.
+ * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
+ */
+ if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+ adjusted_mode->hsync_start == adjusted_mode->hdisplay)
+ return -EINVAL;
+
+ if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
+ pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
+ } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
+		/* only an 8bpc pipe, with 6bpc dither through the panel fitter
+ * for lvds. */
+ pipe_config->pipe_bpp = 8*3;
+ }
+
+ if (HAS_IPS(dev))
+ hsw_compute_ips_config(crtc, pipe_config);
+
+ /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
+ * clock survives for now. */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ pipe_config->shared_dpll = crtc->config.shared_dpll;
+
+ if (pipe_config->has_pch_encoder)
+ return ironlake_fdi_compute_config(crtc, pipe_config);
+
+ return 0;
+}
+
+static int valleyview_get_display_clock_speed(struct drm_device *dev)
+{
+ return 400000; /* FIXME */
}
static int i945_get_display_clock_speed(struct drm_device *dev)
@@ -2069,6 +5192,30 @@ static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
return 200000;
}
+static int pnv_get_display_clock_speed(struct drm_device *dev)
+{
+ u16 gcfgc = 0;
+
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_267_MHZ_PNV:
+ return 267000;
+ case GC_DISPLAY_CLOCK_333_MHZ_PNV:
+ return 333000;
+ case GC_DISPLAY_CLOCK_444_MHZ_PNV:
+ return 444000;
+ case GC_DISPLAY_CLOCK_200_MHZ_PNV:
+ return 200000;
+	default:
+		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
+		/* fall through: treat unknown values as the 133 MHz default */
+	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
+ return 133000;
+ case GC_DISPLAY_CLOCK_167_MHZ_PNV:
+ return 167000;
+ }
+}
+
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
u16 gcfgc = 0;
@@ -2118,1492 +5265,3097 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
return 133000;
}
-/**
- * Return the pipe currently connected to the panel fitter,
- * or -1 if the panel fitter is not present or not in use
- */
-int intel_panel_fitter_pipe (struct drm_device *dev)
+static void
+intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pfit_control;
+ while (*num > DATA_LINK_M_N_MASK ||
+ *den > DATA_LINK_M_N_MASK) {
+ *num >>= 1;
+ *den >>= 1;
+ }
+}
- /* i830 doesn't have a panel fitter */
- if (IS_I830(dev))
- return -1;
+static void compute_m_n(unsigned int m, unsigned int n,
+ uint32_t *ret_m, uint32_t *ret_n)
+{
+ *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
+ *ret_m = div_u64((uint64_t) m * *ret_n, n);
+ intel_reduce_m_n_ratio(ret_m, ret_n);
+}
- pfit_control = I915_READ(PFIT_CONTROL);
+void
+intel_link_compute_m_n(int bits_per_pixel, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n)
+{
+ m_n->tu = 64;
- /* See if the panel fitter is in use */
- if ((pfit_control & PFIT_ENABLE) == 0)
- return -1;
+ compute_m_n(bits_per_pixel * pixel_clock,
+ link_clock * nlanes * 8,
+ &m_n->gmch_m, &m_n->gmch_n);
- /* 965 can place panel fitter on either pipe */
- if (IS_I965G(dev))
- return (pfit_control >> 29) & 0x3;
+ compute_m_n(pixel_clock, link_clock,
+ &m_n->link_m, &m_n->link_n);
+}
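/*
 * Editor's sketch (not part of the patch): a worked example of the M/N
 * derivation above as a standalone userspace program.  For a 148.5 MHz
 * mode at 24 bpp over 4 lanes at 270000 kHz (HBR), the data ratio is
 * 24*148500 : 270000*4*8 = 0.4125; N is rounded up to a power of two and
 * clamped to DATA_LINK_N_MAX (0x800000), and both values already fit in
 * the 24-bit register fields, so no further reduction is needed.
 */
#include <stdint.h>
#include <stdio.h>

#define M_N_MASK 0xffffff	/* DATA_LINK_M_N_MASK */
#define N_MAX    0x800000	/* DATA_LINK_N_MAX */

static void sketch_compute_m_n(uint64_t m, uint64_t n,
			       uint32_t *ret_m, uint32_t *ret_n)
{
	uint64_t pow2_n = 1;

	while (pow2_n < n)			/* roundup_pow_of_two(n) */
		pow2_n <<= 1;
	if (pow2_n > N_MAX)
		pow2_n = N_MAX;

	*ret_n = pow2_n;
	*ret_m = m * pow2_n / n;
	while (*ret_m > M_N_MASK || *ret_n > M_N_MASK) {
		*ret_m >>= 1;
		*ret_n >>= 1;
	}
}

int main(void)
{
	uint32_t gmch_m, gmch_n;

	sketch_compute_m_n(24ULL * 148500, 270000ULL * 4 * 8,
			   &gmch_m, &gmch_n);
	printf("data M/N = %u/%u\n", gmch_m, gmch_n);	/* 3460300/8388608 */
	return 0;
}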
- /* older chips can only use pipe 1 */
- return 1;
+static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+{
+ if (i915.panel_use_ssc >= 0)
+ return i915.panel_use_ssc != 0;
+ return dev_priv->vbt.lvds_use_ssc
+ && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
-struct fdi_m_n {
- u32 tu;
- u32 gmch_m;
- u32 gmch_n;
- u32 link_m;
- u32 link_n;
-};
+static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int refclk;
+
+ if (IS_VALLEYVIEW(dev)) {
+ refclk = 100000;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ } else if (!IS_GEN2(dev)) {
+ refclk = 96000;
+ } else {
+ refclk = 48000;
+ }
-static void
-fdi_reduce_ratio(u32 *num, u32 *den)
+ return refclk;
+}
+
+static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
- while (*num > 0xffffff || *den > 0xffffff) {
- *num >>= 1;
- *den >>= 1;
+ return (1 << dpll->n) << 16 | dpll->m2;
+}
+
+static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
+{
+ return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
+}
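/*
 * Editor's sketch (not part of the patch): the FP value packs the feedback
 * divisors into byte fields -- N in bits 23:16, M1 in bits 15:8, M2 in
 * bits 7:0 (Pineview drops M1 and stores N one-hot instead):
 */
static u32 sketch_i9xx_fp_example(void)
{
	return 2 << 16 | 10 << 8 | 8;	/* n=2, m1=10, m2=8 -> 0x00020a08 */
}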
+
+static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
+ intel_clock_t *reduced_clock)
+{
+ struct drm_device *dev = crtc->base.dev;
+ u32 fp, fp2 = 0;
+
+ if (IS_PINEVIEW(dev)) {
+ fp = pnv_dpll_compute_fp(&crtc->config.dpll);
+ if (reduced_clock)
+ fp2 = pnv_dpll_compute_fp(reduced_clock);
+ } else {
+ fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
+ if (reduced_clock)
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
+ }
+
+ crtc->config.dpll_hw_state.fp0 = fp;
+
+ crtc->lowfreq_avail = false;
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ reduced_clock && i915.powersave) {
+ crtc->config.dpll_hw_state.fp1 = fp2;
+ crtc->lowfreq_avail = true;
+ } else {
+ crtc->config.dpll_hw_state.fp1 = fp;
}
}
-#define DATA_N 0x800000
-#define LINK_N 0x80000
+static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
+ pipe)
+{
+ u32 reg_val;
-static void
-ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
- int link_clock, struct fdi_m_n *m_n)
+	/*
+	 * The PLLB opamp always calibrates to the max value of 0x3f; force
+	 * enable it and set it to a reasonable value instead.
+	 */
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+ reg_val &= 0xffffff00;
+ reg_val |= 0x00000030;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+ reg_val &= 0x8cffffff;
+ reg_val = 0x8c000000;
+ vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+ reg_val &= 0xffffff00;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+ reg_val &= 0x00ffffff;
+ reg_val |= 0xb0000000;
+ vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+}
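/*
 * Editor's sketch (not part of the patch): each step above is a masked
 * read-modify-write over the DPIO sideband.  A hypothetical helper in the
 * vlv_dpio_read()/vlv_dpio_write() calling convention would express the
 * first step as sketch_vlv_dpio_rmw(dev_priv, pipe, VLV_PLL_DW9(1),
 * 0x000000ff, 0x00000030):
 */
static void sketch_vlv_dpio_rmw(struct drm_i915_private *dev_priv,
				enum pipe pipe, int reg, u32 clear, u32 set)
{
	u32 val = vlv_dpio_read(dev_priv, pipe, reg);

	val &= ~clear;
	val |= set;
	vlv_dpio_write(dev_priv, pipe, reg, val);
}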
+
+static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n)
{
- u64 temp;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
- m_n->tu = 64; /* default size */
+ I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
+ I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
+ I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
+ I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
+}
- temp = (u64) DATA_N * pixel_clock;
- temp = div_u64(temp, link_clock);
- m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes);
- m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */
- m_n->gmch_n = DATA_N;
- fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+ enum transcoder transcoder = crtc->config.cpu_transcoder;
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
+ I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
+ I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
+ I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
+ } else {
+ I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
+ I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
+ I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
+ I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
+ }
+}
- temp = (u64) LINK_N * pixel_clock;
- m_n->link_m = div_u64(temp, link_clock);
- m_n->link_n = LINK_N;
- fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
+static void intel_dp_set_m_n(struct intel_crtc *crtc)
+{
+ if (crtc->config.has_pch_encoder)
+ intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
+ else
+ intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
}
+static void vlv_update_pll(struct intel_crtc *crtc)
+{
+ u32 dpll, dpll_md;
-struct intel_watermark_params {
- unsigned long fifo_size;
- unsigned long max_wm;
- unsigned long default_wm;
- unsigned long guard_size;
- unsigned long cacheline_size;
-};
+ /*
+ * Enable DPIO clock input. We should never disable the reference
+ * clock for pipe B, since VGA hotplug / manual detection depends
+ * on it.
+ */
+ dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
+ DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
+ /* We should never disable this, set it here for state tracking */
+ if (crtc->pipe == PIPE_B)
+ dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ dpll |= DPLL_VCO_ENABLE;
+ crtc->config.dpll_hw_state.dpll = dpll;
-/* Pineview has different values for various configs */
-static struct intel_watermark_params pineview_display_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
-};
-static struct intel_watermark_params pineview_display_hplloff_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_HPLLOFF_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
-};
-static struct intel_watermark_params pineview_cursor_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE,
-};
-static struct intel_watermark_params pineview_cursor_hplloff_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
-};
-static struct intel_watermark_params g4x_wm_info = {
- G4X_FIFO_SIZE,
- G4X_MAX_WM,
- G4X_MAX_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
-};
-static struct intel_watermark_params i945_wm_info = {
- I945_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
-};
-static struct intel_watermark_params i915_wm_info = {
- I915_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
-};
-static struct intel_watermark_params i855_wm_info = {
- I855GM_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
-};
-static struct intel_watermark_params i830_wm_info = {
- I830_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
-};
+ dpll_md = (crtc->config.pixel_multiplier - 1)
+ << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ crtc->config.dpll_hw_state.dpll_md = dpll_md;
+}
-/**
- * intel_calculate_wm - calculate watermark level
- * @clock_in_khz: pixel clock
- * @wm: chip FIFO params
- * @pixel_size: display pixel size
- * @latency_ns: memory latency for the platform
- *
- * Calculate the watermark level (the level at which the display plane will
- * start fetching from memory again). Each chip has a different display
- * FIFO size and allocation, so the caller needs to figure that out and pass
- * in the correct intel_watermark_params structure.
- *
- * As the pixel clock runs, the FIFO will be drained at a rate that depends
- * on the pixel size. When it reaches the watermark level, it'll start
- * fetching FIFO line sized based chunks from memory until the FIFO fills
- * past the watermark point. If the FIFO drains completely, a FIFO underrun
- * will occur, and a display engine hang could result.
- */
-static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
- struct intel_watermark_params *wm,
- int pixel_size,
- unsigned long latency_ns)
+static void vlv_prepare_pll(struct intel_crtc *crtc)
{
- long entries_required, wm_size;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+ u32 mdiv;
+ u32 bestn, bestm1, bestm2, bestp1, bestp2;
+ u32 coreclk, reg_val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ bestn = crtc->config.dpll.n;
+ bestm1 = crtc->config.dpll.m1;
+ bestm2 = crtc->config.dpll.m2;
+ bestp1 = crtc->config.dpll.p1;
+ bestp2 = crtc->config.dpll.p2;
+
+ /* See eDP HDMI DPIO driver vbios notes doc */
+
+ /* PLL B needs special handling */
+ if (pipe == PIPE_B)
+ vlv_pllb_recal_opamp(dev_priv, pipe);
+
+ /* Set up Tx target for periodic Rcomp update */
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
+
+ /* Disable target IRef on PLL */
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
+ reg_val &= 0x00ffffff;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
+
+ /* Disable fast lock */
+ vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
+
+ /* Set idtafcrecal before PLL is enabled */
+ mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
+ mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
+ mdiv |= ((bestn << DPIO_N_SHIFT));
+ mdiv |= (1 << DPIO_K_SHIFT);
/*
- * Note: we need to make sure we don't overflow for various clock &
- * latency values.
- * clocks go from a few thousand to several hundred thousand.
- * latency is usually a few thousand
+ * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
+ * but we don't support that).
+ * Note: don't use the DAC post divider as it seems unstable.
*/
- entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
- 1000;
- entries_required /= wm->cacheline_size;
+ mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+
+ mdiv |= DPIO_ENABLE_CALIBRATION;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+
+ /* Set HBR and RBR LPF coefficients */
+ if (crtc->config.port_clock == 162000 ||
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+ 0x009f0003);
+ else
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+ 0x00d0000f);
+
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
+ /* Use SSC source */
+ if (pipe == PIPE_A)
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ 0x0df40000);
+ else
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ 0x0df70000);
+ } else { /* HDMI or VGA */
+ /* Use bend source */
+ if (pipe == PIPE_A)
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ 0x0df70000);
+ else
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ 0x0df40000);
+ }
+
+ coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
+ coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
+ coreclk |= 0x01000000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
+ mutex_unlock(&dev_priv->dpio_lock);
+}
- DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
+static void chv_update_pll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+ int dpll_reg = DPLL(crtc->pipe);
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 loopfilter, intcoeff;
+ u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
+ int refclk;
+
+ crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
+ DPLL_VCO_ENABLE;
+ if (pipe != PIPE_A)
+ crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ crtc->config.dpll_hw_state.dpll_md =
+ (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+
+ bestn = crtc->config.dpll.n;
+ bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
+ bestm1 = crtc->config.dpll.m1;
+ bestm2 = crtc->config.dpll.m2 >> 22;
+ bestp1 = crtc->config.dpll.p1;
+ bestp2 = crtc->config.dpll.p2;
- wm_size = wm->fifo_size - (entries_required + wm->guard_size);
+ /*
+ * Enable Refclk and SSC
+ */
+ I915_WRITE(dpll_reg,
+ crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* p1 and p2 divider */
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
+ 5 << DPIO_CHV_S1_DIV_SHIFT |
+ bestp1 << DPIO_CHV_P1_DIV_SHIFT |
+ bestp2 << DPIO_CHV_P2_DIV_SHIFT |
+ 1 << DPIO_CHV_K_DIV_SHIFT);
+
+ /* Feedback post-divider - m2 */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
+
+ /* Feedback refclk divider - n and m1 */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
+ DPIO_CHV_M1_DIV_BY_2 |
+ 1 << DPIO_CHV_N_DIV_SHIFT);
+
+ /* M2 fraction division */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+
+ /* M2 fraction division enable */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
+ DPIO_CHV_FRAC_DIV_EN |
+ (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
+
+ /* Loop filter */
+ refclk = i9xx_get_refclk(&crtc->base, 0);
+ loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
+ 2 << DPIO_CHV_GAIN_CTRL_SHIFT;
+ if (refclk == 100000)
+ intcoeff = 11;
+ else if (refclk == 38400)
+ intcoeff = 10;
+ else
+ intcoeff = 9;
+ loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
- DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
+ /* AFC Recal */
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
+ vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
+ DPIO_AFC_RECAL);
- /* Don't promote wm_size to unsigned... */
- if (wm_size > (long)wm->max_wm)
- wm_size = wm->max_wm;
- if (wm_size <= 0)
- wm_size = wm->default_wm;
- return wm_size;
+ mutex_unlock(&dev_priv->dpio_lock);
}
-struct cxsr_latency {
- int is_desktop;
- unsigned long fsb_freq;
- unsigned long mem_freq;
- unsigned long display_sr;
- unsigned long display_hpll_disable;
- unsigned long cursor_sr;
- unsigned long cursor_hpll_disable;
-};
+static void i9xx_update_pll(struct intel_crtc *crtc,
+ intel_clock_t *reduced_clock,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpll;
+ bool is_sdvo;
+ struct dpll *clock = &crtc->config.dpll;
-static struct cxsr_latency cxsr_latency_table[] = {
- {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
- {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
- {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
+ i9xx_update_pll_dividers(crtc, reduced_clock);
- {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
- {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
- {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
+ is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
- {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
- {1, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
- {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
+ dpll = DPLL_VGA_MODE_DIS;
- {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
- {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
- {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
- {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
- {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
- {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ dpll |= (crtc->config.pixel_multiplier - 1)
+ << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
- {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
- {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
- {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
-};
+ if (is_sdvo)
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ if (IS_PINEVIEW(dev))
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+ else {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (IS_G4X(dev) && reduced_clock)
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ }
+ switch (clock->p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+ if (INTEL_INFO(dev)->gen >= 4)
+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
-static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
- int mem)
+ if (crtc->config.sdvo_tv_clock)
+ dpll |= PLL_REF_INPUT_TVCLKINBC;
+ else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ crtc->config.dpll_hw_state.dpll = dpll;
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ u32 dpll_md = (crtc->config.pixel_multiplier - 1)
+ << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ crtc->config.dpll_hw_state.dpll_md = dpll_md;
+ }
+}
+
+static void i8xx_update_pll(struct intel_crtc *crtc,
+ intel_clock_t *reduced_clock,
+ int num_connectors)
{
- int i;
- struct cxsr_latency *latency;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpll;
+ struct dpll *clock = &crtc->config.dpll;
- if (fsb == 0 || mem == 0)
- return NULL;
+ i9xx_update_pll_dividers(crtc, reduced_clock);
- for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
- latency = &cxsr_latency_table[i];
- if (is_desktop == latency->is_desktop &&
- fsb == latency->fsb_freq && mem == latency->mem_freq)
- return latency;
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ } else {
+ if (clock->p1 == 2)
+ dpll |= PLL_P1_DIVIDE_BY_TWO;
+ else
+ dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (clock->p2 == 4)
+ dpll |= PLL_P2_DIVIDE_BY_4;
}
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
+ dpll |= DPLL_DVO_2X_MODE;
- return NULL;
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ crtc->config.dpll_hw_state.dpll = dpll;
}
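/*
 * Editor's sketch (not part of the patch): note the two P1 encodings used
 * above.  The LVDS and gen3+ paths store P1 as a one-hot bitmask, while
 * the i8xx DAC path stores it as a plain binary field biased by 2:
 */
static u32 sketch_p1_onehot(int p1)		/* valid p1 is 1..8 */
{
	return 1u << (p1 - 1);			/* e.g. p1 = 3 -> 0b100 */
}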
-static void pineview_disable_cxsr(struct drm_device *dev)
+static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
+ struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
+ enum pipe pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+ uint32_t crtc_vtotal, crtc_vblank_end;
+ int vsyncshift = 0;
+
+	/* We need to be careful not to change the adjusted mode, for otherwise
+	 * the hw state checker will get angry at the mismatch. */
+ crtc_vtotal = adjusted_mode->crtc_vtotal;
+ crtc_vblank_end = adjusted_mode->crtc_vblank_end;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* the chip adds 2 halflines automatically */
+ crtc_vtotal -= 1;
+ crtc_vblank_end -= 1;
+
+ if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
+ vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
+ else
+ vsyncshift = adjusted_mode->crtc_hsync_start -
+ adjusted_mode->crtc_htotal / 2;
+ if (vsyncshift < 0)
+ vsyncshift += adjusted_mode->crtc_htotal;
+ }
- /* deactivate cxsr */
- reg = I915_READ(DSPFW3);
- reg &= ~(PINEVIEW_SELF_REFRESH_EN);
- I915_WRITE(DSPFW3, reg);
- DRM_INFO("Big FIFO is disabled\n");
+ if (INTEL_INFO(dev)->gen > 3)
+ I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
+
+ I915_WRITE(HTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ I915_WRITE(HBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ I915_WRITE(HSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+ I915_WRITE(VTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_vdisplay - 1) |
+ ((crtc_vtotal - 1) << 16));
+ I915_WRITE(VBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_vblank_start - 1) |
+ ((crtc_vblank_end - 1) << 16));
+ I915_WRITE(VSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+
+ /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
+ * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
+ * documented on the DDI_FUNC_CTL register description, EDP Input Select
+ * bits. */
+ if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+ (pipe == PIPE_B || pipe == PIPE_C))
+ I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+
+ /* pipesrc controls the size that is scaled from, which should
+ * always be the user's requested size.
+ */
+ I915_WRITE(PIPESRC(pipe),
+ ((intel_crtc->config.pipe_src_w - 1) << 16) |
+ (intel_crtc->config.pipe_src_h - 1));
}
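/*
 * Editor's sketch (not part of the patch): every timing register written
 * above uses the same layout -- a "start"-style value minus one in bits
 * 15:0 and an "end"-style value minus one in bits 31:16, because the
 * hardware counts from zero.  intel_get_pipe_timings() below inverts
 * exactly this packing with its "+ 1" unpacking.
 */
static u32 sketch_pack_timing(u32 lo, u32 hi)
{
	return (lo - 1) | ((hi - 1) << 16);
}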
-static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
- int pixel_size)
+static void intel_get_pipe_timings(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
{
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
- unsigned long wm;
- struct cxsr_latency *latency;
-
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
- dev_priv->mem_freq);
- if (!latency) {
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
- pineview_disable_cxsr(dev);
- return;
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ uint32_t tmp;
+
+ tmp = I915_READ(HTOTAL(cpu_transcoder));
+ pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
+ pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+ tmp = I915_READ(HBLANK(cpu_transcoder));
+ pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
+ pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
+ tmp = I915_READ(HSYNC(cpu_transcoder));
+ pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
+ pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
+
+ tmp = I915_READ(VTOTAL(cpu_transcoder));
+ pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
+ pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+ tmp = I915_READ(VBLANK(cpu_transcoder));
+ pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
+ pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
+ tmp = I915_READ(VSYNC(cpu_transcoder));
+ pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
+ pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+
+ if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
+ pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+ pipe_config->adjusted_mode.crtc_vtotal += 1;
+ pipe_config->adjusted_mode.crtc_vblank_end += 1;
}
- /* Display SR */
- wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
- latency->display_sr);
- reg = I915_READ(DSPFW1);
- reg &= 0x7fffff;
- reg |= wm << 23;
- I915_WRITE(DSPFW1, reg);
- DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
-
- /* cursor SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
- latency->cursor_sr);
- reg = I915_READ(DSPFW3);
- reg &= ~(0x3f << 24);
- reg |= (wm & 0x3f) << 24;
- I915_WRITE(DSPFW3, reg);
-
- /* Display HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
- latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
- reg = I915_READ(DSPFW3);
- reg &= 0xfffffe00;
- reg |= wm & 0x1ff;
- I915_WRITE(DSPFW3, reg);
-
- /* cursor HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
- latency->cursor_hpll_disable);
- reg = I915_READ(DSPFW3);
- reg &= ~(0x3f << 16);
- reg |= (wm & 0x3f) << 16;
- I915_WRITE(DSPFW3, reg);
- DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
-
- /* activate cxsr */
- reg = I915_READ(DSPFW3);
- reg |= PINEVIEW_SELF_REFRESH_EN;
- I915_WRITE(DSPFW3, reg);
-
- DRM_INFO("Big FIFO is enabled\n");
-
- return;
-}
+ tmp = I915_READ(PIPESRC(crtc->pipe));
+ pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
+ pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
-/*
- * Latency for FIFO fetches is dependent on several factors:
- * - memory configuration (speed, channels)
- * - chipset
- * - current MCH state
- * It can be fairly high in some situations, so here we assume a fairly
- * pessimal value. It's a tradeoff between extra memory fetches (if we
- * set this value too high, the FIFO will fetch frequently to stay full)
- * and power consumption (set it too low to save power and we might see
- * FIFO underruns and display "flicker").
- *
- * A value of 5us seems to be a good balance; safe for very low end
- * platforms but not overly aggressive on lower latency configs.
- */
-static const int latency_ns = 5000;
+ pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
+ pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
+}
-static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+void intel_mode_from_pipe_config(struct drm_display_mode *mode,
+ struct intel_crtc_config *pipe_config)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
+ mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
+ mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
+ mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
+ mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
- if (plane == 0)
- size = dsparb & 0x7f;
- else
- size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
- (dsparb & 0x7f);
+ mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
+ mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
+ mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
+ mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
+ mode->flags = pipe_config->adjusted_mode.flags;
- return size;
+ mode->clock = pipe_config->adjusted_mode.crtc_clock;
}
-static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
+ struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
+ uint32_t pipeconf;
- if (plane == 0)
- size = dsparb & 0x1ff;
- else
- size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
- (dsparb & 0x1ff);
- size >>= 1; /* Convert to cachelines */
+ pipeconf = 0;
+
+ if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+ I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
+ pipeconf |= PIPECONF_ENABLE;
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
+ if (intel_crtc->config.double_wide)
+ pipeconf |= PIPECONF_DOUBLE_WIDE;
- return size;
+ /* only g4x and later have fancy bpc/dither controls */
+ if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+ /* Bspec claims that we can't use dithering for 30bpp pipes. */
+ if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
+ pipeconf |= PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+
+ switch (intel_crtc->config.pipe_bpp) {
+ case 18:
+ pipeconf |= PIPECONF_6BPC;
+ break;
+ case 24:
+ pipeconf |= PIPECONF_8BPC;
+ break;
+ case 30:
+ pipeconf |= PIPECONF_10BPC;
+ break;
+ default:
+ /* Case prevented by intel_choose_pipe_bpp_dither. */
+ BUG();
+ }
+ }
+
+ if (HAS_PIPE_CXSR(dev)) {
+ if (intel_crtc->lowfreq_avail) {
+ DRM_DEBUG_KMS("enabling CxSR downclocking\n");
+ pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
+ } else {
+ DRM_DEBUG_KMS("disabling CxSR downclocking\n");
+ }
+ }
+
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (INTEL_INFO(dev)->gen < 4 ||
+ intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
+ pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ else
+ pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
+ } else
+ pipeconf |= PIPECONF_PROGRESSIVE;
+
+ if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
+ pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
+
+ I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
+ POSTING_READ(PIPECONF(intel_crtc->pipe));
}
-static int i845_get_fifo_size(struct drm_device *dev, int plane)
+static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ int x, int y,
+ struct drm_framebuffer *fb)
{
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int refclk, num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ bool ok, has_reduced_clock = false;
+ bool is_lvds = false, is_dsi = false;
+ struct intel_encoder *encoder;
+ const intel_limit_t *limit;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_DSI:
+ is_dsi = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ if (is_dsi)
+ return 0;
- size = dsparb & 0x7f;
- size >>= 2; /* Convert to cachelines */
+ if (!intel_crtc->config.clock_set) {
+ refclk = i9xx_get_refclk(crtc, num_connectors);
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A",
- size);
+		/*
+		 * Returns a set of divisors for the desired target clock with
+		 * the given refclk, or FALSE.  The returned values represent
+		 * the clock equation:
+		 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+		 */
+ limit = intel_limit(crtc, refclk);
+ ok = dev_priv->display.find_dpll(limit, crtc,
+ intel_crtc->config.port_clock,
+ refclk, NULL, &clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
- return size;
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+			/*
+			 * Ensure we match the reduced clock's P to the target
+			 * clock. If the clocks don't match, we can't switch
+			 * the display clock by using the FP0/FP1. In that case
+			 * we will disable the LVDS downclock feature.
+			 */
+ has_reduced_clock =
+ dev_priv->display.find_dpll(limit, crtc,
+ dev_priv->lvds_downclock,
+ refclk, &clock,
+ &reduced_clock);
+ }
+ /* Compat-code for transition, will disappear. */
+ intel_crtc->config.dpll.n = clock.n;
+ intel_crtc->config.dpll.m1 = clock.m1;
+ intel_crtc->config.dpll.m2 = clock.m2;
+ intel_crtc->config.dpll.p1 = clock.p1;
+ intel_crtc->config.dpll.p2 = clock.p2;
+ }
+
+ if (IS_GEN2(dev)) {
+ i8xx_update_pll(intel_crtc,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
+ } else if (IS_CHERRYVIEW(dev)) {
+ chv_update_pll(intel_crtc);
+ } else if (IS_VALLEYVIEW(dev)) {
+ vlv_update_pll(intel_crtc);
+ } else {
+ i9xx_update_pll(intel_crtc,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
+ }
+
+ return 0;
}
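/*
 * Editor's sketch (not part of the patch): the divisor search above is
 * driven by the clock equation quoted in the comment.  Evaluated directly
 * (clocks in kHz), so candidate divisor sets can be checked by hand:
 */
static int sketch_i9xx_dotclock(int refclk, int n, int m1, int m2,
				int p1, int p2)
{
	int m = 5 * (m1 + 2) + (m2 + 2);

	return refclk * m / (n + 2) / p1 / p2;
}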
-static int i830_get_fifo_size(struct drm_device *dev, int plane)
+static void i9xx_get_pfit_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
{
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
+ uint32_t tmp;
- size = dsparb & 0x7f;
- size >>= 1; /* Convert to cachelines */
+ if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+ return;
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
+ tmp = I915_READ(PFIT_CONTROL);
+ if (!(tmp & PFIT_ENABLE))
+ return;
- return size;
+ /* Check whether the pfit is attached to our pipe. */
+ if (INTEL_INFO(dev)->gen < 4) {
+ if (crtc->pipe != PIPE_B)
+ return;
+ } else {
+ if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
+ return;
+ }
+
+ pipe_config->gmch_pfit.control = tmp;
+ pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
+ if (INTEL_INFO(dev)->gen < 5)
+ pipe_config->gmch_pfit.lvds_border_bits =
+ I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}
-static void g4x_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size)
+static void vlv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
{
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int total_size, cacheline_size;
- int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr;
- struct intel_watermark_params planea_params, planeb_params;
- unsigned long line_time_us;
- int sr_clock, sr_entries = 0, entries_required;
+ int pipe = pipe_config->cpu_transcoder;
+ intel_clock_t clock;
+ u32 mdiv;
+ int refclk = 100000;
- /* Create copies of the base settings for each pipe */
- planea_params = planeb_params = g4x_wm_info;
+ mutex_lock(&dev_priv->dpio_lock);
+ mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
+ mutex_unlock(&dev_priv->dpio_lock);
- /* Grab a couple of global values before we overwrite them */
- total_size = planea_params.fifo_size;
- cacheline_size = planea_params.cacheline_size;
+ clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
+ clock.m2 = mdiv & DPIO_M2DIV_MASK;
+ clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
+ clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
+ clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
- /*
- * Note: we need to make sure we don't overflow for various clock &
- * latency values.
- * clocks go from a few thousand to several hundred thousand.
- * latency is usually a few thousand
- */
- entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
- 1000;
- entries_required /= G4X_FIFO_LINE_SIZE;
- planea_wm = entries_required + planea_params.guard_size;
+ vlv_clock(refclk, &clock);
- entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
- 1000;
- entries_required /= G4X_FIFO_LINE_SIZE;
- planeb_wm = entries_required + planeb_params.guard_size;
+ /* clock.dot is the fast clock */
+ pipe_config->port_clock = clock.dot / 5;
+}
- cursora_wm = cursorb_wm = 16;
- cursor_sr = 32;
+static void i9xx_get_plane_config(struct intel_crtc *crtc,
+ struct intel_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, base, offset;
+ int pipe = crtc->pipe, plane = crtc->plane;
+ int fourcc, pixel_format;
+ int aligned_height;
+
+ crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
+ if (!crtc->base.primary->fb) {
+ DRM_DEBUG_KMS("failed to alloc fb\n");
+ return;
+ }
- DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+ val = I915_READ(DSPCNTR(plane));
- /* Calc sr entries for one plane configs */
- if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 12000;
+ if (INTEL_INFO(dev)->gen >= 4)
+ if (val & DISPPLANE_TILED)
+ plane_config->tiled = true;
- sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+ pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
+ fourcc = intel_format_to_fourcc(pixel_format);
+ crtc->base.primary->fb->pixel_format = fourcc;
+ crtc->base.primary->fb->bits_per_pixel =
+ drm_format_plane_cpp(fourcc, 0) * 8;
- /* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1) *
- pixel_size * sr_hdisplay) / 1000;
- sr_entries = roundup(sr_entries / cacheline_size, 1);
- DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (plane_config->tiled)
+ offset = I915_READ(DSPTILEOFF(plane));
+ else
+ offset = I915_READ(DSPLINOFF(plane));
+ base = I915_READ(DSPSURF(plane)) & 0xfffff000;
+ } else {
+ base = I915_READ(DSPADDR(plane));
}
+ plane_config->base = base;
+
+ val = I915_READ(PIPESRC(pipe));
+ crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
+ crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
- DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
- planea_wm, planeb_wm, sr_entries);
+ val = I915_READ(DSPSTRIDE(pipe));
+ crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
- planea_wm &= 0x3f;
- planeb_wm &= 0x3f;
+ aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
+ plane_config->tiled);
+
+ plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
+ aligned_height, PAGE_SIZE);
+
+ DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ pipe, plane, crtc->base.primary->fb->width,
+ crtc->base.primary->fb->height,
+ crtc->base.primary->fb->bits_per_pixel, base,
+ crtc->base.primary->fb->pitches[0],
+ plane_config->size);
- I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm);
- I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
- /* HPLL off in SR has some issues on G4x... disable it */
- I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void i965_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size)
+static void chv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
{
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long line_time_us;
- int sr_clock, sr_entries, srwm = 1;
+ int pipe = pipe_config->cpu_transcoder;
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ intel_clock_t clock;
+ u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
+ int refclk = 100000;
+
+ mutex_lock(&dev_priv->dpio_lock);
+ cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
+ pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
+ pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
+ pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
+ clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
+ clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
+ clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
+ clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
+
+ chv_clock(refclk, &clock);
+
+ /* clock.dot is the fast clock */
+ pipe_config->port_clock = clock.dot / 5;
+}
+
+static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ if (!intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
+ return false;
+
+ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+ pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+
+ tmp = I915_READ(PIPECONF(crtc->pipe));
+ if (!(tmp & PIPECONF_ENABLE))
+ return false;
+
+ if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+ switch (tmp & PIPECONF_BPC_MASK) {
+ case PIPECONF_6BPC:
+ pipe_config->pipe_bpp = 18;
+ break;
+ case PIPECONF_8BPC:
+ pipe_config->pipe_bpp = 24;
+ break;
+ case PIPECONF_10BPC:
+ pipe_config->pipe_bpp = 30;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
+ pipe_config->limited_color_range = true;
- /* Calc sr entries for one plane configs */
- if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 12000;
+ if (INTEL_INFO(dev)->gen < 4)
+ pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
- sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+ intel_get_pipe_timings(crtc, pipe_config);
- /* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1) *
- pixel_size * sr_hdisplay) / 1000;
- sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
- DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
- srwm = I945_FIFO_SIZE - sr_entries;
- if (srwm < 0)
- srwm = 1;
- srwm &= 0x3f;
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ i9xx_get_pfit_config(crtc, pipe_config);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ tmp = I915_READ(DPLL_MD(crtc->pipe));
+ pipe_config->pixel_multiplier =
+ ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
+ >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
+ pipe_config->dpll_hw_state.dpll_md = tmp;
+ } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ tmp = I915_READ(DPLL(crtc->pipe));
+ pipe_config->pixel_multiplier =
+ ((tmp & SDVO_MULTIPLIER_MASK)
+ >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
+ } else {
+ /* Note that on i915G/GM the pixel multiplier is in the sdvo
+ * port and will be fixed up in the encoder->get_config
+ * function. */
+ pipe_config->pixel_multiplier = 1;
+ }
+ pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
+ if (!IS_VALLEYVIEW(dev)) {
+ pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
+ pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
+ } else {
+ /* Mask out read-only status bits. */
+ pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
+ DPLL_PORTC_READY_MASK |
+ DPLL_PORTB_READY_MASK);
}
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
- srwm);
+ if (IS_CHERRYVIEW(dev))
+ chv_crtc_clock_get(crtc, pipe_config);
+ else if (IS_VALLEYVIEW(dev))
+ vlv_crtc_clock_get(crtc, pipe_config);
+ else
+ i9xx_crtc_clock_get(crtc, pipe_config);
- /* 965 has limitations... */
- I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
- (8 << 0));
- I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+ return true;
}
-static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size)
+static void ironlake_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t fwater_lo;
- uint32_t fwater_hi;
- int total_size, cacheline_size, cwm, srwm = 1;
- int planea_wm, planeb_wm;
- struct intel_watermark_params planea_params, planeb_params;
- unsigned long line_time_us;
- int sr_clock, sr_entries = 0;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+ u32 val, final;
+ bool has_lvds = false;
+ bool has_cpu_edp = false;
+ bool has_panel = false;
+ bool has_ck505 = false;
+ bool can_ssc = false;
+
+ /* We need to take the global config into account */
+ list_for_each_entry(encoder, &mode_config->encoder_list,
+ base.head) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ has_panel = true;
+ has_lvds = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ has_panel = true;
+ if (enc_to_dig_port(&encoder->base)->port == PORT_A)
+ has_cpu_edp = true;
+ break;
+ }
+ }
+
+ if (HAS_PCH_IBX(dev)) {
+ has_ck505 = dev_priv->vbt.display_clock_mode;
+ can_ssc = has_ck505;
+ } else {
+ has_ck505 = false;
+ can_ssc = true;
+ }
- /* Create copies of the base settings for each pipe */
- if (IS_I965GM(dev) || IS_I945GM(dev))
- planea_params = planeb_params = i945_wm_info;
- else if (IS_I9XX(dev))
- planea_params = planeb_params = i915_wm_info;
+ DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
+ has_panel, has_lvds, has_ck505);
+
+	/* Ironlake: try to set up the display reference clock before
+	 * enabling the DPLLs. This is only under the driver's control
+	 * after PCH B stepping; earlier steppings ignore this setting.
+	 */
+ val = I915_READ(PCH_DREF_CONTROL);
+
+ /* As we must carefully and slowly disable/enable each source in turn,
+ * compute the final state we want first and check if we need to
+ * make any changes at all.
+ */
+ final = val;
+ final &= ~DREF_NONSPREAD_SOURCE_MASK;
+ if (has_ck505)
+ final |= DREF_NONSPREAD_CK505_ENABLE;
else
- planea_params = planeb_params = i855_wm_info;
+ final |= DREF_NONSPREAD_SOURCE_ENABLE;
- /* Grab a couple of global values before we overwrite them */
- total_size = planea_params.fifo_size;
- cacheline_size = planea_params.cacheline_size;
+ final &= ~DREF_SSC_SOURCE_MASK;
+ final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+ final &= ~DREF_SSC1_ENABLE;
- /* Update per-plane FIFO sizes */
- planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
- planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+ if (has_panel) {
+ final |= DREF_SSC_SOURCE_ENABLE;
- planea_wm = intel_calculate_wm(planea_clock, &planea_params,
- pixel_size, latency_ns);
- planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
- pixel_size, latency_ns);
- DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+ if (intel_panel_use_ssc(dev_priv) && can_ssc)
+ final |= DREF_SSC1_ENABLE;
- /*
- * Overlay gets an aggressive default since video jitter is bad.
- */
- cwm = 2;
+ if (has_cpu_edp) {
+ if (intel_panel_use_ssc(dev_priv) && can_ssc)
+ final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ else
+ final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ } else
+ final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ } else {
+ final |= DREF_SSC_SOURCE_DISABLE;
+ final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ }
+
+ if (final == val)
+ return;
+
+ /* Always enable nonspread source */
+ val &= ~DREF_NONSPREAD_SOURCE_MASK;
+
+ if (has_ck505)
+ val |= DREF_NONSPREAD_CK505_ENABLE;
+ else
+ val |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+ if (has_panel) {
+ val &= ~DREF_SSC_SOURCE_MASK;
+ val |= DREF_SSC_SOURCE_ENABLE;
+
+ /* SSC must be turned on before enabling the CPU output */
+ if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ DRM_DEBUG_KMS("Using SSC on panel\n");
+ val |= DREF_SSC1_ENABLE;
+ } else
+ val &= ~DREF_SSC1_ENABLE;
+
+ /* Get SSC going before enabling the outputs */
+ I915_WRITE(PCH_DREF_CONTROL, val);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
- /* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev) && sr_hdisplay &&
- (!planea_clock || !planeb_clock)) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 6000;
+ val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Enable CPU source on CPU attached eDP */
+ if (has_cpu_edp) {
+ if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ DRM_DEBUG_KMS("Using SSC on eDP\n");
+ val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ } else
+ val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ } else
+ val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
- sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+ I915_WRITE(PCH_DREF_CONTROL, val);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+ } else {
+ DRM_DEBUG_KMS("Disabling SSC entirely\n");
- /* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1) *
- pixel_size * sr_hdisplay) / 1000;
- sr_entries = roundup(sr_entries / cacheline_size, 1);
- DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
- srwm = total_size - sr_entries;
- if (srwm < 0)
- srwm = 1;
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
+ val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Turn off CPU output */
+ val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, val);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+
+ /* Turn off the SSC source */
+ val &= ~DREF_SSC_SOURCE_MASK;
+ val |= DREF_SSC_SOURCE_DISABLE;
+
+ /* Turn off SSC1 */
+ val &= ~DREF_SSC1_ENABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, val);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
}
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
- planea_wm, planeb_wm, cwm, srwm);
+ BUG_ON(val != final);
+}
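/*
 * Editor's sketch (not part of the patch): each PCH_DREF_CONTROL write in
 * the sequence above is followed by a posting read and a 200 us settle
 * delay.  A hypothetical helper capturing that repeated idiom:
 */
static void sketch_dref_write(struct drm_i915_private *dev_priv, u32 val)
{
	I915_WRITE(PCH_DREF_CONTROL, val);
	POSTING_READ(PCH_DREF_CONTROL);	/* flush the write */
	udelay(200);			/* let the refclk source settle */
}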
+
+static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
+{
+ uint32_t tmp;
- fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
- fwater_hi = (cwm & 0x1f);
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
- /* Set request length to 8 cachelines per fetch */
- fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
- fwater_hi = fwater_hi | (1 << 8);
+ if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ DRM_ERROR("FDI mPHY reset assert timeout\n");
+
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+ DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+}
- I915_WRITE(FW_BLC, fwater_lo);
- I915_WRITE(FW_BLC2, fwater_hi);
+/* WaMPhyProgramming:hsw */
+static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
+{
+ uint32_t tmp;
+
+ tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+ tmp &= ~(0xFF << 24);
+ tmp |= (0x12 << 24);
+ intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
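/*
 * Editor's sketch (not part of the patch): every step of the mPHY
 * programming above is a masked read-modify-write over the SBI bus.  A
 * hypothetical helper in the intel_sbi_read()/intel_sbi_write() calling
 * convention would shrink the sequence to one line per register, e.g.
 * sketch_sbi_mphy_rmw(dev_priv, 0x8008, 0xFF << 24, 0x12 << 24):
 */
static void sketch_sbi_mphy_rmw(struct drm_i915_private *dev_priv, u16 reg,
				u32 clear, u32 set)
{
	u32 tmp = intel_sbi_read(dev_priv, reg, SBI_MPHY);

	tmp &= ~clear;
	tmp |= set;
	intel_sbi_write(dev_priv, reg, tmp, SBI_MPHY);
}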
-static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
- int unused2, int pixel_size)
+/* Implements 3 different sequences from BSpec chapter "Display iCLK
+ * Programming" based on the parameters passed:
+ * - Sequence to enable CLKOUT_DP
+ * - Sequence to enable CLKOUT_DP without spread
+ * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
+ */
+static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
+ bool with_fdi)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
- int planea_wm;
+ uint32_t reg, tmp;
+
+ if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
+ with_spread = true;
+ if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
+ with_fdi, "LP PCH doesn't have FDI\n"))
+ with_fdi = false;
+
+ mutex_lock(&dev_priv->dpio_lock);
- i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_DISABLE;
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
- planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info,
- pixel_size, latency_ns);
- fwater_lo |= (3<<8) | planea_wm;
+ udelay(24);
+
+ if (with_spread) {
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ if (with_fdi) {
+ lpt_reset_fdi_mphy(dev_priv);
+ lpt_program_fdi_mphy(dev_priv);
+ }
+ }
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
+ reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
+ SBI_GEN0 : SBI_DBUFF0;
+ tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+ intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
- I915_WRITE(FW_BLC, fwater_lo);
+ mutex_unlock(&dev_priv->dpio_lock);
}
-/**
- * intel_update_watermarks - update FIFO watermark values based on current modes
- *
- * Calculate watermark values for the various WM regs based on current mode
- * and plane configuration.
- *
- * There are several cases to deal with here:
- * - normal (i.e. non-self-refresh)
- * - self-refresh (SR) mode
- * - lines are large relative to FIFO size (buffer can hold up to 2)
- * - lines are small relative to FIFO size (buffer can hold more than 2
- * lines), so need to account for TLB latency
- *
- * The normal calculation is:
- * watermark = dotclock * bytes per pixel * latency
- * where latency is platform & configuration dependent (we assume pessimal
- * values here).
- *
- * The SR calculation is:
- * watermark = (trunc(latency/line time)+1) * surface width *
- * bytes per pixel
- * where
- * line time = htotal / dotclock
- * and latency is assumed to be high, as above.
- *
- * The final value programmed to the register should always be rounded up,
- * and include an extra 2 entries to account for clock crossings.
- *
- * We don't use the sprite, so we can ignore that. And on Crestline we have
- * to set the non-SR watermarks to 8.
- */
-static void intel_update_watermarks(struct drm_device *dev)
+/* Sequence to disable CLKOUT_DP */
+static void lpt_disable_clkout_dp(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
- int sr_hdisplay = 0;
- unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
- int enabled = 0, pixel_size = 0;
+ uint32_t reg, tmp;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
+ SBI_GEN0 : SBI_DBUFF0;
+ tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+ intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ if (!(tmp & SBI_SSCCTL_DISABLE)) {
+ if (!(tmp & SBI_SSCCTL_PATHALT)) {
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ udelay(32);
+ }
+ tmp |= SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ }
- if (!dev_priv->display.update_wm)
- return;
+ mutex_unlock(&dev_priv->dpio_lock);
+}
- /* Get the clock config from both planes */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- intel_crtc = to_intel_crtc(crtc);
- if (crtc->enabled) {
- enabled++;
- if (intel_crtc->plane == 0) {
- DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
- intel_crtc->pipe, crtc->mode.clock);
- planea_clock = crtc->mode.clock;
- } else {
- DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
- intel_crtc->pipe, crtc->mode.clock);
- planeb_clock = crtc->mode.clock;
- }
- sr_hdisplay = crtc->mode.hdisplay;
- sr_clock = crtc->mode.clock;
- if (crtc->fb)
- pixel_size = crtc->fb->bits_per_pixel / 8;
- else
- pixel_size = 4; /* by default */
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+ bool has_vga = false;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ has_vga = true;
+ break;
}
}
- if (enabled <= 0)
- return;
+ if (has_vga)
+ lpt_enable_clkout_dp(dev, true, true);
+ else
+ lpt_disable_clkout_dp(dev);
+}
- /* Single plane configs can enable self refresh */
- if (enabled == 1 && IS_PINEVIEW(dev))
- pineview_enable_cxsr(dev, sr_clock, pixel_size);
- else if (IS_PINEVIEW(dev))
- pineview_disable_cxsr(dev);
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void intel_init_pch_refclk(struct drm_device *dev)
+{
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ ironlake_init_pch_refclk(dev);
+ else if (HAS_PCH_LPT(dev))
+ lpt_init_pch_refclk(dev);
+}
+
+static int ironlake_get_refclk(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *encoder;
+ int num_connectors = 0;
+ bool is_lvds = false;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ }
+ num_connectors++;
+ }
- dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
- sr_hdisplay, pixel_size);
+ if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+ DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
+ dev_priv->vbt.lvds_ssc_freq);
+ return dev_priv->vbt.lvds_ssc_freq;
+ }
+
+ return 120000;
}
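+
+/*
+ * Editorial example (not part of the patch): when the only enabled output
+ * is an SSC-capable LVDS panel, the VBT-provided SSC reference is used
+ * (a typical value would be 120000 kHz); every other configuration falls
+ * through to the fixed 120 MHz PCH reference returned above.
+ */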
-static int intel_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
+static void ironlake_set_pipeconf(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ uint32_t val;
+
+ val = 0;
+
+ switch (intel_crtc->config.pipe_bpp) {
+ case 18:
+ val |= PIPECONF_6BPC;
+ break;
+ case 24:
+ val |= PIPECONF_8BPC;
+ break;
+ case 30:
+ val |= PIPECONF_10BPC;
+ break;
+ case 36:
+ val |= PIPECONF_12BPC;
+ break;
+ default:
+ /* Case prevented by intel_choose_pipe_bpp_dither. */
+ BUG();
+ }
+
+ if (intel_crtc->config.dither)
+ val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACED_ILK;
+ else
+ val |= PIPECONF_PROGRESSIVE;
+
+ if (intel_crtc->config.limited_color_range)
+ val |= PIPECONF_COLOR_RANGE_SELECT;
+
+ I915_WRITE(PIPECONF(pipe), val);
+ POSTING_READ(PIPECONF(pipe));
+}
+
+/*
+ * Set up the pipe CSC unit.
+ *
+ * Currently only full range RGB to limited range RGB conversion
+ * is supported, but eventually this should handle various
+ * RGB<->YCbCr scenarios as well.
+ */
+static void intel_set_pipe_csc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- int fp_reg = (pipe == 0) ? FPA0 : FPB0;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
- int refclk, num_outputs = 0;
- intel_clock_t clock, reduced_clock;
- u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
- bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- bool is_edp = false;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
- const intel_limit_t *limit;
- int ret;
- struct fdi_m_n m_n = {0};
- int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1;
- int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1;
- int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1;
- int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1;
- int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
- int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int lvds_reg = LVDS;
- u32 temp;
- int sdvo_pixel_multiply;
- int target_clock;
+ uint16_t coeff = 0x7800; /* 1.0 */
- drm_vblank_pre_modeset(dev, pipe);
+ /*
+ * TODO: Check what kind of values actually come out of the pipe
+ * with these coeff/postoff values and adjust to get the best
+ * accuracy. Perhaps we even need to take the bpc value into
+ * consideration.
+ */
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_output *intel_output = to_intel_output(connector);
+ if (intel_crtc->config.limited_color_range)
+ coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
- if (!connector->encoder || connector->encoder->crtc != crtc)
- continue;
+ /*
+ * GY/GU and RY/RU should be the other way around according
+ * to BSpec, but reality doesn't agree. Just set them up in
+ * a way that results in the correct picture.
+ */
+ I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
+ I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
- switch (intel_output->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- case INTEL_OUTPUT_SDVO:
- case INTEL_OUTPUT_HDMI:
- is_sdvo = true;
- if (intel_output->needs_tv_clock)
- is_tv = true;
- break;
- case INTEL_OUTPUT_DVO:
- is_dvo = true;
- break;
- case INTEL_OUTPUT_TVOUT:
- is_tv = true;
+ I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
+ I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
+
+ I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
+ I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
+
+ I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+
+ if (INTEL_INFO(dev)->gen > 6) {
+ uint16_t postoff = 0;
+
+ if (intel_crtc->config.limited_color_range)
+ postoff = (16 * (1 << 12) / 255) & 0x1fff;
+
+ I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
+ I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
+ I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
+
+ I915_WRITE(PIPE_CSC_MODE(pipe), 0);
+ } else {
+ uint32_t mode = CSC_MODE_YUV_TO_RGB;
+
+ if (intel_crtc->config.limited_color_range)
+ mode |= CSC_BLACK_SCREEN_OFFSET;
+
+ I915_WRITE(PIPE_CSC_MODE(pipe), mode);
+ }
+}
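+
+/*
+ * Editorial arithmetic note (not part of the patch): with the limited
+ * color range selected above, coeff = ((235 - 16) * (1 << 12) / 255)
+ * & 0xff8 evaluates to 0xdb8, a fractional coefficient that compresses
+ * full-range RGB into the 16..235 range, and on gen > 6 the post offset
+ * (16 * (1 << 12) / 255) & 0x1fff evaluates to 0x101, the black-level
+ * lift for that range.
+ */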
+
+static void haswell_set_pipeconf(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ uint32_t val;
+
+ val = 0;
+
+ if (IS_HASWELL(dev) && intel_crtc->config.dither)
+ val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACED_ILK;
+ else
+ val |= PIPECONF_PROGRESSIVE;
+
+ I915_WRITE(PIPECONF(cpu_transcoder), val);
+ POSTING_READ(PIPECONF(cpu_transcoder));
+
+ I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
+ POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
+
+ if (IS_BROADWELL(dev)) {
+ val = 0;
+
+ switch (intel_crtc->config.pipe_bpp) {
+ case 18:
+ val |= PIPEMISC_DITHER_6_BPC;
break;
- case INTEL_OUTPUT_ANALOG:
- is_crt = true;
+ case 24:
+ val |= PIPEMISC_DITHER_8_BPC;
break;
- case INTEL_OUTPUT_DISPLAYPORT:
- is_dp = true;
+ case 30:
+ val |= PIPEMISC_DITHER_10_BPC;
break;
- case INTEL_OUTPUT_EDP:
- is_edp = true;
+ case 36:
+ val |= PIPEMISC_DITHER_12_BPC;
break;
+ default:
+ /* Case prevented by pipe_config_set_bpp. */
+ BUG();
}
- num_outputs++;
+ if (intel_crtc->config.dither)
+ val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
+
+ I915_WRITE(PIPEMISC(pipe), val);
}
+}
- if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
- refclk = dev_priv->lvds_ssc_freq * 1000;
- DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
- refclk / 1000);
- } else if (IS_I9XX(dev)) {
- refclk = 96000;
- if (IS_IRONLAKE(dev))
- refclk = 120000; /* 120Mhz refclk */
- } else {
- refclk = 48000;
+static bool ironlake_compute_clocks(struct drm_crtc *crtc,
+ intel_clock_t *clock,
+ bool *has_reduced_clock,
+ intel_clock_t *reduced_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ int refclk;
+ const intel_limit_t *limit;
+ bool ret, is_lvds = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ }
}
-
+
+ refclk = ironlake_get_refclk(crtc);
/*
* Returns a set of divisors for the desired target clock with the given
* refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
- limit = intel_limit(crtc);
- ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
- if (!ok) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
- drm_vblank_post_modeset(dev, pipe);
- return -EINVAL;
+ limit = intel_limit(crtc, refclk);
+ ret = dev_priv->display.find_dpll(limit, crtc,
+ to_intel_crtc(crtc)->config.port_clock,
+ refclk, NULL, clock);
+ if (!ret)
+ return false;
+
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target clock.
+ * If the clocks don't match, we can't switch the display clock
+	 * by using the FP0/FP1. In that case we disable the LVDS
+	 * downclock feature.
+ */
+ *has_reduced_clock =
+ dev_priv->display.find_dpll(limit, crtc,
+ dev_priv->lvds_downclock,
+ refclk, clock,
+ reduced_clock);
}
- if (is_lvds && limit->find_reduced_pll &&
- dev_priv->lvds_downclock_avail) {
- memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
- has_reduced_clock = limit->find_reduced_pll(limit, crtc,
- dev_priv->lvds_downclock,
- refclk,
- &reduced_clock);
- if (has_reduced_clock && (clock.p != reduced_clock.p)) {
- /*
- * If the different P is found, it means that we can't
- * switch the display clock by using the FP0/FP1.
- * In such case we will disable the LVDS downclock
- * feature.
- */
- DRM_DEBUG_KMS("Different P is found for "
- "LVDS clock/downclock\n");
- has_reduced_clock = 0;
- }
- }
- /* SDVO TV has fixed PLL values depend on its clock range,
- this mirrors vbios setting. */
- if (is_sdvo && is_tv) {
- if (adjusted_mode->clock >= 100000
- && adjusted_mode->clock < 140500) {
- clock.p1 = 2;
- clock.p2 = 10;
- clock.n = 3;
- clock.m1 = 16;
- clock.m2 = 8;
- } else if (adjusted_mode->clock >= 140500
- && adjusted_mode->clock <= 200000) {
- clock.p1 = 1;
- clock.p2 = 10;
- clock.n = 6;
- clock.m1 = 12;
- clock.m2 = 8;
- }
- }
+ return true;
+}
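+
+/*
+ * Editorial worked example (divisor values assumed, not from the patch):
+ * for the clock equation quoted above,
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2,
+ * refclk = 120000 kHz with m1 = 12, m2 = 9, n = 3, p1 = 2, p2 = 10 gives
+ * 120000 * (5 * 14 + 11) / 5 / 2 / 10 = 97200 kHz.
+ */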
- /* FDI link */
- if (IS_IRONLAKE(dev)) {
- int lane, link_bw, bpp;
- /* eDP doesn't require FDI link, so just set DP M/N
- according to current link config */
- if (is_edp) {
- struct drm_connector *edp;
- target_clock = mode->clock;
- edp = intel_pipe_get_output(crtc);
- intel_edp_link_config(to_intel_output(edp),
- &lane, &link_bw);
- } else {
- /* DP over FDI requires target mode clock
- instead of link clock */
- if (is_dp)
- target_clock = mode->clock;
- else
- target_clock = adjusted_mode->clock;
- lane = 4;
- link_bw = 270000;
- }
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+ /*
+ * Account for spread spectrum to avoid
+ * oversubscribing the link. Max center spread
+ * is 2.5%; use 5% for safety's sake.
+ */
+ u32 bps = target_clock * bpp * 21 / 20;
+ return DIV_ROUND_UP(bps, link_bw * 8);
+}
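+
+/*
+ * Editorial worked example (values assumed, not from the patch): a
+ * 148500 kHz pixel clock at 24 bpp over a 270000 kHz FDI link needs
+ * bps = 148500 * 24 * 21 / 20 = 3742200, so
+ * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.
+ */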
- /* determine panel color depth */
- temp = I915_READ(pipeconf_reg);
- temp &= ~PIPE_BPC_MASK;
- if (is_lvds) {
- int lvds_reg = I915_READ(PCH_LVDS);
- /* the BPC will be 6 if it is 18-bit LVDS panel */
- if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
- temp |= PIPE_8BPC;
- else
- temp |= PIPE_6BPC;
- } else
- temp |= PIPE_8BPC;
- I915_WRITE(pipeconf_reg, temp);
- I915_READ(pipeconf_reg);
+static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
+{
+ return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
+}
- switch (temp & PIPE_BPC_MASK) {
- case PIPE_8BPC:
- bpp = 24;
- break;
- case PIPE_10BPC:
- bpp = 30;
- break;
- case PIPE_6BPC:
- bpp = 18;
+static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+ u32 *fp,
+ intel_clock_t *reduced_clock, u32 *fp2)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ uint32_t dpll;
+ int factor, num_connectors = 0;
+ bool is_lvds = false, is_sdvo = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
break;
- case PIPE_12BPC:
- bpp = 36;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
break;
- default:
- DRM_ERROR("unknown pipe bpc value\n");
- bpp = 24;
}
- ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
+ num_connectors++;
}
- /* Ironlake: try to setup display ref clock before DPLL
- * enabling. This is only under driver's control after
- * PCH B stepping, previous chipset stepping should be
- * ignoring this setting.
- */
- if (IS_IRONLAKE(dev)) {
- temp = I915_READ(PCH_DREF_CONTROL);
- /* Always enable nonspread source */
- temp &= ~DREF_NONSPREAD_SOURCE_MASK;
- temp |= DREF_NONSPREAD_SOURCE_ENABLE;
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
+ /* Enable autotuning of the PLL clock (if permissible) */
+ factor = 21;
+ if (is_lvds) {
+ if ((intel_panel_use_ssc(dev_priv) &&
+ dev_priv->vbt.lvds_ssc_freq == 100000) ||
+ (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
+ factor = 25;
+ } else if (intel_crtc->config.sdvo_tv_clock)
+ factor = 20;
- temp &= ~DREF_SSC_SOURCE_MASK;
- temp |= DREF_SSC_SOURCE_ENABLE;
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
+ if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
+ *fp |= FP_CB_TUNE;
- udelay(200);
+ if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
+ *fp2 |= FP_CB_TUNE;
- if (is_edp) {
- if (dev_priv->lvds_use_ssc) {
- temp |= DREF_SSC1_ENABLE;
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
+ dpll = 0;
- udelay(200);
+ if (is_lvds)
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
- temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
- temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
- } else {
- temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
- }
+ dpll |= (intel_crtc->config.pixel_multiplier - 1)
+ << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+
+ if (is_sdvo)
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+ if (intel_crtc->config.has_dp_encoder)
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ /* also FPA1 */
+ dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+
+ switch (intel_crtc->config.dpll.p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+
+ if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ return dpll | DPLL_VCO_ENABLE;
+}
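+
+/*
+ * Editorial note (not part of the patch): the P1 bitmask above is one-hot,
+ * e.g. p1 = 2 becomes (1 << 1) in both the FPA01 and FPA1 post-divider
+ * fields, so the normal and reduced (downclocked) configurations share the
+ * same P1 - consistent with ironlake_compute_clocks() requiring the
+ * reduced clock's P to match the target clock's.
+ */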
+
+static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll = 0, fp = 0, fp2 = 0;
+ bool ok, has_reduced_clock = false;
+ bool is_lvds = false;
+ struct intel_encoder *encoder;
+ struct intel_shared_dpll *pll;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
}
+
+ num_connectors++;
}
- if (IS_PINEVIEW(dev)) {
- fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
- if (has_reduced_clock)
- fp2 = (1 << reduced_clock.n) << 16 |
- reduced_clock.m1 << 8 | reduced_clock.m2;
- } else {
- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
- if (has_reduced_clock)
- fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
- reduced_clock.m2;
+ WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
+ "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+
+ ok = ironlake_compute_clocks(crtc, &clock,
+ &has_reduced_clock, &reduced_clock);
+ if (!ok && !intel_crtc->config.clock_set) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+ /* Compat-code for transition, will disappear. */
+ if (!intel_crtc->config.clock_set) {
+ intel_crtc->config.dpll.n = clock.n;
+ intel_crtc->config.dpll.m1 = clock.m1;
+ intel_crtc->config.dpll.m2 = clock.m2;
+ intel_crtc->config.dpll.p1 = clock.p1;
+ intel_crtc->config.dpll.p2 = clock.p2;
}
- if (!IS_IRONLAKE(dev))
- dpll = DPLL_VGA_MODE_DIS;
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (intel_crtc->config.has_pch_encoder) {
+ fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
+ if (has_reduced_clock)
+ fp2 = i9xx_dpll_compute_fp(&reduced_clock);
- if (IS_I9XX(dev)) {
- if (is_lvds)
- dpll |= DPLLB_MODE_LVDS;
+ dpll = ironlake_compute_dpll(intel_crtc,
+ &fp, &reduced_clock,
+ has_reduced_clock ? &fp2 : NULL);
+
+ intel_crtc->config.dpll_hw_state.dpll = dpll;
+ intel_crtc->config.dpll_hw_state.fp0 = fp;
+ if (has_reduced_clock)
+ intel_crtc->config.dpll_hw_state.fp1 = fp2;
else
- dpll |= DPLLB_MODE_DAC_SERIAL;
- if (is_sdvo) {
- dpll |= DPLL_DVO_HIGH_SPEED;
- sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
- else if (IS_IRONLAKE(dev))
- dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
- }
- if (is_dp)
- dpll |= DPLL_DVO_HIGH_SPEED;
+ intel_crtc->config.dpll_hw_state.fp1 = fp;
- /* compute bitmask from p1 value */
- if (IS_PINEVIEW(dev))
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
- else {
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- /* also FPA1 */
- if (IS_IRONLAKE(dev))
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- if (IS_G4X(dev) && has_reduced_clock)
- dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ pll = intel_get_shared_dpll(intel_crtc);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+ return -EINVAL;
}
- switch (clock.p2) {
- case 5:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
- break;
- case 7:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
- break;
- case 10:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
- break;
- case 14:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
- break;
+ } else
+ intel_put_shared_dpll(intel_crtc);
+
+ if (is_lvds && has_reduced_clock && i915.powersave)
+ intel_crtc->lowfreq_avail = true;
+ else
+ intel_crtc->lowfreq_avail = false;
+
+ return 0;
+}
+
+static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = crtc->pipe;
+
+ m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
+ m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
+ m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
+ & ~TU_SIZE_MASK;
+ m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
+ m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
+ & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+}
+
+static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
+ enum transcoder transcoder,
+ struct intel_link_m_n *m_n)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = crtc->pipe;
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
+ m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
+ m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
+ & ~TU_SIZE_MASK;
+ m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
+ m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
+ & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+ } else {
+ m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
+ m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
+ m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
+ & ~TU_SIZE_MASK;
+ m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
+ m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
+ & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+ }
+}
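+
+/*
+ * Editorial note (not part of the patch): the DATA_M registers pack the TU
+ * size and the M value together, so a raw value of TU_SIZE(64) | 0x123
+ * decodes above to tu = 64 and gmch_m = 0x123; the removed mode_set code
+ * wrote the register as TU_SIZE(m_n.tu) | m_n.gmch_m, the exact inverse
+ * of this readout.
+ */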
+
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ if (crtc->config.has_pch_encoder)
+ intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
+ else
+ intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
+ &pipe_config->dp_m_n);
+}
+
+static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
+ &pipe_config->fdi_m_n);
+}
+
+static void ironlake_get_pfit_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = I915_READ(PF_CTL(crtc->pipe));
+
+ if (tmp & PF_ENABLE) {
+ pipe_config->pch_pfit.enabled = true;
+ pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
+ pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
+
+	/* We currently do not free assignments of panel fitters on
+	 * ivb/hsw (since we don't use the higher upscaling modes which
+	 * differentiate them) so just WARN about this case for now. */
+ if (IS_GEN7(dev)) {
+ WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
+ PF_PIPE_SEL_IVB(crtc->pipe));
}
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
- dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+ }
+}
+
+static void ironlake_get_plane_config(struct intel_crtc *crtc,
+ struct intel_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, base, offset;
+ int pipe = crtc->pipe, plane = crtc->plane;
+ int fourcc, pixel_format;
+ int aligned_height;
+
+ crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
+ if (!crtc->base.primary->fb) {
+ DRM_DEBUG_KMS("failed to alloc fb\n");
+ return;
+ }
+
+ val = I915_READ(DSPCNTR(plane));
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ if (val & DISPPLANE_TILED)
+ plane_config->tiled = true;
+
+ pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
+ fourcc = intel_format_to_fourcc(pixel_format);
+ crtc->base.primary->fb->pixel_format = fourcc;
+ crtc->base.primary->fb->bits_per_pixel =
+ drm_format_plane_cpp(fourcc, 0) * 8;
+
+ base = I915_READ(DSPSURF(plane)) & 0xfffff000;
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ offset = I915_READ(DSPOFFSET(plane));
} else {
- if (is_lvds) {
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (plane_config->tiled)
+ offset = I915_READ(DSPTILEOFF(plane));
+ else
+ offset = I915_READ(DSPLINOFF(plane));
+ }
+ plane_config->base = base;
+
+ val = I915_READ(PIPESRC(pipe));
+ crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
+ crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
+
+ val = I915_READ(DSPSTRIDE(pipe));
+ crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
+
+ aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
+ plane_config->tiled);
+
+ plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
+ aligned_height, PAGE_SIZE);
+
+ DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ pipe, plane, crtc->base.primary->fb->width,
+ crtc->base.primary->fb->height,
+ crtc->base.primary->fb->bits_per_pixel, base,
+ crtc->base.primary->fb->pitches[0],
+ plane_config->size);
+}
+
+static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+ pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+
+ tmp = I915_READ(PIPECONF(crtc->pipe));
+ if (!(tmp & PIPECONF_ENABLE))
+ return false;
+
+ switch (tmp & PIPECONF_BPC_MASK) {
+ case PIPECONF_6BPC:
+ pipe_config->pipe_bpp = 18;
+ break;
+ case PIPECONF_8BPC:
+ pipe_config->pipe_bpp = 24;
+ break;
+ case PIPECONF_10BPC:
+ pipe_config->pipe_bpp = 30;
+ break;
+ case PIPECONF_12BPC:
+ pipe_config->pipe_bpp = 36;
+ break;
+ default:
+ break;
+ }
+
+ if (tmp & PIPECONF_COLOR_RANGE_SELECT)
+ pipe_config->limited_color_range = true;
+
+ if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
+ struct intel_shared_dpll *pll;
+
+ pipe_config->has_pch_encoder = true;
+
+ tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
+ pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+ FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+ ironlake_get_fdi_m_n_config(crtc, pipe_config);
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ pipe_config->shared_dpll =
+ (enum intel_dpll_id) crtc->pipe;
} else {
- if (clock.p1 == 2)
- dpll |= PLL_P1_DIVIDE_BY_TWO;
+ tmp = I915_READ(PCH_DPLL_SEL);
+ if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
+ pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
else
- dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (clock.p2 == 4)
- dpll |= PLL_P2_DIVIDE_BY_4;
+ pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
}
- }
- if (is_sdvo && is_tv)
- dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (is_tv)
- /* XXX: just matching BIOS for now */
- /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
- dpll |= 3;
- else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2)
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
- else
- dpll |= PLL_REF_INPUT_DREFCLK;
+ pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
- /* setup pipeconf */
- pipeconf = I915_READ(pipeconf_reg);
+ WARN_ON(!pll->get_hw_state(dev_priv, pll,
+ &pipe_config->dpll_hw_state));
- /* Set up the display plane register */
- dspcntr = DISPPLANE_GAMMA_ENABLE;
+ tmp = pipe_config->dpll_hw_state.dpll;
+ pipe_config->pixel_multiplier =
+ ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
+ >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
- /* Ironlake's plane is forced to pipe, bit 24 is to
- enable color space conversion */
- if (!IS_IRONLAKE(dev)) {
- if (pipe == 0)
- dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
- else
- dspcntr |= DISPPLANE_SEL_PIPE_B;
+ ironlake_pch_clock_get(crtc, pipe_config);
+ } else {
+ pipe_config->pixel_multiplier = 1;
}
- if (pipe == 0 && !IS_I965G(dev)) {
- /* Enable pixel doubling when the dot clock is > 90% of the (display)
- * core speed.
- *
- * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
- * pipe == 0 check?
- */
- if (mode->clock >
- dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
- pipeconf |= PIPEACONF_DOUBLE_WIDE;
- else
- pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
+ intel_get_pipe_timings(crtc, pipe_config);
+
+ ironlake_get_pfit_config(crtc, pipe_config);
+
+ return true;
+}
+
+static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(dev, crtc)
+ WARN(crtc->active, "CRTC for pipe %c enabled\n",
+ pipe_name(crtc->pipe));
+
+ WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
+ WARN(plls->spll_refcount, "SPLL enabled\n");
+ WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
+ WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
+ WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
+ WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+ "CPU PWM1 enabled\n");
+ WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ "CPU PWM2 enabled\n");
+ WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+ "PCH PWM1 enabled\n");
+ WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Utility pin enabled\n");
+ WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
+
+ /*
+ * In theory we can still leave IRQs enabled, as long as only the HPD
+ * interrupts remain enabled. We used to check for that, but since it's
+ * gen-specific and since we only disable LCPLL after we fully disable
+ * the interrupts, the check below should be enough.
+ */
+ WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n");
+}
+
+static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (IS_HASWELL(dev)) {
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
+ val))
+ DRM_ERROR("Failed to disable D_COMP\n");
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ } else {
+ I915_WRITE(D_COMP, val);
}
+ POSTING_READ(D_COMP);
+}
- dspcntr |= DISPLAY_PLANE_ENABLE;
- pipeconf |= PIPEACONF_ENABLE;
- dpll |= DPLL_VCO_ENABLE;
+/*
+ * This function implements pieces of two sequences from BSpec:
+ * - Sequence for display software to disable LCPLL
+ * - Sequence for display software to allow package C8+
+ * The steps implemented here are just the steps that actually touch the LCPLL
+ * register. Callers should take care of disabling all the display engine
+ * functions, doing the mode unset, fixing interrupts, etc.
+ */
+static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+ bool switch_to_fclk, bool allow_power_down)
+{
+ uint32_t val;
+
+ assert_can_disable_lcpll(dev_priv);
+ val = I915_READ(LCPLL_CTL);
- /* Disable the panel fitter if it was on our pipe */
- if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe)
- I915_WRITE(PFIT_CONTROL, 0);
+ if (switch_to_fclk) {
+ val |= LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
- DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
- drm_mode_debug_printmodeline(mode);
+ if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ DRM_ERROR("Switching to FCLK failed\n");
- /* assign to Ironlake registers */
- if (IS_IRONLAKE(dev)) {
- fp_reg = pch_fp_reg;
- dpll_reg = pch_dpll_reg;
+ val = I915_READ(LCPLL_CTL);
}
- if (is_edp) {
- ironlake_disable_pll_edp(crtc);
- } else if ((dpll & DPLL_VCO_ENABLE)) {
- I915_WRITE(fp_reg, fp);
- I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
- I915_READ(dpll_reg);
- udelay(150);
+ val |= LCPLL_PLL_DISABLE;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+
+ if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
+ DRM_ERROR("LCPLL still locked\n");
+
+ val = I915_READ(D_COMP);
+ val |= D_COMP_COMP_DISABLE;
+ hsw_write_dcomp(dev_priv, val);
+ ndelay(100);
+
+ if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+ DRM_ERROR("D_COMP RCOMP still in progress\n");
+
+ if (allow_power_down) {
+ val = I915_READ(LCPLL_CTL);
+ val |= LCPLL_POWER_DOWN_ALLOW;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
}
+}
- /* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
+/*
+ * Fully restores LCPLL, disallowing power down and switching back to LCPLL
+ * source.
+ */
+static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+ unsigned long irqflags;
+
+ val = I915_READ(LCPLL_CTL);
+
+ if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
+ LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
+ return;
+
+ /*
+	 * Make sure we're not in the PC8 state before disabling PC8, otherwise
+	 * we'll hang the machine. To stay out of PC8, just enable force_wake.
+ *
+ * The other problem is that hsw_restore_lcpll() is called as part of
+ * the runtime PM resume sequence, so we can't just call
+ * gen6_gt_force_wake_get() because that function calls
+ * intel_runtime_pm_get(), and we can't change the runtime PM refcount
+ * while we are on the resume sequence. So to solve this problem we have
+ * to call special forcewake code that doesn't touch runtime PM and
+ * doesn't enable the forcewake delayed work.
*/
- if (is_lvds) {
- u32 lvds;
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (dev_priv->uncore.forcewake_count++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ if (val & LCPLL_POWER_DOWN_ALLOW) {
+ val &= ~LCPLL_POWER_DOWN_ALLOW;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+ }
- if (IS_IRONLAKE(dev))
- lvds_reg = PCH_LVDS;
+ val = I915_READ(D_COMP);
+ val |= D_COMP_COMP_FORCE;
+ val &= ~D_COMP_COMP_DISABLE;
+ hsw_write_dcomp(dev_priv, val);
- lvds = I915_READ(lvds_reg);
- lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
- /* set the corresponsding LVDS_BORDER bit */
- lvds |= dev_priv->lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether we're going to
- * set the DPLLs for dual-channel mode or not.
- */
- if (clock.p2 == 7)
- lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- else
- lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_PLL_DISABLE;
+ I915_WRITE(LCPLL_CTL, val);
- /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
- * appropriately here, but we need to look more thoroughly into how
- * panels behave in the two modes.
- */
- /* set the dithering flag */
- if (IS_I965G(dev)) {
- if (dev_priv->lvds_dither) {
- if (IS_IRONLAKE(dev))
- pipeconf |= PIPE_ENABLE_DITHER;
- else
- lvds |= LVDS_ENABLE_DITHER;
- } else {
- if (IS_IRONLAKE(dev))
- pipeconf &= ~PIPE_ENABLE_DITHER;
- else
- lvds &= ~LVDS_ENABLE_DITHER;
- }
- }
- I915_WRITE(lvds_reg, lvds);
- I915_READ(lvds_reg);
+ if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
+ DRM_ERROR("LCPLL not locked yet\n");
+
+ if (val & LCPLL_CD_SOURCE_FCLK) {
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ DRM_ERROR("Switching back to LCPLL failed\n");
}
- if (is_dp)
- intel_dp_set_m_n(crtc, mode, adjusted_mode);
- if (!is_edp) {
- I915_WRITE(fp_reg, fp);
- I915_WRITE(dpll_reg, dpll);
- I915_READ(dpll_reg);
- /* Wait for the clocks to stabilize. */
- udelay(150);
-
- if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
- if (is_sdvo) {
- sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
- ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
- } else
- I915_WRITE(dpll_md_reg, 0);
- } else {
- /* write it again -- the BIOS does, after all */
- I915_WRITE(dpll_reg, dpll);
- }
- I915_READ(dpll_reg);
- /* Wait for the clocks to stabilize. */
- udelay(150);
+ /* See the big comment above. */
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (--dev_priv->uncore.forcewake_count == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+/*
+ * Package states C8 and deeper are really deep PC states that can only be
+ * reached when all the devices on the system allow it, so even if the graphics
+ * device allows PC8+, it doesn't mean the system will actually get to these
+ * states. Our driver only allows PC8+ when going into runtime PM.
+ *
+ * The requirements for PC8+ are that all the outputs are disabled, the power
+ * well is disabled and most interrupts are disabled, and these are also
+ * requirements for runtime PM. When these conditions are met, we manually take
+ * the remaining steps: disable the interrupts and clocks, and switch the LCPLL
+ * refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
+ * hang the machine.
+ *
+ * When we really reach PC8 or deeper states (not just when we allow it) we lose
+ * the state of some registers, so when we come back from PC8+ we need to
+ * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
+ * need to take care of the registers kept by RC6. Notice that this happens even
+ * if we don't put the device in PCI D3 state (which is what currently happens
+ * because of the runtime PM support).
+ *
+ * For more, read "Display Sequences for Package C8" on the hardware
+ * documentation.
+ */
+void hsw_enable_pc8(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ uint32_t val;
+
+ DRM_DEBUG_KMS("Enabling package C8+\n");
+
+ if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+ val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
}
- if (is_lvds && has_reduced_clock && i915_powersave) {
- I915_WRITE(fp_reg + 4, fp2);
- intel_crtc->lowfreq_avail = true;
- if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG_KMS("enabling CxSR downclocking\n");
- pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
- }
- } else {
- I915_WRITE(fp_reg + 4, fp);
- intel_crtc->lowfreq_avail = false;
- if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG_KMS("disabling CxSR downclocking\n");
- pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
+ lpt_disable_clkout_dp(dev);
+ hsw_disable_lcpll(dev_priv, true, true);
+}
+
+void hsw_disable_pc8(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ uint32_t val;
+
+ DRM_DEBUG_KMS("Disabling package C8+\n");
+
+ hsw_restore_lcpll(dev_priv);
+ lpt_init_pch_refclk(dev);
+
+ if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+ val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val |= PCH_LP_PARTITION_LEVEL_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ }
+
+ intel_prepare_ddi(dev);
+}
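+
+/*
+ * Editorial note (not part of the patch): hsw_enable_pc8() and
+ * hsw_disable_pc8() are mirror images - enable tears down CLKOUT_DP and
+ * the LCPLL (switching the CD clock to Fclk and allowing power down),
+ * while disable restores the LCPLL first and then re-runs the LPT refclk
+ * init and DDI preparation.
+ */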
+
+static void snb_modeset_global_resources(struct drm_device *dev)
+{
+ modeset_update_crtc_power_domains(dev);
+}
+
+static void haswell_modeset_global_resources(struct drm_device *dev)
+{
+ modeset_update_crtc_power_domains(dev);
+}
+
+static int haswell_crtc_mode_set(struct drm_crtc *crtc,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (!intel_ddi_pll_select(intel_crtc))
+ return -EINVAL;
+ intel_ddi_pll_enable(intel_crtc);
+
+ intel_crtc->lowfreq_avail = false;
+
+ return 0;
+}
+
+static bool haswell_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain pfit_domain;
+ uint32_t tmp;
+
+ if (!intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
+ return false;
+
+ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+ pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+ if (tmp & TRANS_DDI_FUNC_ENABLE) {
+ enum pipe trans_edp_pipe;
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ default:
+ WARN(1, "unknown pipe linked to edp transcoder\n");
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ trans_edp_pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ trans_edp_pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ trans_edp_pipe = PIPE_C;
+ break;
}
+
+ if (trans_edp_pipe == crtc->pipe)
+ pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
- I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
- I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
- ((adjusted_mode->crtc_vblank_end - 1) << 16));
- I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
- /* pipesrc and dspsize control the size that is scaled from, which should
- * always be the user's requested size.
+ if (!intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
+ return false;
+
+ tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+ if (!(tmp & PIPECONF_ENABLE))
+ return false;
+
+ /*
+ * Haswell has only one FDI/PCH transcoder (transcoder A), which is connected
+ * to DDI E. So just check whether this pipe is wired to DDI E and whether
+ * the PCH transcoder is on.
*/
- if (!IS_IRONLAKE(dev)) {
- I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
- (mode->hdisplay - 1));
- I915_WRITE(dsppos_reg, 0);
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
+ if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
+ I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+ pipe_config->has_pch_encoder = true;
+
+ tmp = I915_READ(FDI_RX_CTL(PIPE_A));
+ pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+ FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+ ironlake_get_fdi_m_n_config(crtc, pipe_config);
}
- I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- if (IS_IRONLAKE(dev)) {
- I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
- I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
- I915_WRITE(link_m1_reg, m_n.link_m);
- I915_WRITE(link_n1_reg, m_n.link_n);
+ intel_get_pipe_timings(crtc, pipe_config);
- if (is_edp) {
- ironlake_set_pll_edp(crtc, adjusted_mode->clock);
- } else {
- /* enable FDI RX PLL too */
- temp = I915_READ(fdi_rx_reg);
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
- udelay(200);
- }
+ pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
+ if (intel_display_power_enabled(dev_priv, pfit_domain))
+ ironlake_get_pfit_config(crtc, pipe_config);
+
+ if (IS_HASWELL(dev))
+ pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
+ (I915_READ(IPS_CTL) & IPS_ENABLE);
+
+ pipe_config->pixel_multiplier = 1;
+
+ return true;
+}
+
+static struct {
+ int clock;
+ u32 config;
+} hdmi_audio_clock[] = {
+ { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
+ { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
+ { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
+ { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
+ { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
+ { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
+ { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
+ { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
+ { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
+ { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+};
+
+/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
+static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
+ if (mode->clock == hdmi_audio_clock[i].clock)
+ break;
}
- I915_WRITE(pipeconf_reg, pipeconf);
- I915_READ(pipeconf_reg);
+ if (i == ARRAY_SIZE(hdmi_audio_clock)) {
+ DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
+ i = 1;
+ }
+
+ DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+ hdmi_audio_clock[i].clock,
+ hdmi_audio_clock[i].config);
- intel_wait_for_vblank(dev);
+ return hdmi_audio_clock[i].config;
+}
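+
+/*
+ * Editorial example (not part of the patch): a mode using the NTSC-rate
+ * 74.25/1.001 MHz pixel clock (mode->clock == 74176) matches the
+ * DIV_ROUND_UP(74250 * 1000, 1001) entry and returns
+ * AUD_CONFIG_PIXEL_CLOCK_HDMI_74176; any clock missing from the table
+ * falls back to index 1, the 25200 kHz bspec default.
+ */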
- if (IS_IRONLAKE(dev)) {
- /* enable address swizzle for tiling buffer */
- temp = I915_READ(DISP_ARB_CTL);
- I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
+static bool intel_eld_uptodate(struct drm_connector *connector,
+ int reg_eldv, uint32_t bits_eldv,
+ int reg_elda, uint32_t bits_elda,
+ int reg_edid)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t i;
+
+ i = I915_READ(reg_eldv);
+ i &= bits_eldv;
+
+ if (!eld[0])
+ return !i;
+
+ if (!i)
+ return false;
+
+ i = I915_READ(reg_elda);
+ i &= ~bits_elda;
+ I915_WRITE(reg_elda, i);
+
+ for (i = 0; i < eld[2]; i++)
+ if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+ return false;
+
+ return true;
+}
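+
+/*
+ * Editorial note (not part of the patch): intel_eld_uptodate() returns
+ * true when no rewrite is needed - either both the connector ELD and the
+ * hardware ELD-valid bits are absent, or the valid bits are set and every
+ * dword in the hardware buffer matches connector->eld - letting the
+ * write_eld implementations below return early.
+ */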
+
+static void g4x_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t len;
+ uint32_t i;
+
+ i = I915_READ(G4X_AUD_VID_DID);
+
+ if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
+ eldv = G4X_ELDV_DEVCL_DEVBLC;
+ else
+ eldv = G4X_ELDV_DEVCTG;
+
+ if (intel_eld_uptodate(connector,
+ G4X_AUD_CNTL_ST, eldv,
+ G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
+ G4X_HDMIW_HDMIEDID))
+ return;
+
+ i = I915_READ(G4X_AUD_CNTL_ST);
+ i &= ~(eldv | G4X_ELD_ADDR);
+ len = (i >> 9) & 0x1f; /* ELD buffer size */
+ I915_WRITE(G4X_AUD_CNTL_ST, i);
+
+ if (!eld[0])
+ return;
+
+ len = min_t(uint8_t, eld[2], len);
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+
+ i = I915_READ(G4X_AUD_CNTL_ST);
+ i |= eldv;
+ I915_WRITE(G4X_AUD_CNTL_ST, i);
+}
+
+static void haswell_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t i;
+ int len;
+ int pipe = to_intel_crtc(crtc)->pipe;
+ int tmp;
+
+ int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
+ int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
+ int aud_config = HSW_AUD_CFG(pipe);
+ int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
+
+ /* Audio output enable */
+ DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
+ tmp = I915_READ(aud_cntrl_st2);
+ tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
+ I915_WRITE(aud_cntrl_st2, tmp);
+ POSTING_READ(aud_cntrl_st2);
+
+ assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
+
+ /* Set ELD valid state */
+ tmp = I915_READ(aud_cntrl_st2);
+ DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
+ tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
+ I915_WRITE(aud_cntrl_st2, tmp);
+ tmp = I915_READ(aud_cntrl_st2);
+ DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
+
+ /* Enable HDMI mode */
+ tmp = I915_READ(aud_config);
+ DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
+	/* clear N_programming_enable and N_value_index */
+ tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
+ I915_WRITE(aud_config, tmp);
+
+ DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
+
+ eldv = AUDIO_ELD_VALID_A << (pipe * 4);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+ eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
+ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+ } else {
+ I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
}
- I915_WRITE(dspcntr_reg, dspcntr);
+ if (intel_eld_uptodate(connector,
+ aud_cntrl_st2, eldv,
+ aud_cntl_st, IBX_ELD_ADDRESS,
+ hdmiw_hdmiedid))
+ return;
+
+ i = I915_READ(aud_cntrl_st2);
+ i &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, i);
- /* Flush the plane changes */
- ret = intel_pipe_set_base(crtc, x, y, old_fb);
+ if (!eld[0])
+ return;
- if ((IS_I965G(dev) || plane == 0))
- intel_update_fbc(crtc, &crtc->mode);
+ i = I915_READ(aud_cntl_st);
+ i &= ~IBX_ELD_ADDRESS;
+ I915_WRITE(aud_cntl_st, i);
+ i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
+ DRM_DEBUG_DRIVER("port num:%d\n", i);
- intel_update_watermarks(dev);
+ len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
- drm_vblank_post_modeset(dev, pipe);
+ i = I915_READ(aud_cntrl_st2);
+ i |= eldv;
+ I915_WRITE(aud_cntrl_st2, i);
- return ret;
}
-/** Loads the palette/gamma unit for the CRTC with the prepared values */
-void intel_crtc_load_lut(struct drm_crtc *crtc)
+static void ironlake_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t i;
+ int len;
+ int hdmiw_hdmiedid;
+ int aud_config;
+ int aud_cntl_st;
+ int aud_cntrl_st2;
+ int pipe = to_intel_crtc(crtc)->pipe;
+
+ if (HAS_PCH_IBX(connector->dev)) {
+ hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+ aud_config = IBX_AUD_CFG(pipe);
+ aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+ } else if (IS_VALLEYVIEW(connector->dev)) {
+ hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
+ aud_config = VLV_AUD_CFG(pipe);
+ aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+ } else {
+ hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+ aud_config = CPT_AUD_CFG(pipe);
+ aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ }
+
+ DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
+
+ if (IS_VALLEYVIEW(connector->dev)) {
+ struct intel_encoder *intel_encoder;
+ struct intel_digital_port *intel_dig_port;
+
+ intel_encoder = intel_attached_encoder(connector);
+ intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+ i = intel_dig_port->port;
+ } else {
+ i = I915_READ(aud_cntl_st);
+ i = (i >> 29) & DIP_PORT_SEL_MASK;
+ /* DIP_Port_Select, 0x1 = PortB */
+ }
+
+ if (!i) {
+ DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
+ /* operate blindly on all ports */
+ eldv = IBX_ELD_VALIDB;
+ eldv |= IBX_ELD_VALIDB << 4;
+ eldv |= IBX_ELD_VALIDB << 8;
+ } else {
+ DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
+ eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
+ }
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+ eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
+ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+ } else {
+ I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
+ }
+
+ if (intel_eld_uptodate(connector,
+ aud_cntrl_st2, eldv,
+ aud_cntl_st, IBX_ELD_ADDRESS,
+ hdmiw_hdmiedid))
+ return;
+
+ i = I915_READ(aud_cntrl_st2);
+ i &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+
+ if (!eld[0])
+ return;
+
+ i = I915_READ(aud_cntl_st);
+ i &= ~IBX_ELD_ADDRESS;
+ I915_WRITE(aud_cntl_st, i);
+
+ len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+ i = I915_READ(aud_cntrl_st2);
+ i |= eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+}
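+
+/*
+ * Editorial note (not part of the patch): eld[2] holds the ELD payload
+ * length in dwords, which is why the haswell and ironlake paths above
+ * clamp the copy to min(eld[2], 21) - the hardware buffer is 84 bytes,
+ * i.e. 21 dwords.
+ */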
+
+void intel_write_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ connector = drm_select_eld(encoder, mode);
+ if (!connector)
+ return;
+
+ DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id,
+ connector->name,
+ connector->encoder->base.id,
+ connector->encoder->name);
+
+ connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
+
+ if (dev_priv->display.write_eld)
+ dev_priv->display.write_eld(connector, crtc, mode);
+}
+
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
- int i;
+ uint32_t cntl;
- /* The clocks have to be on to load the palette. */
- if (!crtc->enabled)
- return;
+ if (base != intel_crtc->cursor_base) {
+ /* On these chipsets we can only modify the base whilst
+ * the cursor is disabled.
+ */
+ if (intel_crtc->cursor_cntl) {
+ I915_WRITE(_CURACNTR, 0);
+ POSTING_READ(_CURACNTR);
+ intel_crtc->cursor_cntl = 0;
+ }
- /* use legacy palette for Ironlake */
- if (IS_IRONLAKE(dev))
- palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
- LGC_PALETTE_B;
+ I915_WRITE(_CURABASE, base);
+ POSTING_READ(_CURABASE);
+ }
- for (i = 0; i < 256; i++) {
- I915_WRITE(palreg + 4 * i,
- (intel_crtc->lut_r[i] << 16) |
- (intel_crtc->lut_g[i] << 8) |
- intel_crtc->lut_b[i]);
+ /* XXX width must be 64, stride 256 => 0x00 << 28 */
+ cntl = 0;
+ if (base)
+ cntl = (CURSOR_ENABLE |
+ CURSOR_GAMMA_ENABLE |
+ CURSOR_FORMAT_ARGB);
+ if (intel_crtc->cursor_cntl != cntl) {
+ I915_WRITE(_CURACNTR, cntl);
+ POSTING_READ(_CURACNTR);
+ intel_crtc->cursor_cntl = cntl;
}
}
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ uint32_t cntl;
+
+ cntl = 0;
+ if (base) {
+ cntl = MCURSOR_GAMMA_ENABLE;
+ switch (intel_crtc->cursor_width) {
+ case 64:
+ cntl |= CURSOR_MODE_64_ARGB_AX;
+ break;
+ case 128:
+ cntl |= CURSOR_MODE_128_ARGB_AX;
+ break;
+ case 256:
+ cntl |= CURSOR_MODE_256_ARGB_AX;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ cntl |= pipe << 28; /* Connect to correct pipe */
+ }
+ if (intel_crtc->cursor_cntl != cntl) {
+ I915_WRITE(CURCNTR(pipe), cntl);
+ POSTING_READ(CURCNTR(pipe));
+ intel_crtc->cursor_cntl = cntl;
+ }
+
+ /* and commit changes on next vblank */
+ I915_WRITE(CURBASE(pipe), base);
+ POSTING_READ(CURBASE(pipe));
+}
+
+static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ uint32_t cntl;
+
+ cntl = 0;
+ if (base) {
+ cntl = MCURSOR_GAMMA_ENABLE;
+ switch (intel_crtc->cursor_width) {
+ case 64:
+ cntl |= CURSOR_MODE_64_ARGB_AX;
+ break;
+ case 128:
+ cntl |= CURSOR_MODE_128_ARGB_AX;
+ break;
+ case 256:
+ cntl |= CURSOR_MODE_256_ARGB_AX;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ }
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ cntl |= CURSOR_PIPE_CSC_ENABLE;
+
+ if (intel_crtc->cursor_cntl != cntl) {
+ I915_WRITE(CURCNTR(pipe), cntl);
+ POSTING_READ(CURCNTR(pipe));
+ intel_crtc->cursor_cntl = cntl;
+ }
+
+ /* and commit changes on next vblank */
+ I915_WRITE(CURBASE(pipe), base);
+ POSTING_READ(CURBASE(pipe));
+}
+
+/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
+static void intel_crtc_update_cursor(struct drm_crtc *crtc,
+ bool on)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int x = intel_crtc->cursor_x;
+ int y = intel_crtc->cursor_y;
+ u32 base = 0, pos = 0;
+
+ if (on)
+ base = intel_crtc->cursor_addr;
+
+ if (x >= intel_crtc->config.pipe_src_w)
+ base = 0;
+
+ if (y >= intel_crtc->config.pipe_src_h)
+ base = 0;
+
+ if (x < 0) {
+ if (x + intel_crtc->cursor_width <= 0)
+ base = 0;
+
+ pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+ x = -x;
+ }
+ pos |= x << CURSOR_X_SHIFT;
+
+ if (y < 0) {
+ if (y + intel_crtc->cursor_height <= 0)
+ base = 0;
+
+ pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+ y = -y;
+ }
+ pos |= y << CURSOR_Y_SHIFT;
+
+ if (base == 0 && intel_crtc->cursor_base == 0)
+ return;
+
+ I915_WRITE(CURPOS(pipe), pos);
+
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
+ ivb_update_cursor(crtc, base);
+ else if (IS_845G(dev) || IS_I865G(dev))
+ i845_update_cursor(crtc, base);
+ else
+ i9xx_update_cursor(crtc, base);
+ intel_crtc->cursor_base = base;
+}
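/*
 * Editor's sketch (not part of the patch): the CURPOS handling above uses a
 * sign-magnitude encoding per axis. With hypothetical values x = -16, y = 8
 * and a 64-pixel-wide cursor:
 *   x + cursor_width = 48 > 0, so the cursor stays enabled (base != 0), and
 *   pos = (CURSOR_POS_SIGN | 16) << CURSOR_X_SHIFT | 8 << CURSOR_Y_SHIFT;
 * whereas x = -64 gives x + cursor_width == 0, forcing base = 0 and, if
 * cursor_base is already 0 too, an early return with no register writes.
 */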
+
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
+ struct drm_file *file,
uint32_t handle,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_gem_object *bo;
- struct drm_i915_gem_object *obj_priv;
- int pipe = intel_crtc->pipe;
- uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
- uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
- uint32_t temp = I915_READ(control);
- size_t addr;
+ struct drm_i915_gem_object *obj;
+ unsigned old_width;
+ uint32_t addr;
int ret;
- DRM_DEBUG_KMS("\n");
-
/* if we want to turn off the cursor ignore width and height */
if (!handle) {
DRM_DEBUG_KMS("cursor off\n");
- if (IS_MOBILE(dev) || IS_I9XX(dev)) {
- temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
- temp |= CURSOR_MODE_DISABLE;
- } else {
- temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
- }
addr = 0;
- bo = NULL;
+ obj = NULL;
mutex_lock(&dev->struct_mutex);
goto finish;
}
- /* Currently we only support 64x64 cursors */
- if (width != 64 || height != 64) {
- DRM_ERROR("we currently only support 64x64 cursors\n");
+ /* Check for which cursor types we support */
+ if (!((width == 64 && height == 64) ||
+ (width == 128 && height == 128 && !IS_GEN2(dev)) ||
+ (width == 256 && height == 256 && !IS_GEN2(dev)))) {
+ DRM_DEBUG("Cursor dimension not supported\n");
return -EINVAL;
}
- bo = drm_gem_object_lookup(dev, file_priv, handle);
- if (!bo)
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ if (&obj->base == NULL)
return -ENOENT;
- obj_priv = bo->driver_private;
-
- if (bo->size < width * height * 4) {
- DRM_ERROR("buffer is to small\n");
+ if (obj->base.size < width * height * 4) {
+ DRM_DEBUG_KMS("buffer is to small\n");
ret = -ENOMEM;
goto fail;
}
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
- if (!dev_priv->info->cursor_needs_physical) {
- ret = i915_gem_object_pin(bo, PAGE_SIZE);
+ if (!INTEL_INFO(dev)->cursor_needs_physical) {
+ unsigned alignment;
+
+ if (obj->tiling_mode) {
+ DRM_DEBUG_KMS("cursor cannot be tiled\n");
+ ret = -EINVAL;
+ goto fail_locked;
+ }
+
+ /* Note that the w/a also requires 2 PTE of padding following
+ * the bo. We currently fill all unused PTE with the shadow
+ * page and so we should always have valid PTE following the
+ * cursor preventing the VT-d warning.
+ */
+ alignment = 0;
+ if (need_vtd_wa(dev))
+ alignment = 64*1024;
+
+ ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
if (ret) {
- DRM_ERROR("failed to pin cursor bo\n");
+ DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
goto fail_locked;
}
- addr = obj_priv->gtt_offset;
+
+ ret = i915_gem_object_put_fence(obj);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to release fence for cursor");
+ goto fail_unpin;
+ }
+
+ addr = i915_gem_obj_ggtt_offset(obj);
} else {
- ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
+ int align = IS_I830(dev) ? 16 * 1024 : 256;
+ ret = i915_gem_object_attach_phys(obj, align);
if (ret) {
- DRM_ERROR("failed to attach phys object\n");
+ DRM_DEBUG_KMS("failed to attach phys object\n");
goto fail_locked;
}
- addr = obj_priv->phys_obj->handle->busaddr;
+ addr = obj->phys_handle->busaddr;
}
- if (!IS_I9XX(dev))
+ if (IS_GEN2(dev))
I915_WRITE(CURSIZE, (height << 12) | width);
- /* Hooray for CUR*CNTR differences */
- if (IS_MOBILE(dev) || IS_I9XX(dev)) {
- temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
- temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
- temp |= (pipe << 28); /* Connect to correct pipe */
- } else {
- temp &= ~(CURSOR_FORMAT_MASK);
- temp |= CURSOR_ENABLE;
- temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
- }
-
finish:
- I915_WRITE(control, temp);
- I915_WRITE(base, addr);
-
if (intel_crtc->cursor_bo) {
- if (dev_priv->info->cursor_needs_physical) {
- if (intel_crtc->cursor_bo != bo)
- i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
- } else
- i915_gem_object_unpin(intel_crtc->cursor_bo);
- drm_gem_object_unreference(intel_crtc->cursor_bo);
+ if (!INTEL_INFO(dev)->cursor_needs_physical)
+ i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
+ drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
mutex_unlock(&dev->struct_mutex);
+ old_width = intel_crtc->cursor_width;
+
intel_crtc->cursor_addr = addr;
- intel_crtc->cursor_bo = bo;
+ intel_crtc->cursor_bo = obj;
+ intel_crtc->cursor_width = width;
+ intel_crtc->cursor_height = height;
+
+ if (intel_crtc->active) {
+ if (old_width != width)
+ intel_update_watermarks(crtc);
+ intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+ }
return 0;
-fail:
- mutex_lock(&dev->struct_mutex);
+fail_unpin:
+ i915_gem_object_unpin_from_display_plane(obj);
fail_locked:
- drm_gem_object_unreference(bo);
mutex_unlock(&dev->struct_mutex);
+fail:
+ drm_gem_object_unreference_unlocked(&obj->base);
return ret;
}
static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- int pipe = intel_crtc->pipe;
- uint32_t temp = 0;
- uint32_t adder;
- if (crtc->fb) {
- intel_fb = to_intel_framebuffer(crtc->fb);
- intel_mark_busy(dev, intel_fb->obj);
- }
+ intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
+ intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
- if (x < 0) {
- temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
- x = -x;
+ if (intel_crtc->active)
+ intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+
+ return 0;
+}
+
+static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ int end = (start + size > 256) ? 256 : start + size, i;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ for (i = start; i < end; i++) {
+ intel_crtc->lut_r[i] = red[i] >> 8;
+ intel_crtc->lut_g[i] = green[i] >> 8;
+ intel_crtc->lut_b[i] = blue[i] >> 8;
}
- if (y < 0) {
- temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
- y = -y;
+
+ intel_crtc_load_lut(crtc);
+}
+
+/* VESA 640x480x72Hz mode to set on the pipe */
+static struct drm_display_mode load_detect_mode = {
+ DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+};
+
+struct drm_framebuffer *
+__intel_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj)
+{
+ struct intel_framebuffer *intel_fb;
+ int ret;
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return ERR_PTR(-ENOMEM);
}
- temp |= x << CURSOR_X_SHIFT;
- temp |= y << CURSOR_Y_SHIFT;
+ ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
+ if (ret)
+ goto err;
- adder = intel_crtc->cursor_addr;
- I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
- I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
+ return &intel_fb->base;
+err:
+ drm_gem_object_unreference_unlocked(&obj->base);
+ kfree(intel_fb);
- return 0;
+ return ERR_PTR(ret);
}
-/** Sets the color ramps on behalf of RandR */
-void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
+static struct drm_framebuffer *
+intel_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_framebuffer *fb;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ERR_PTR(ret);
+ fb = __intel_framebuffer_create(dev, mode_cmd, obj);
+ mutex_unlock(&dev->struct_mutex);
- intel_crtc->lut_r[regno] = red >> 8;
- intel_crtc->lut_g[regno] = green >> 8;
- intel_crtc->lut_b[regno] = blue >> 8;
+ return fb;
}
-void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno)
+static u32
+intel_framebuffer_pitch_for_width(int width, int bpp)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 pitch = DIV_ROUND_UP(width * bpp, 8);
+ return ALIGN(pitch, 64);
+}
- *red = intel_crtc->lut_r[regno] << 8;
- *green = intel_crtc->lut_g[regno] << 8;
- *blue = intel_crtc->lut_b[regno] << 8;
+static u32
+intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
+{
+ u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
+ return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
}
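/*
 * Editor's worked example (illustrative values, assuming 4 KiB pages): for a
 * 1366x768 mode at 32bpp the helpers above give
 *   pitch = DIV_ROUND_UP(1366 * 32, 8) = 5464, ALIGN(5464, 64) = 5504
 * bytes per scanline, and
 *   size  = ALIGN(5504 * 768, PAGE_SIZE) = 4227072
 * bytes, which happens to be page-aligned already (1032 * 4096).
 */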
-static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size)
+static struct drm_framebuffer *
+intel_framebuffer_create_for_mode(struct drm_device *dev,
+ struct drm_display_mode *mode,
+ int depth, int bpp)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int i;
+ struct drm_i915_gem_object *obj;
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
- if (size != 256)
- return;
+ obj = i915_gem_alloc_object(dev,
+ intel_framebuffer_size_for_mode(mode, bpp));
+ if (obj == NULL)
+ return ERR_PTR(-ENOMEM);
- for (i = 0; i < 256; i++) {
- intel_crtc->lut_r[i] = red[i] >> 8;
- intel_crtc->lut_g[i] = green[i] >> 8;
- intel_crtc->lut_b[i] = blue[i] >> 8;
- }
+ mode_cmd.width = mode->hdisplay;
+ mode_cmd.height = mode->vdisplay;
+ mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
+ bpp);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
- intel_crtc_load_lut(crtc);
+ return intel_framebuffer_create(dev, &mode_cmd, obj);
}
-/**
- * Get a pipe with a simple mode set on it for doing load-based monitor
- * detection.
- *
- * It will be up to the load-detect code to adjust the pipe as appropriate for
- * its requirements. The pipe will be connected to no other outputs.
- *
- * Currently this code will only succeed if there is a pipe with no outputs
- * configured for it. In the future, it could choose to temporarily disable
- * some outputs to free up a pipe for its use.
- *
- * \return crtc, or NULL if no pipes are available.
- */
+static struct drm_framebuffer *
+mode_fits_in_fbdev(struct drm_device *dev,
+ struct drm_display_mode *mode)
+{
+#ifdef CONFIG_DRM_I915_FBDEV
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct drm_framebuffer *fb;
-/* VESA 640x480x72Hz mode to set on the pipe */
-static struct drm_display_mode load_detect_mode = {
- DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
- 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-};
+ if (!dev_priv->fbdev)
+ return NULL;
+
+ if (!dev_priv->fbdev->fb)
+ return NULL;
+
+ obj = dev_priv->fbdev->fb->obj;
+ BUG_ON(!obj);
-struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
- struct drm_display_mode *mode,
- int *dpms_mode)
+ fb = &dev_priv->fbdev->fb->base;
+ if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
+ fb->bits_per_pixel))
+ return NULL;
+
+ if (obj->base.size < mode->vdisplay * fb->pitches[0])
+ return NULL;
+
+ return fb;
+#else
+ return NULL;
+#endif
+}
+
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct intel_crtc *intel_crtc;
+ struct intel_encoder *intel_encoder =
+ intel_attached_encoder(connector);
struct drm_crtc *possible_crtc;
- struct drm_crtc *supported_crtc =NULL;
- struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- struct drm_crtc_helper_funcs *crtc_funcs;
- int i = -1;
+ struct drm_framebuffer *fb;
+ struct drm_mode_config *config = &dev->mode_config;
+ int ret, i = -1;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, connector->name,
+ encoder->base.id, encoder->name);
+
+ drm_modeset_acquire_init(ctx, 0);
+
+retry:
+ ret = drm_modeset_lock(&config->connection_mutex, ctx);
+ if (ret)
+ goto fail_unlock;
/*
* Algorithm gets a little messy:
+ *
* - if the connector already has an assigned crtc, use it (but make
* sure it's on first)
+ *
* - try to find the first unused crtc that can drive this connector,
* and use that if we find one
- * - if there are no unused crtcs available, try to use the first
- * one we found that supports the connector
*/
/* See if we already have a CRTC for this connector */
if (encoder->crtc) {
crtc = encoder->crtc;
+
+ ret = drm_modeset_lock(&crtc->mutex, ctx);
+ if (ret)
+ goto fail_unlock;
+
+ old->dpms_mode = connector->dpms;
+ old->load_detect_temp = false;
+
/* Make sure the crtc and connector are running */
- intel_crtc = to_intel_crtc(crtc);
- *dpms_mode = intel_crtc->dpms_mode;
- if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
- crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
- }
- return crtc;
+ if (connector->dpms != DRM_MODE_DPMS_ON)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
+
+ return true;
}
/* Find an unused one (if possible) */
- list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, possible_crtc) {
i++;
if (!(encoder->possible_crtcs & (1 << i)))
continue;
@@ -3611,82 +8363,151 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
crtc = possible_crtc;
break;
}
- if (!supported_crtc)
- supported_crtc = possible_crtc;
}
/*
* If we didn't find an unused CRTC, don't use any.
*/
if (!crtc) {
- return NULL;
+ DRM_DEBUG_KMS("no pipe available for load-detect\n");
+ goto fail_unlock;
}
- encoder->crtc = crtc;
- intel_output->base.encoder = encoder;
- intel_output->load_detect_temp = true;
+ ret = drm_modeset_lock(&crtc->mutex, ctx);
+ if (ret)
+ goto fail_unlock;
+ intel_encoder->new_crtc = to_intel_crtc(crtc);
+ to_intel_connector(connector)->new_encoder = intel_encoder;
intel_crtc = to_intel_crtc(crtc);
- *dpms_mode = intel_crtc->dpms_mode;
+ intel_crtc->new_enabled = true;
+ intel_crtc->new_config = &intel_crtc->config;
+ old->dpms_mode = connector->dpms;
+ old->load_detect_temp = true;
+ old->release_fb = NULL;
- if (!crtc->enabled) {
- if (!mode)
- mode = &load_detect_mode;
- drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb);
- } else {
- if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
- crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- }
+ if (!mode)
+ mode = &load_detect_mode;
+
+ /* We need a framebuffer large enough to accommodate all accesses
+ * that the plane may generate whilst we perform load detection.
+	 * We cannot rely on the fbcon either being present (we get called
+	 * during its initialisation to detect all boot displays, or it may
+	 * not even exist) or being large enough to satisfy the requested
+	 * mode.
+ */
+ fb = mode_fits_in_fbdev(dev, mode);
+ if (fb == NULL) {
+ DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
+ fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
+ old->release_fb = fb;
+ } else
+ DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
+ if (IS_ERR(fb)) {
+ DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
+ goto fail;
+ }
- /* Add this connector to the crtc */
- encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode);
- encoder_funcs->commit(encoder);
+ if (intel_set_mode(crtc, mode, 0, 0, fb)) {
+ DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
+ if (old->release_fb)
+ old->release_fb->funcs->destroy(old->release_fb);
+ goto fail;
}
+
/* let the connector get through one full cycle before testing */
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ return true;
+
+ fail:
+ intel_crtc->new_enabled = crtc->enabled;
+ if (intel_crtc->new_enabled)
+ intel_crtc->new_config = &intel_crtc->config;
+ else
+ intel_crtc->new_config = NULL;
+fail_unlock:
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(ctx);
+ goto retry;
+ }
+
+ drm_modeset_drop_locks(ctx);
+ drm_modeset_acquire_fini(ctx);
- return crtc;
+ return false;
}
-void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode)
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_encoder *encoder = &intel_output->enc;
- struct drm_device *dev = encoder->dev;
+ struct intel_encoder *intel_encoder =
+ intel_attached_encoder(connector);
+ struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_output->load_detect_temp) {
- encoder->crtc = NULL;
- intel_output->base.encoder = NULL;
- intel_output->load_detect_temp = false;
- crtc->enabled = drm_helper_crtc_in_use(crtc);
- drm_helper_disable_unused_functions(dev);
- }
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, connector->name,
+ encoder->base.id, encoder->name);
- /* Switch crtc and output back off if necessary */
- if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
- if (encoder->crtc == crtc)
- encoder_funcs->dpms(encoder, dpms_mode);
- crtc_funcs->dpms(crtc, dpms_mode);
+ if (old->load_detect_temp) {
+ to_intel_connector(connector)->new_encoder = NULL;
+ intel_encoder->new_crtc = NULL;
+ intel_crtc->new_enabled = false;
+ intel_crtc->new_config = NULL;
+ intel_set_mode(crtc, NULL, 0, 0, NULL);
+
+ if (old->release_fb) {
+ drm_framebuffer_unregister_private(old->release_fb);
+ drm_framebuffer_unreference(old->release_fb);
+ }
+
+		goto unlock;
}
+
+ /* Switch crtc and encoder back off if necessary */
+ if (old->dpms_mode != DRM_MODE_DPMS_ON)
+ connector->funcs->dpms(connector, old->dpms_mode);
+
+unlock:
+ drm_modeset_drop_locks(ctx);
+ drm_modeset_acquire_fini(ctx);
+}
+
+static int i9xx_pll_refclk(struct drm_device *dev,
+ const struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
+
+ if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
+ return dev_priv->vbt.lvds_ssc_freq;
+ else if (HAS_PCH_SPLIT(dev))
+ return 120000;
+ else if (!IS_GEN2(dev))
+ return 96000;
+ else
+ return 48000;
}
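/*
 * Editor's summary of the reference-clock selection above, in kHz (the
 * spread-spectrum case comes from the VBT and is panel specific):
 *   SSC reference selected  -> dev_priv->vbt.lvds_ssc_freq
 *   PCH-split platforms     -> 120000
 *   gen3 and later          -> 96000
 *   gen2                    -> 48000
 */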
/* Returns the clock of the currently programmed mode of the given pipe. */
-static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
{
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ int pipe = pipe_config->cpu_transcoder;
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
intel_clock_t clock;
+ int refclk = i9xx_pll_refclk(dev, pipe_config);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = I915_READ((pipe == 0) ? FPA0 : FPB0);
+ fp = pipe_config->dpll_hw_state.fp0;
else
- fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
+ fp = pipe_config->dpll_hw_state.fp1;
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
if (IS_PINEVIEW(dev)) {
@@ -3697,7 +8518,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
if (IS_PINEVIEW(dev))
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
@@ -3717,25 +8538,25 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
default:
DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
- return 0;
+ return;
}
- /* XXX: Handle the 100Mhz refclk */
- intel_clock(dev, 96000, &clock);
+ if (IS_PINEVIEW(dev))
+ pineview_clock(refclk, &clock);
+ else
+ i9xx_clock(refclk, &clock);
} else {
- bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
+ u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
+ bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
if (is_lvds) {
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
- clock.p2 = 14;
- if ((dpll & PLL_REF_INPUT_MASK) ==
- PLLB_REF_INPUT_SPREADSPECTRUMIN) {
- /* XXX: might not be 66MHz */
- intel_clock(dev, 66000, &clock);
- } else
- intel_clock(dev, 48000, &clock);
+ if (lvds & LVDS_CLKB_POWER_UP)
+ clock.p2 = 7;
+ else
+ clock.p2 = 14;
} else {
if (dpll & PLL_P1_DIVIDE_BY_TWO)
clock.p1 = 2;
@@ -3747,17 +8568,55 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
clock.p2 = 4;
else
clock.p2 = 2;
-
- intel_clock(dev, 48000, &clock);
}
+
+ i9xx_clock(refclk, &clock);
}
- /* XXX: It would be nice to validate the clocks, but we can't reuse
- * i830PllIsValid() because it relies on the xf86_config connector
- * configuration being accurate, which it isn't necessarily.
+ /*
+ * This value includes pixel_multiplier. We will use
+ * port_clock to compute adjusted_mode.crtc_clock in the
+ * encoder's get_config() function.
+ */
+ pipe_config->port_clock = clock.dot;
+}
+
+int intel_dotclock_calculate(int link_freq,
+ const struct intel_link_m_n *m_n)
+{
+ /*
+ * The calculation for the data clock is:
+ * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
+	 * But we want to avoid losing precision if possible, so:
+	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
+	 *
+	 * and for the link M/N values it is simpler:
+	 * pixel_clock = (m * link_clock) / n
*/
- return clock.dot;
+ if (!m_n->link_n)
+ return 0;
+
+ return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
+}
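/*
 * Editor's worked example (hypothetical m/n values): with
 * link_freq = 270000 and link_m/link_n = 107/120,
 *   div_u64((u64)107 * 270000, 120) = 240750,
 * i.e. a 240.75 MHz dotclock. The 64-bit intermediate matters here:
 * link_m * link_freq can exceed 32 bits for large link_m values.
 */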
+
+static void ironlake_pch_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+
+ /* read out port_clock from the DPLL */
+ i9xx_crtc_clock_get(crtc, pipe_config);
+
+ /*
+ * This value does not include pixel_multiplier.
+ * We will check that port_clock and adjusted_mode.crtc_clock
+ * agree once we know their relationship in the encoder's
+ * get_config() function.
+ */
+ pipe_config->adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
+ &pipe_config->fdi_m_n);
}
/** Returns the currently programmed mode of the given pipe. */
@@ -3766,18 +8625,34 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct drm_display_mode *mode;
- int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
- int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
- int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
- int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ struct intel_crtc_config pipe_config;
+ int htot = I915_READ(HTOTAL(cpu_transcoder));
+ int hsync = I915_READ(HSYNC(cpu_transcoder));
+ int vtot = I915_READ(VTOTAL(cpu_transcoder));
+ int vsync = I915_READ(VSYNC(cpu_transcoder));
+ enum pipe pipe = intel_crtc->pipe;
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
return NULL;
- mode->clock = intel_crtc_clock_get(dev, crtc);
+ /*
+ * Construct a pipe_config sufficient for getting the clock info
+ * back out of crtc_clock_get.
+ *
+ * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
+ * to use a real value here instead.
+ */
+ pipe_config.cpu_transcoder = (enum transcoder) pipe;
+ pipe_config.pixel_multiplier = 1;
+ pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
+ pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
+ pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
+ i9xx_crtc_clock_get(intel_crtc, &pipe_config);
+
+ mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
@@ -3788,90 +8663,48 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
return mode;
}
-#define GPU_IDLE_TIMEOUT 500 /* ms */
-
-/* When this timer fires, we've been idle for awhile */
-static void intel_gpu_idle_timer(unsigned long arg)
-{
- struct drm_device *dev = (struct drm_device *)arg;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
-
- dev_priv->busy = false;
-
- queue_work(dev_priv->wq, &dev_priv->idle_work);
-}
-
-#define CRTC_IDLE_TIMEOUT 1000 /* ms */
-
-static void intel_crtc_idle_timer(unsigned long arg)
-{
- struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
- struct drm_crtc *crtc = &intel_crtc->base;
- drm_i915_private_t *dev_priv = crtc->dev->dev_private;
-
- DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
-
- intel_crtc->busy = false;
-
- queue_work(dev_priv->wq, &dev_priv->idle_work);
-}
-
-static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
+static void intel_increase_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dpll = I915_READ(dpll_reg);
+ int dpll_reg = DPLL(pipe);
+ int dpll;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return;
if (!dev_priv->lvds_downclock_avail)
return;
+ dpll = I915_READ(dpll_reg);
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
DRM_DEBUG_DRIVER("upclocking LVDS\n");
- /* Unlock panel regs */
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+ assert_panel_unlocked(dev_priv, pipe);
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
- dpll = I915_READ(dpll_reg);
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, pipe);
+
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
-
- /* ...and lock them again */
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
}
-
- /* Schedule downclock */
- if (schedule)
- mod_timer(&intel_crtc->idle_timer, jiffies +
- msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dpll = I915_READ(dpll_reg);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -3882,141 +8715,136 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
* the manual case.
*/
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
+ int pipe = intel_crtc->pipe;
+ int dpll_reg = DPLL(pipe);
+ int dpll;
+
DRM_DEBUG_DRIVER("downclocking LVDS\n");
- /* Unlock panel regs */
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+ assert_panel_unlocked(dev_priv, pipe);
+ dpll = I915_READ(dpll_reg);
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
- dpll = I915_READ(dpll_reg);
- intel_wait_for_vblank(dev);
+ intel_wait_for_vblank(dev, pipe);
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
-
- /* ...and lock them again */
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
}
}
-/**
- * intel_idle_update - adjust clocks for idleness
- * @work: work struct
- *
- * Either the GPU or display (or both) went idle. Check the busy status
- * here and adjust the CRTC and GPU clocks as necessary.
- */
-static void intel_idle_update(struct work_struct *work)
+void intel_mark_busy(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- idle_work);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->mm.busy)
+ return;
+
+ intel_runtime_pm_get(dev_priv);
+ i915_update_gfx_val(dev_priv);
+ dev_priv->mm.busy = true;
+}
+
+void intel_mark_idle(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
- if (!i915_powersave)
+ if (!dev_priv->mm.busy)
return;
- mutex_lock(&dev->struct_mutex);
+ dev_priv->mm.busy = false;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- /* Skip inactive CRTCs */
- if (!crtc->fb)
+ if (!i915.powersave)
+ goto out;
+
+ for_each_crtc(dev, crtc) {
+ if (!crtc->primary->fb)
continue;
- intel_crtc = to_intel_crtc(crtc);
- if (!intel_crtc->busy)
- intel_decrease_pllclock(crtc);
+ intel_decrease_pllclock(crtc);
}
- mutex_unlock(&dev->struct_mutex);
+ if (INTEL_INFO(dev)->gen >= 6)
+ gen6_rps_idle(dev->dev_private);
+
+out:
+ intel_runtime_pm_put(dev_priv);
}
-/**
- * intel_mark_busy - mark the GPU and possibly the display busy
- * @dev: drm device
- * @obj: object we're operating on
- *
- * Callers can use this function to indicate that the GPU is busy processing
- * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
- * buffer), we'll also mark the display as busy, so we know to increase its
- * clock frequency.
- */
-void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = NULL;
- struct intel_framebuffer *intel_fb;
- struct intel_crtc *intel_crtc;
+ struct drm_device *dev = obj->base.dev;
+ struct drm_crtc *crtc;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!i915.powersave)
return;
- if (!dev_priv->busy)
- dev_priv->busy = true;
- else
- mod_timer(&dev_priv->idle_timer, jiffies +
- msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+ for_each_crtc(dev, crtc) {
+ if (!crtc->primary->fb)
+ continue;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (!crtc->fb)
+ if (to_intel_framebuffer(crtc->primary->fb)->obj != obj)
continue;
- intel_crtc = to_intel_crtc(crtc);
- intel_fb = to_intel_framebuffer(crtc->fb);
- if (intel_fb->obj == obj) {
- if (!intel_crtc->busy) {
- /* Non-busy -> busy, upclock */
- intel_increase_pllclock(crtc, true);
- intel_crtc->busy = true;
- } else {
- /* Busy -> busy, put off timer */
- mod_timer(&intel_crtc->idle_timer, jiffies +
- msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
- }
- }
+ intel_increase_pllclock(crtc);
+ if (ring && intel_fbc_enabled(dev))
+ ring->fbc_dirty = true;
}
}
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct intel_unpin_work *work;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+ intel_crtc->unpin_work = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (work) {
+ cancel_work_sync(&work->work);
+ kfree(work);
+ }
+
+ intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);
drm_crtc_cleanup(crtc);
+
kfree(intel_crtc);
}
-struct intel_unpin_work {
- struct work_struct work;
- struct drm_device *dev;
- struct drm_gem_object *obj;
- struct drm_pending_vblank_event *event;
- int pending;
-};
-
static void intel_unpin_work_fn(struct work_struct *__work)
{
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
+ struct drm_device *dev = work->crtc->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(work->old_fb_obj);
+ drm_gem_object_unreference(&work->pending_flip_obj->base);
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
+ atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
- mutex_lock(&work->dev->struct_mutex);
- i915_gem_object_unpin(work->obj);
- drm_gem_object_unreference(work->obj);
- mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
-void intel_finish_page_flip(struct drm_device *dev, int pipe)
+static void do_intel_finish_page_flip(struct drm_device *dev,
+ struct drm_crtc *crtc)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
- struct drm_i915_gem_object *obj_priv;
- struct drm_pending_vblank_event *e;
- struct timeval now;
unsigned long flags;
/* Ignore early vblank irqs */
@@ -4025,204 +8853,2186 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
- if (work == NULL || !work->pending) {
+
+ /* Ensure we don't miss a work->pending update ... */
+ smp_rmb();
+
+ if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
+ /* and that the unpin work is consistent wrt ->pending. */
+ smp_rmb();
+
intel_crtc->unpin_work = NULL;
- drm_vblank_put(dev, intel_crtc->pipe);
- if (work->event) {
- e = work->event;
- do_gettimeofday(&now);
- e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
- list_add_tail(&e->base.link,
- &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- }
+ if (work->event)
+ drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
+
+ drm_crtc_vblank_put(crtc);
spin_unlock_irqrestore(&dev->event_lock, flags);
- obj_priv = work->obj->driver_private;
- if (atomic_dec_and_test(&obj_priv->pending_flip))
- DRM_WAKEUP(&dev_priv->pending_flip_queue);
- schedule_work(&work->work);
+ wake_up_all(&dev_priv->pending_flip_queue);
+
+ queue_work(dev_priv->wq, &work->work);
+
+ trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
+}
+
+void intel_finish_page_flip(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ do_intel_finish_page_flip(dev, crtc);
+}
+
+void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
+
+ do_intel_finish_page_flip(dev, crtc);
+}
+
+/* Is 'a' after or equal to 'b'? */
+static bool g4x_flip_count_after_eq(u32 a, u32 b)
+{
+ return !((a - b) & 0x80000000);
+}
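/*
 * Editor's note: this is the usual wraparound-safe sequence-number
 * comparison. Subtracting in u32 and testing the sign bit keeps the result
 * correct across the 2^32 rollover, e.g.
 *   g4x_flip_count_after_eq(0x00000002, 0xfffffffe)
 * computes (a - b) = 4, top bit clear, so 'a' counts as after 'b' even
 * though a < b numerically. The comparison only holds while the two
 * counters stay within 2^31 of each other.
 */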
+
+static bool page_flip_finished(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+	 * The relevant registers don't exist on pre-ctg.
+ * As the flip done interrupt doesn't trigger for mmio
+ * flips on gmch platforms, a flip count check isn't
+ * really needed there. But since ctg has the registers,
+ * include it in the check anyway.
+ */
+ if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
+ return true;
+
+ /*
+ * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
+ * used the same base address. In that case the mmio flip might
+ * have completed, but the CS hasn't even executed the flip yet.
+ *
+ * A flip count check isn't enough as the CS might have updated
+ * the base address just after start of vblank, but before we
+ * managed to process the interrupt. This means we'd complete the
+ * CS flip too soon.
+ *
+ * Combining both checks should get us a good enough result. It may
+ * still happen that the CS flip has been executed, but has not
+ * yet actually completed. But in case the base address is the same
+ * anyway, we don't really care.
+ */
+ return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
+ crtc->unpin_work->gtt_offset &&
+ g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
+ crtc->unpin_work->flip_count);
}
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
unsigned long flags;
+ /* NB: An MMIO update of the plane base pointer will also
+ * generate a page-flip completion irq, i.e. every modeset
+ * is also accompanied by a spurious intel_prepare_page_flip().
+ */
spin_lock_irqsave(&dev->event_lock, flags);
- if (intel_crtc->unpin_work)
- intel_crtc->unpin_work->pending = 1;
+ if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
+ atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+{
+ /* Ensure that the work item is consistent when activating it ... */
+ smp_wmb();
+ atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+ /* and that it is marked active as soon as the irq could fire. */
+ smp_wmb();
+}
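/*
 * Editor's sketch of the barrier pairing (generic pattern; the names match
 * the code above and the smp_rmb() pair in do_intel_finish_page_flip()):
 *
 *   writer (queue_flip path)           reader (flip-done irq)
 *   --------------------------         -----------------------------------
 *   work->gtt_offset = ...;            work = intel_crtc->unpin_work;
 *   smp_wmb();                         smp_rmb();
 *   atomic_set(&work->pending,         if (atomic_read(&work->pending)
 *              INTEL_FLIP_PENDING);        < INTEL_FLIP_COMPLETE) return;
 *   smp_wmb();                         smp_rmb(); use(work->gtt_offset);
 *
 * so an irq that observes the flip marked pending/complete is guaranteed
 * to also observe fully initialised work fields.
 */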
+
+static int intel_gen2_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
+ uint32_t flags)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 flip_mask;
+ int ret;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ /* Can't queue multiple flips, so wait for the previous
+ * one to finish before executing the next.
+ */
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+ intel_mark_page_flip_active(intel_crtc);
+ __intel_ring_advance(ring);
+ return 0;
+}
+
+static int intel_gen3_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
+ uint32_t flags)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 flip_mask;
+ int ret;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_mark_page_flip_active(intel_crtc);
+ __intel_ring_advance(ring);
+ return 0;
+}
+
+static int intel_gen4_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
+ uint32_t flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pf, pipesrc;
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ /* i965+ uses the linear or tiled offsets from the
+ * Display Registers (which do not change across a page-flip)
+ * so we need only reprogram the base address.
+ */
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
+ obj->tiling_mode);
+
+ /* XXX Enabling the panel-fitter across page-flip is so far
+ * untested on non-native modes, so ignore it for now.
+ * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+ */
+ pf = 0;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ intel_ring_emit(ring, pf | pipesrc);
+
+ intel_mark_page_flip_active(intel_crtc);
+ __intel_ring_advance(ring);
+ return 0;
+}
+
+static int intel_gen6_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
+ uint32_t flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pf, pipesrc;
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+
+ /* Contrary to the suggestions in the documentation,
+ * "Enable Panel Fitter" does not seem to be required when page
+	 * flipping with a non-native mode, and worse, causes a normal
+ * modeset to fail.
+ * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+ */
+ pf = 0;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ intel_ring_emit(ring, pf | pipesrc);
+
+ intel_mark_page_flip_active(intel_crtc);
+ __intel_ring_advance(ring);
+ return 0;
+}
+
+static int intel_gen7_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
+ uint32_t flags)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t plane_bit = 0;
+ int len, ret;
+
+ switch (intel_crtc->plane) {
+ case PLANE_A:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
+ break;
+ case PLANE_B:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
+ break;
+ case PLANE_C:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
+ break;
+ default:
+ WARN_ONCE(1, "unknown plane in flip command\n");
+ return -ENODEV;
+ }
+
+ len = 4;
+ if (ring->id == RCS) {
+ len += 6;
+ /*
+		 * On Gen 8, SRM takes an extra dword to accommodate
+		 * 48-bit addresses, and we need a NOOP for the batch size to
+ * stay even.
+ */
+ if (IS_GEN8(dev))
+ len += 2;
+ }
+
+ /*
+ * BSpec MI_DISPLAY_FLIP for IVB:
+ * "The full packet must be contained within the same cache line."
+ *
+ * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
+ * cacheline, if we ever start emitting more commands before
+ * the MI_DISPLAY_FLIP we may need to first emit everything else,
+ * then do the cacheline alignment, and finally emit the
+ * MI_DISPLAY_FLIP.
+ */
+ ret = intel_ring_cacheline_align(ring);
+ if (ret)
+ return ret;
+
+ ret = intel_ring_begin(ring, len);
+ if (ret)
+ return ret;
+
+ /* Unmask the flip-done completion message. Note that the bspec says that
+ * we should do this for both the BCS and RCS, and that we must not unmask
+ * more than one flip event at any time (or ensure that one flip message
+ * can be sent by waiting for flip-done prior to queueing new flips).
+ * Experimentation says that BCS works despite DERRMR masking all
+ * flip-done completion events and that unmasking all planes at once
+ * for the RCS also doesn't appear to drop events. Setting the DERRMR
+ * to zero does lead to lockups within MI_DISPLAY_FLIP.
+ */
+ if (ring->id == RCS) {
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, DERRMR);
+ intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+ DERRMR_PIPEB_PRI_FLIP_DONE |
+ DERRMR_PIPEC_PRI_FLIP_DONE));
+ if (IS_GEN8(dev))
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
+ MI_SRM_LRM_GLOBAL_GTT);
+ else
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
+ MI_SRM_LRM_GLOBAL_GTT);
+ intel_ring_emit(ring, DERRMR);
+ intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+ if (IS_GEN8(dev)) {
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ }
+ }
+
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
+ intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(ring, (MI_NOOP));
+
+ intel_mark_page_flip_active(intel_crtc);
+ __intel_ring_advance(ring);
+ return 0;
+}
+
+static int intel_default_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
+ uint32_t flags)
+{
+ return -ENODEV;
+}
+
static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
+ struct intel_engine_cs *ring;
unsigned long flags;
int ret;
- RING_LOCALS;
- work = kzalloc(sizeof *work, GFP_KERNEL);
+ /* Can't change pixel format via MI display flips. */
+ if (fb->pixel_format != crtc->primary->fb->pixel_format)
+ return -EINVAL;
+
+ /*
+ * TILEOFF/LINOFF registers can't be changed via MI display flips.
+	 * Note that pitch changes could also affect these registers.
+ */
+ if (INTEL_INFO(dev)->gen > 3 &&
+ (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
+ fb->pitches[0] != crtc->primary->fb->pitches[0]))
+ return -EINVAL;
+
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ goto out_hang;
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
- mutex_lock(&dev->struct_mutex);
-
work->event = event;
- work->dev = crtc->dev;
- intel_fb = to_intel_framebuffer(crtc->fb);
- work->obj = intel_fb->obj;
+ work->crtc = crtc;
+ work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ goto free_work;
+
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
- mutex_unlock(&dev->struct_mutex);
+ drm_crtc_vblank_put(crtc);
+
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
}
intel_crtc->unpin_work = work;
spin_unlock_irqrestore(&dev->event_lock, flags);
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
+ if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
+ flush_workqueue(dev_priv->wq);
- ret = intel_pin_and_fence_fb_obj(dev, obj);
- if (ret != 0) {
- kfree(work);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto cleanup;
+
+ /* Reference the objects for the scheduled work. */
+ drm_gem_object_reference(&work->old_fb_obj->base);
+ drm_gem_object_reference(&obj->base);
+
+ crtc->primary->fb = fb;
- /* Reference the old fb object for the scheduled work. */
- drm_gem_object_reference(work->obj);
+ work->pending_flip_obj = obj;
- crtc->fb = fb;
- i915_gem_object_flush_write_domain(obj);
- drm_vblank_get(dev, intel_crtc->pipe);
- obj_priv = obj->driver_private;
- atomic_inc(&obj_priv->pending_flip);
+ work->enable_stall_check = true;
- BEGIN_LP_RING(4);
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
- if (IS_I965G(dev)) {
- OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
- OUT_RING((fb->width << 16) | fb->height);
+ atomic_inc(&intel_crtc->unpin_work_count);
+ intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+
+ if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(intel_crtc->pipe)) + 1;
+
+ if (IS_VALLEYVIEW(dev)) {
+ ring = &dev_priv->ring[BCS];
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ ring = obj->ring;
+ if (ring == NULL || ring->id != RCS)
+ ring = &dev_priv->ring[BCS];
} else {
- OUT_RING(obj_priv->gtt_offset);
- OUT_RING(MI_NOOP);
+ ring = &dev_priv->ring[RCS];
}
- ADVANCE_LP_RING();
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+ goto cleanup_pending;
+
+ work->gtt_offset =
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
+
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags);
+ if (ret)
+ goto cleanup_unpin;
+
+ intel_disable_fbc(dev);
+ intel_mark_fb_busy(obj, NULL);
mutex_unlock(&dev->struct_mutex);
+ trace_i915_flip_request(intel_crtc->plane, obj);
+
return 0;
+
+cleanup_unpin:
+ intel_unpin_fb_obj(obj);
+cleanup_pending:
+ atomic_dec(&intel_crtc->unpin_work_count);
+ crtc->primary->fb = old_fb;
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+
+cleanup:
+ spin_lock_irqsave(&dev->event_lock, flags);
+ intel_crtc->unpin_work = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ drm_crtc_vblank_put(crtc);
+free_work:
+ kfree(work);
+
+ if (ret == -EIO) {
+out_hang:
+ intel_crtc_wait_for_pending_flips(crtc);
+ ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
+ if (ret == 0 && event)
+ drm_send_vblank_event(dev, intel_crtc->pipe, event);
+ }
+ return ret;
}
-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
- .dpms = intel_crtc_dpms,
- .mode_fixup = intel_crtc_mode_fixup,
- .mode_set = intel_crtc_mode_set,
- .mode_set_base = intel_pipe_set_base,
- .prepare = intel_crtc_prepare,
- .commit = intel_crtc_commit,
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
.load_lut = intel_crtc_load_lut,
};
+/**
+ * intel_modeset_update_staged_output_state
+ *
+ * Updates the staged output configuration state, e.g. after we've read out the
+ * current hw state.
+ */
+static void intel_modeset_update_staged_output_state(struct drm_device *dev)
+{
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ connector->new_encoder =
+ to_intel_encoder(connector->base.encoder);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ encoder->new_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ }
+
+ for_each_intel_crtc(dev, crtc) {
+ crtc->new_enabled = crtc->base.enabled;
+
+ if (crtc->new_enabled)
+ crtc->new_config = &crtc->config;
+ else
+ crtc->new_config = NULL;
+ }
+}
+
+/**
+ * intel_modeset_commit_output_state
+ *
+ * This function copies the staged display pipe configuration to the real one.
+ */
+static void intel_modeset_commit_output_state(struct drm_device *dev)
+{
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ connector->base.encoder = &connector->new_encoder->base;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ encoder->base.crtc = &encoder->new_crtc->base;
+ }
+
+ for_each_intel_crtc(dev, crtc) {
+ crtc->base.enabled = crtc->new_enabled;
+ }
+}
+
+static void
+connected_sink_compute_bpp(struct intel_connector *connector,
+ struct intel_crtc_config *pipe_config)
+{
+ int bpp = pipe_config->pipe_bpp;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
+ connector->base.base.id,
+ connector->base.name);
+
+ /* Don't use an invalid EDID bpc value */
+ if (connector->base.display_info.bpc &&
+ connector->base.display_info.bpc * 3 < bpp) {
+ DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
+ bpp, connector->base.display_info.bpc*3);
+ pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
+ }
+
+ /* Clamp bpp to 8 on screens without EDID 1.4 */
+ if (connector->base.display_info.bpc == 0 && bpp > 24) {
+ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+ bpp);
+ pipe_config->pipe_bpp = 24;
+ }
+}
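/*
 * Editor's worked example: a sink whose EDID reports bpc == 6 (a typical
 * 18-bit TN panel) clamps a requested 24bpp pipe down to 6 * 3 = 18bpp;
 * a sink with bpc == 0 (no usable EDID deep-colour info) caps anything
 * above 24bpp back to the 24bpp default.
 */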
+
+static int
+compute_baseline_pipe_bpp(struct intel_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_connector *connector;
+ int bpp;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
+ bpp = 8*3; /* since we go through a colormap */
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ /* checked in intel_framebuffer_init already */
+ if (WARN_ON(INTEL_INFO(dev)->gen > 3))
+ return -EINVAL;
+ case DRM_FORMAT_RGB565:
+ bpp = 6*3; /* min is 18bpp */
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ /* checked in intel_framebuffer_init already */
+ if (WARN_ON(INTEL_INFO(dev)->gen < 4))
+ return -EINVAL;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ bpp = 8*3;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ /* checked in intel_framebuffer_init already */
+ if (WARN_ON(INTEL_INFO(dev)->gen < 4))
+ return -EINVAL;
+ bpp = 10*3;
+ break;
+ /* TODO: gen4+ supports 16 bpc floating point, too. */
+ default:
+ DRM_DEBUG_KMS("unsupported depth\n");
+ return -EINVAL;
+ }
+
+ pipe_config->pipe_bpp = bpp;
+
+ /* Clamp display bpp to EDID value */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (!connector->new_encoder ||
+ connector->new_encoder->new_crtc != crtc)
+ continue;
+
+ connected_sink_compute_bpp(connector, pipe_config);
+ }
+
+ return bpp;
+}
+
+static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
+ "type: 0x%x flags: 0x%x\n",
+ mode->crtc_clock,
+ mode->crtc_hdisplay, mode->crtc_hsync_start,
+ mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vsync_start,
+ mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
+}
+
+static void intel_dump_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config,
+ const char *context)
+{
+ DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
+ context, pipe_name(crtc->pipe));
+
+ DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
+ DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
+ pipe_config->pipe_bpp, pipe_config->dither);
+ DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ pipe_config->has_pch_encoder,
+ pipe_config->fdi_lanes,
+ pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
+ pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
+ pipe_config->fdi_m_n.tu);
+ DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ pipe_config->has_dp_encoder,
+ pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
+ pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
+ pipe_config->dp_m_n.tu);
+ DRM_DEBUG_KMS("requested mode:\n");
+ drm_mode_debug_printmodeline(&pipe_config->requested_mode);
+ DRM_DEBUG_KMS("adjusted mode:\n");
+ drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
+ intel_dump_crtc_timings(&pipe_config->adjusted_mode);
+ DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
+ DRM_DEBUG_KMS("pipe src size: %dx%d\n",
+ pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+ DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+ pipe_config->gmch_pfit.control,
+ pipe_config->gmch_pfit.pgm_ratios,
+ pipe_config->gmch_pfit.lvds_border_bits);
+ DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
+ pipe_config->pch_pfit.pos,
+ pipe_config->pch_pfit.size,
+ pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
+ DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
+ DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
+}
+
+static bool encoders_cloneable(const struct intel_encoder *a,
+ const struct intel_encoder *b)
+{
+ /* masks could be asymmetric, so check both ways */
+ return a == b || (a->cloneable & (1 << b->type) &&
+ b->cloneable & (1 << a->type));
+}
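+
+/*
+ * Worked example with hypothetical masks: if a->cloneable contains
+ * 1 << b->type but b->cloneable lacks 1 << a->type, the pair is
+ * rejected; a one-directional test would have allowed it, which is
+ * why the check runs both ways.
+ */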
+
+static bool check_single_encoder_cloning(struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *source_encoder;
+
+ list_for_each_entry(source_encoder,
+ &dev->mode_config.encoder_list, base.head) {
+ if (source_encoder->new_crtc != crtc)
+ continue;
+
+ if (!encoders_cloneable(encoder, source_encoder))
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_encoder_cloning(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *encoder;
+
+ list_for_each_entry(encoder,
+ &dev->mode_config.encoder_list, base.head) {
+ if (encoder->new_crtc != crtc)
+ continue;
+
+ if (!check_single_encoder_cloning(crtc, encoder))
+ return false;
+ }
+
+ return true;
+}
+
+static struct intel_crtc_config *
+intel_modeset_pipe_config(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *encoder;
+ struct intel_crtc_config *pipe_config;
+ int plane_bpp, ret = -EINVAL;
+ bool retry = true;
+
+ if (!check_encoder_cloning(to_intel_crtc(crtc))) {
+ DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+ if (!pipe_config)
+ return ERR_PTR(-ENOMEM);
+
+ drm_mode_copy(&pipe_config->adjusted_mode, mode);
+ drm_mode_copy(&pipe_config->requested_mode, mode);
+
+ pipe_config->cpu_transcoder =
+ (enum transcoder) to_intel_crtc(crtc)->pipe;
+ pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+
+ /*
+ * Sanitize sync polarity flags based on requested ones. If neither
+ * positive or negative polarity is requested, treat this as meaning
+ * negative polarity.
+ */
+ if (!(pipe_config->adjusted_mode.flags &
+ (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
+ pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (!(pipe_config->adjusted_mode.flags &
+ (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
+ pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+
+ /* Compute a starting value for pipe_config->pipe_bpp taking the source
+ * plane pixel format and any sink constraints into account. Returns the
+ * source plane bpp so that dithering can be selected on mismatches
+ * after encoders and crtc also have had their say. */
+ plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
+ fb, pipe_config);
+ if (plane_bpp < 0)
+ goto fail;
+
+ /*
+ * Determine the real pipe dimensions. Note that stereo modes can
+ * increase the actual pipe size due to the frame doubling and
+ * insertion of additional space for blanks between the frames. This
+ * is stored in the crtc timings. We use the requested mode to do this
+ * computation to clearly distinguish it from the adjusted mode, which
+ * can be changed by the connectors in the below retry loop.
+ */
+ drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
+ pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
+ pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
+
+encoder_retry:
+ /* Ensure the port clock defaults are reset when retrying. */
+ pipe_config->port_clock = 0;
+ pipe_config->pixel_multiplier = 1;
+
+ /* Fill in default crtc timings, allow encoders to overwrite them. */
+ drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
+
+ /* Pass our mode to the connectors and the CRTC to give them a chance to
+ * adjust it according to limitations or connector properties, and also
+ * a chance to reject the mode entirely.
+ */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+
+ if (&encoder->new_crtc->base != crtc)
+ continue;
+
+ if (!encoder->compute_config(encoder, pipe_config)) {
+ DRM_DEBUG_KMS("Encoder config failure\n");
+ goto fail;
+ }
+ }
+
+ /* Set default port clock if not overwritten by the encoder. Needs to be
+ * done afterwards in case the encoder adjusts the mode. */
+ if (!pipe_config->port_clock)
+ pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
+ * pipe_config->pixel_multiplier;
+
+ ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("CRTC fixup failed\n");
+ goto fail;
+ }
+
+ if (ret == RETRY) {
+ if (WARN(!retry, "loop in pipe configuration computation\n")) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
+ retry = false;
+ goto encoder_retry;
+ }
+
+ pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
+ DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
+ plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+
+ return pipe_config;
+fail:
+ kfree(pipe_config);
+ return ERR_PTR(ret);
+}
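+
+/*
+ * Worked example of the port clock default above (hypothetical numbers):
+ * an encoder that leaves port_clock at 0 but sets pixel_multiplier = 2
+ * for a 40000 kHz adjusted mode ends up with port_clock = 80000, the
+ * rate actually driven on the wire.
+ */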
+
+/* Computes which crtcs are affected and sets the relevant bits in the mask. For
+ * simplicity we use the crtc's pipe number (because it's easier to obtain). */
+static void
+intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
+ unsigned *prepare_pipes, unsigned *disable_pipes)
+{
+ struct intel_crtc *intel_crtc;
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_crtc *tmp_crtc;
+
+ *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
+
+ /* Check which crtcs have changed outputs connected to them, these need
+ * to be part of the prepare_pipes mask. We don't (yet) support global
+ * modeset across multiple crtcs, so modeset_pipes will have at most
+ * one bit set. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->base.encoder == &connector->new_encoder->base)
+ continue;
+
+ if (connector->base.encoder) {
+ tmp_crtc = connector->base.encoder->crtc;
+
+ *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+ }
+
+ if (connector->new_encoder)
+ *prepare_pipes |=
+ 1 << connector->new_encoder->new_crtc->pipe;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->base.crtc == &encoder->new_crtc->base)
+ continue;
+
+ if (encoder->base.crtc) {
+ tmp_crtc = encoder->base.crtc;
+
+ *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+ }
+
+ if (encoder->new_crtc)
+ *prepare_pipes |= 1 << encoder->new_crtc->pipe;
+ }
+
+ /* Check for pipes that will be enabled/disabled ... */
+ for_each_intel_crtc(dev, intel_crtc) {
+ if (intel_crtc->base.enabled == intel_crtc->new_enabled)
+ continue;
+
+ if (!intel_crtc->new_enabled)
+ *disable_pipes |= 1 << intel_crtc->pipe;
+ else
+ *prepare_pipes |= 1 << intel_crtc->pipe;
+ }
+
+ /* set_mode is also used to update properties on live display pipes. */
+ intel_crtc = to_intel_crtc(crtc);
+ if (intel_crtc->new_enabled)
+ *prepare_pipes |= 1 << intel_crtc->pipe;
+
+ /*
+ * For simplicity do a full modeset on any pipe where the output routing
+ * changed. We could be more clever, but that would require us to be
+ * more careful with calling the relevant encoder->mode_set functions.
+ */
+ if (*prepare_pipes)
+ *modeset_pipes = *prepare_pipes;
+
+ /* ... and mask these out. */
+ *modeset_pipes &= ~(*disable_pipes);
+ *prepare_pipes &= ~(*disable_pipes);
+
+ /*
+ * HACK: We don't (yet) fully support global modesets. intel_set_config
+ * obeys this rule, but the modeset restore mode of
+ * intel_modeset_setup_hw_state does not.
+ */
+ *modeset_pipes &= 1 << intel_crtc->pipe;
+ *prepare_pipes &= 1 << intel_crtc->pipe;
+
+ DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
+ *modeset_pipes, *prepare_pipes, *disable_pipes);
+}
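+
+/*
+ * Worked example (hypothetical state): a mode set on the pipe B crtc
+ * whose connector is currently routed through pipe A yields
+ * prepare_pipes = 0x3; the final masking against the requested crtc
+ * then leaves modeset_pipes = prepare_pipes = 0x2, while disable_pipes
+ * collects every crtc whose new_enabled went false.
+ */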
+
+static bool intel_crtc_in_use(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev = crtc->dev;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc)
+ return true;
+
+ return false;
+}
+
+static void
+intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
+{
+ struct intel_encoder *intel_encoder;
+ struct intel_crtc *intel_crtc;
+ struct drm_connector *connector;
+
+ list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (!intel_encoder->base.crtc)
+ continue;
+
+ intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+
+ if (prepare_pipes & (1 << intel_crtc->pipe))
+ intel_encoder->connectors_active = false;
+ }
+
+ intel_modeset_commit_output_state(dev);
+
+ /* Double check state. */
+ for_each_intel_crtc(dev, intel_crtc) {
+ WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
+ WARN_ON(intel_crtc->new_config &&
+ intel_crtc->new_config != &intel_crtc->config);
+ WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder || !connector->encoder->crtc)
+ continue;
+
+ intel_crtc = to_intel_crtc(connector->encoder->crtc);
+
+ if (prepare_pipes & (1 << intel_crtc->pipe)) {
+ struct drm_property *dpms_property =
+ dev->mode_config.dpms_property;
+
+ connector->dpms = DRM_MODE_DPMS_ON;
+ drm_object_property_set_value(&connector->base,
+ dpms_property,
+ DRM_MODE_DPMS_ON);
+
+ intel_encoder = to_intel_encoder(connector->encoder);
+ intel_encoder->connectors_active = true;
+ }
+ }
+}
+
+static bool intel_fuzzy_clock_check(int clock1, int clock2)
+{
+ int diff;
+
+ if (clock1 == clock2)
+ return true;
+
+ if (!clock1 || !clock2)
+ return false;
+
+ diff = abs(clock1 - clock2);
+
+ if (((diff + clock1 + clock2) * 100) / (clock1 + clock2) < 105)
+ return true;
+
+ return false;
+}
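+
+/*
+ * Worked example: clock1 = 100000 and clock2 = 108000 give diff = 8000,
+ * and ((8000 + 208000) * 100) / 208000 = 103 < 105, so the clocks are
+ * considered equal; clock2 = 112000 evaluates to 105 and fails. The
+ * check therefore tolerates a deviation of about 5% of the summed
+ * clocks.
+ */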
+
+#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
+ list_for_each_entry((intel_crtc), \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+ if (mask & (1 << (intel_crtc)->pipe))
+
+static bool
+intel_pipe_config_compare(struct drm_device *dev,
+ struct intel_crtc_config *current_config,
+ struct intel_crtc_config *pipe_config)
+{
+#define PIPE_CONF_CHECK_X(name) \
+ if (current_config->name != pipe_config->name) { \
+ DRM_ERROR("mismatch in " #name " " \
+ "(expected 0x%08x, found 0x%08x)\n", \
+ current_config->name, \
+ pipe_config->name); \
+ return false; \
+ }
+
+#define PIPE_CONF_CHECK_I(name) \
+ if (current_config->name != pipe_config->name) { \
+ DRM_ERROR("mismatch in " #name " " \
+ "(expected %i, found %i)\n", \
+ current_config->name, \
+ pipe_config->name); \
+ return false; \
+ }
+
+#define PIPE_CONF_CHECK_FLAGS(name, mask) \
+ if ((current_config->name ^ pipe_config->name) & (mask)) { \
+ DRM_ERROR("mismatch in " #name "(" #mask ") " \
+ "(expected %i, found %i)\n", \
+ current_config->name & (mask), \
+ pipe_config->name & (mask)); \
+ return false; \
+ }
+
+#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
+ if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
+ DRM_ERROR("mismatch in " #name " " \
+ "(expected %i, found %i)\n", \
+ current_config->name, \
+ pipe_config->name); \
+ return false; \
+ }
+
+#define PIPE_CONF_QUIRK(quirk) \
+ ((current_config->quirks | pipe_config->quirks) & (quirk))
+
+ PIPE_CONF_CHECK_I(cpu_transcoder);
+
+ PIPE_CONF_CHECK_I(has_pch_encoder);
+ PIPE_CONF_CHECK_I(fdi_lanes);
+ PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
+ PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
+ PIPE_CONF_CHECK_I(fdi_m_n.link_m);
+ PIPE_CONF_CHECK_I(fdi_m_n.link_n);
+ PIPE_CONF_CHECK_I(fdi_m_n.tu);
+
+ PIPE_CONF_CHECK_I(has_dp_encoder);
+ PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
+ PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
+ PIPE_CONF_CHECK_I(dp_m_n.link_m);
+ PIPE_CONF_CHECK_I(dp_m_n.link_n);
+ PIPE_CONF_CHECK_I(dp_m_n.tu);
+
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
+
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
+
+ PIPE_CONF_CHECK_I(pixel_multiplier);
+ PIPE_CONF_CHECK_I(has_hdmi_sink);
+ if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
+ IS_VALLEYVIEW(dev))
+ PIPE_CONF_CHECK_I(limited_color_range);
+
+ PIPE_CONF_CHECK_I(has_audio);
+
+ PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ DRM_MODE_FLAG_INTERLACE);
+
+ if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
+ PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ DRM_MODE_FLAG_PHSYNC);
+ PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ DRM_MODE_FLAG_NHSYNC);
+ PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ DRM_MODE_FLAG_PVSYNC);
+ PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ DRM_MODE_FLAG_NVSYNC);
+ }
+
+ PIPE_CONF_CHECK_I(pipe_src_w);
+ PIPE_CONF_CHECK_I(pipe_src_h);
+
+ /*
+ * FIXME: BIOS likes to set up a cloned config with lvds+external
+ * screen. Since we don't yet re-compute the pipe config when moving
+ * just the lvds port away to another pipe the sw tracking won't match.
+ *
+ * Proper atomic modesets with recomputed global state will fix this.
+ * Until then just don't check gmch state for inherited modes.
+ */
+ if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
+ PIPE_CONF_CHECK_I(gmch_pfit.control);
+ /* pfit ratios are autocomputed by the hw on gen4+ */
+ if (INTEL_INFO(dev)->gen < 4)
+ PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+ PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+ }
+
+ PIPE_CONF_CHECK_I(pch_pfit.enabled);
+ if (current_config->pch_pfit.enabled) {
+ PIPE_CONF_CHECK_I(pch_pfit.pos);
+ PIPE_CONF_CHECK_I(pch_pfit.size);
+ }
+
+ /* BDW+ don't expose a synchronous way to read the state */
+ if (IS_HASWELL(dev))
+ PIPE_CONF_CHECK_I(ips_enabled);
+
+ PIPE_CONF_CHECK_I(double_wide);
+
+ PIPE_CONF_CHECK_I(shared_dpll);
+ PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
+ PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
+ PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
+
+ if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
+ PIPE_CONF_CHECK_I(pipe_bpp);
+
+ PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
+
+#undef PIPE_CONF_CHECK_X
+#undef PIPE_CONF_CHECK_I
+#undef PIPE_CONF_CHECK_FLAGS
+#undef PIPE_CONF_CHECK_CLOCK_FUZZY
+#undef PIPE_CONF_QUIRK
+
+ return true;
+}
+
+static void
+check_connector_state(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ /* This also checks the encoder/connector hw state with the
+ * ->get_hw_state callbacks. */
+ intel_connector_check_state(connector);
+
+ WARN(&connector->new_encoder->base != connector->base.encoder,
+ "connector's staged encoder doesn't match current encoder\n");
+ }
+}
+
+static void
+check_encoder_state(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ bool enabled = false;
+ bool active = false;
+ enum pipe pipe, tracked_pipe;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
+ encoder->base.base.id,
+ encoder->base.name);
+
+ WARN(&encoder->new_crtc->base != encoder->base.crtc,
+ "encoder's stage crtc doesn't match current crtc\n");
+ WARN(encoder->connectors_active && !encoder->base.crtc,
+ "encoder's active_connectors set, but no crtc\n");
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->base.encoder != &encoder->base)
+ continue;
+ enabled = true;
+ if (connector->base.dpms != DRM_MODE_DPMS_OFF)
+ active = true;
+ }
+ WARN(!!encoder->base.crtc != enabled,
+ "encoder's enabled state mismatch "
+ "(expected %i, found %i)\n",
+ !!encoder->base.crtc, enabled);
+ WARN(active && !encoder->base.crtc,
+ "active encoder with no crtc\n");
+
+ WARN(encoder->connectors_active != active,
+ "encoder's computed active state doesn't match tracked active state "
+ "(expected %i, found %i)\n", active, encoder->connectors_active);
+
+ active = encoder->get_hw_state(encoder, &pipe);
+ WARN(active != encoder->connectors_active,
+ "encoder's hw state doesn't match sw tracking "
+ "(expected %i, found %i)\n",
+ encoder->connectors_active, active);
+
+ if (!encoder->base.crtc)
+ continue;
+
+ tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+ WARN(active && pipe != tracked_pipe,
+ "active encoder's pipe doesn't match"
+ "(expected %i, found %i)\n",
+ tracked_pipe, pipe);
+ }
+}
+
+static void
+check_crtc_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_crtc_config pipe_config;
+
+ for_each_intel_crtc(dev, crtc) {
+ bool enabled = false;
+ bool active = false;
+
+ memset(&pipe_config, 0, sizeof(pipe_config));
+
+ DRM_DEBUG_KMS("[CRTC:%d]\n",
+ crtc->base.base.id);
+
+ WARN(crtc->active && !crtc->base.enabled,
+ "active crtc, but not enabled in sw tracking\n");
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->base.crtc != &crtc->base)
+ continue;
+ enabled = true;
+ if (encoder->connectors_active)
+ active = true;
+ }
+
+ WARN(active != crtc->active,
+ "crtc's computed active state doesn't match tracked active state "
+ "(expected %i, found %i)\n", active, crtc->active);
+ WARN(enabled != crtc->base.enabled,
+ "crtc's computed enabled state doesn't match tracked enabled state "
+ "(expected %i, found %i)\n", enabled, crtc->base.enabled);
+
+ active = dev_priv->display.get_pipe_config(crtc,
+ &pipe_config);
+
+ /* hw state is inconsistent with the pipe A quirk */
+ if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+ active = crtc->active;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ enum pipe pipe;
+ if (encoder->base.crtc != &crtc->base)
+ continue;
+ if (encoder->get_hw_state(encoder, &pipe))
+ encoder->get_config(encoder, &pipe_config);
+ }
+
+ WARN(crtc->active != active,
+ "crtc active state doesn't match with hw state "
+ "(expected %i, found %i)\n", crtc->active, active);
+
+ if (active &&
+ !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
+ WARN(1, "pipe state doesn't match!\n");
+ intel_dump_pipe_config(crtc, &pipe_config,
+ "[hw state]");
+ intel_dump_pipe_config(crtc, &crtc->config,
+ "[sw state]");
+ }
+ }
+}
+
+static void
+check_shared_dpll_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ struct intel_dpll_hw_state dpll_hw_state;
+ int i;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+ int enabled_crtcs = 0, active_crtcs = 0;
+ bool active;
+
+ memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
+
+ DRM_DEBUG_KMS("%s\n", pll->name);
+
+ active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
+
+ WARN(pll->active > pll->refcount,
+ "more active pll users than references: %i vs %i\n",
+ pll->active, pll->refcount);
+ WARN(pll->active && !pll->on,
+ "pll in active use but not on in sw tracking\n");
+ WARN(pll->on && !pll->active,
+ "pll in on but not on in use in sw tracking\n");
+ WARN(pll->on != active,
+ "pll on state mismatch (expected %i, found %i)\n",
+ pll->on, active);
+
+ for_each_intel_crtc(dev, crtc) {
+ if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
+ enabled_crtcs++;
+ if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
+ active_crtcs++;
+ }
+ WARN(pll->active != active_crtcs,
+ "pll active crtcs mismatch (expected %i, found %i)\n",
+ pll->active, active_crtcs);
+ WARN(pll->refcount != enabled_crtcs,
+ "pll enabled crtcs mismatch (expected %i, found %i)\n",
+ pll->refcount, enabled_crtcs);
+
+ WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
+ sizeof(dpll_hw_state)),
+ "pll hw state mismatch\n");
+ }
+}
+
+void
+intel_modeset_check_state(struct drm_device *dev)
+{
+ check_connector_state(dev);
+ check_encoder_state(dev);
+ check_crtc_state(dev);
+ check_shared_dpll_state(dev);
+}
+
+void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+ int dotclock)
+{
+ /*
+ * FDI already provided one idea for the dotclock.
+ * Yell if the encoder disagrees.
+ */
+ WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
+ "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+ pipe_config->adjusted_mode.crtc_clock, dotclock);
+}
+
+static void update_scanline_offset(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+
+ /*
+ * The scanline counter increments at the leading edge of hsync.
+ *
+ * On most platforms it starts counting from vtotal-1 on the
+ * first active line. That means the scanline counter value is
+ * always one less than what we would expect. Ie. just after
+ * start of vblank, which also occurs at start of hsync (on the
+ * last active line), the scanline counter will read vblank_start-1.
+ *
+ * On gen2 the scanline counter starts counting from 1 instead
+ * of vtotal-1, so we have to subtract one (or rather add vtotal-1
+ * to keep the value positive), instead of adding one.
+ *
+ * On HSW+ the behaviour of the scanline counter depends on the output
+ * type. For DP ports it behaves like most other platforms, but on HDMI
+ * there's an extra 1 line difference. So we need to add two instead of
+ * one to the value.
+ */
+ if (IS_GEN2(dev)) {
+ const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ int vtotal;
+
+ vtotal = mode->crtc_vtotal;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vtotal /= 2;
+
+ crtc->scanline_offset = vtotal - 1;
+ } else if (HAS_DDI(dev) &&
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
+ crtc->scanline_offset = 2;
+ } else {
+ crtc->scanline_offset = 1;
+ }
+}
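+
+/*
+ * Sketch of the intended use of the offset (assumed usage, not part of
+ * this patch): a scanline read back from the hardware is converted to
+ * the expected software value with
+ *
+ *	scanline = (hw_scanline + crtc->scanline_offset) % vtotal;
+ *
+ * so on gen2 a mode with crtc_vtotal = 525 gets scanline_offset = 524,
+ * which effectively rewinds the counter by one line.
+ */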
+
+static int __intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *saved_mode;
+ struct intel_crtc_config *pipe_config = NULL;
+ struct intel_crtc *intel_crtc;
+ unsigned disable_pipes, prepare_pipes, modeset_pipes;
+ int ret = 0;
+
+ saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
+ if (!saved_mode)
+ return -ENOMEM;
+
+ intel_modeset_affected_pipes(crtc, &modeset_pipes,
+ &prepare_pipes, &disable_pipes);
+
+ *saved_mode = crtc->mode;
+
+ /* Hack: Because we don't (yet) support global modeset on multiple
+ * crtcs, we don't keep track of the new mode for more than one crtc.
+ * Hence simply check whether any bit is set in modeset_pipes in all the
+ * pieces of code that are not yet converted to deal with multiple crtcs
+ * changing their mode at the same time. */
+ if (modeset_pipes) {
+ pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
+ if (IS_ERR(pipe_config)) {
+ ret = PTR_ERR(pipe_config);
+ pipe_config = NULL;
+
+ goto out;
+ }
+ intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+ "[modeset]");
+ to_intel_crtc(crtc)->new_config = pipe_config;
+ }
+
+ /*
+ * See if the config requires any additional preparation, e.g.
+ * to adjust global state with pipes off. We need to do this
+ * here so we can get the updated config for the new mode being
+ * set on this crtc. For other crtcs we need to use the
+ * adjusted_mode bits in the crtc directly.
+ */
+ if (IS_VALLEYVIEW(dev)) {
+ valleyview_modeset_global_pipes(dev, &prepare_pipes);
+
+ /* may have added more to prepare_pipes than we should */
+ prepare_pipes &= ~disable_pipes;
+ }
+
+ for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
+ intel_crtc_disable(&intel_crtc->base);
+
+ for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
+ if (intel_crtc->base.enabled)
+ dev_priv->display.crtc_disable(&intel_crtc->base);
+ }
+
+ /* crtc->mode is already used by the ->mode_set callbacks, hence we need
+ * to set it here already, even though we also pass it down the callchain.
+ */
+ if (modeset_pipes) {
+ crtc->mode = *mode;
+ /* mode_set/enable/disable functions rely on a correct pipe
+ * config. */
+ to_intel_crtc(crtc)->config = *pipe_config;
+ to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
+
+ /*
+ * Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc,
+ &pipe_config->adjusted_mode);
+ }
+
+ /* Only after disabling all output pipelines that will be changed can we
+ * update the output configuration. */
+ intel_modeset_update_state(dev, prepare_pipes);
+
+ if (dev_priv->display.modeset_global_resources)
+ dev_priv->display.modeset_global_resources(dev);
+
+ /* Set up the DPLL and any encoders state that needs to adjust or depend
+ * on the DPLL.
+ */
+ for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+ struct drm_framebuffer *old_fb;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = intel_pin_and_fence_fb_obj(dev,
+ to_intel_framebuffer(fb)->obj,
+ NULL);
+ if (ret != 0) {
+ DRM_ERROR("pin & fence failed\n");
+ mutex_unlock(&dev->struct_mutex);
+ goto done;
+ }
+ old_fb = crtc->primary->fb;
+ if (old_fb)
+ intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ crtc->primary->fb = fb;
+ crtc->x = x;
+ crtc->y = y;
+
+ ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
+ x, y, fb);
+ if (ret)
+ goto done;
+ }
+
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
+ update_scanline_offset(intel_crtc);
+
+ dev_priv->display.crtc_enable(&intel_crtc->base);
+ }
+
+ /* FIXME: add subpixel order */
+done:
+ if (ret && crtc->enabled)
+ crtc->mode = *saved_mode;
+
+out:
+ kfree(pipe_config);
+ kfree(saved_mode);
+ return ret;
+}
+
+static int intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ int ret;
+
+ ret = __intel_set_mode(crtc, mode, x, y, fb);
+
+ if (ret == 0)
+ intel_modeset_check_state(crtc->dev);
+
+ return ret;
+}
+
+void intel_crtc_restore_mode(struct drm_crtc *crtc)
+{
+ intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
+}
+
+#undef for_each_intel_crtc_masked
+
+static void intel_set_config_free(struct intel_set_config *config)
+{
+ if (!config)
+ return;
+
+ kfree(config->save_connector_encoders);
+ kfree(config->save_encoder_crtcs);
+ kfree(config->save_crtc_enabled);
+ kfree(config);
+}
+
+static int intel_set_config_save_state(struct drm_device *dev,
+ struct intel_set_config *config)
+{
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int count;
+
+ config->save_crtc_enabled =
+ kcalloc(dev->mode_config.num_crtc,
+ sizeof(bool), GFP_KERNEL);
+ if (!config->save_crtc_enabled)
+ return -ENOMEM;
+
+ config->save_encoder_crtcs =
+ kcalloc(dev->mode_config.num_encoder,
+ sizeof(struct drm_crtc *), GFP_KERNEL);
+ if (!config->save_encoder_crtcs)
+ return -ENOMEM;
+
+ config->save_connector_encoders =
+ kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_encoder *), GFP_KERNEL);
+ if (!config->save_connector_encoders)
+ return -ENOMEM;
+
+ /* Copy data. Note that driver private data is not affected.
+ * Should anything bad happen only the expected state is
+ * restored, not the driver's personal bookkeeping.
+ */
+ count = 0;
+ for_each_crtc(dev, crtc) {
+ config->save_crtc_enabled[count++] = crtc->enabled;
+ }
+
+ count = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ config->save_encoder_crtcs[count++] = encoder->crtc;
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ config->save_connector_encoders[count++] = connector->encoder;
+ }
+
+ return 0;
+}
+
+static void intel_set_config_restore_state(struct drm_device *dev,
+ struct intel_set_config *config)
+{
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ int count;
+
+ count = 0;
+ for_each_intel_crtc(dev, crtc) {
+ crtc->new_enabled = config->save_crtc_enabled[count++];
+
+ if (crtc->new_enabled)
+ crtc->new_config = &crtc->config;
+ else
+ crtc->new_config = NULL;
+ }
+
+ count = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ encoder->new_crtc =
+ to_intel_crtc(config->save_encoder_crtcs[count++]);
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ connector->new_encoder =
+ to_intel_encoder(config->save_connector_encoders[count++]);
+ }
+}
+
+static bool
+is_crtc_connector_off(struct drm_mode_set *set)
+{
+ int i;
+
+ if (set->num_connectors == 0)
+ return false;
+
+ if (WARN_ON(set->connectors == NULL))
+ return false;
+
+ for (i = 0; i < set->num_connectors; i++)
+ if (set->connectors[i]->encoder &&
+ set->connectors[i]->encoder->crtc == set->crtc &&
+ set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
+ return true;
+
+ return false;
+}
+
+static void
+intel_set_config_compute_mode_changes(struct drm_mode_set *set,
+ struct intel_set_config *config)
+{
+ /* We should be able to check here if the fb has the same properties
+ * and then just flip_or_move it */
+ if (is_crtc_connector_off(set)) {
+ config->mode_changed = true;
+ } else if (set->crtc->primary->fb != set->fb) {
+ /* If we have no fb then treat it as a full mode set */
+ if (set->crtc->primary->fb == NULL) {
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(set->crtc);
+
+ if (intel_crtc->active && i915.fastboot) {
+ DRM_DEBUG_KMS("crtc has no fb, will flip\n");
+ config->fb_changed = true;
+ } else {
+ DRM_DEBUG_KMS("inactive crtc, full mode set\n");
+ config->mode_changed = true;
+ }
+ } else if (set->fb == NULL) {
+ config->mode_changed = true;
+ } else if (set->fb->pixel_format !=
+ set->crtc->primary->fb->pixel_format) {
+ config->mode_changed = true;
+ } else {
+ config->fb_changed = true;
+ }
+ }
+
+ if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
+ config->fb_changed = true;
+
+ if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+ DRM_DEBUG_KMS("modes are different, full mode set\n");
+ drm_mode_debug_printmodeline(&set->crtc->mode);
+ drm_mode_debug_printmodeline(set->mode);
+ config->mode_changed = true;
+ }
+
+ DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
+ set->crtc->base.id, config->mode_changed, config->fb_changed);
+}
+
+static int
+intel_modeset_stage_output_state(struct drm_device *dev,
+ struct drm_mode_set *set,
+ struct intel_set_config *config)
+{
+ struct intel_connector *connector;
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+ int ro;
+
+ /* The upper layers ensure that we either disable a crtc or have a list
+ * of connectors. For paranoia, double-check this. */
+ WARN_ON(!set->fb && (set->num_connectors != 0));
+ WARN_ON(set->fb && (set->num_connectors == 0));
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ /* Traverse the passed in connector list and get the encoders
+ * for them. */
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == &connector->base) {
+ connector->new_encoder = connector->encoder;
+ break;
+ }
+ }
+
+ /* If we disable the crtc, disable all its connectors. Also, if
+ * the connector is on the changing crtc but not on the new
+ * connector list, disable it. */
+ if ((!set->fb || ro == set->num_connectors) &&
+ connector->base.encoder &&
+ connector->base.encoder->crtc == set->crtc) {
+ connector->new_encoder = NULL;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+ connector->base.base.id,
+ connector->base.name);
+ }
+
+ if (&connector->new_encoder->base != connector->base.encoder) {
+ DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ config->mode_changed = true;
+ }
+ }
+ /* connector->new_encoder is now updated for all connectors. */
+
+ /* Update crtc of enabled connectors. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ struct drm_crtc *new_crtc;
+
+ if (!connector->new_encoder)
+ continue;
+
+ new_crtc = connector->new_encoder->base.crtc;
+
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == &connector->base)
+ new_crtc = set->crtc;
+ }
+
+ /* Make sure the new CRTC will work with the encoder */
+ if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
+ new_crtc)) {
+ return -EINVAL;
+ }
+ connector->encoder->new_crtc = to_intel_crtc(new_crtc);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+ connector->base.base.id,
+ connector->base.name,
+ new_crtc->base.id);
+ }
+
+ /* Check for any encoders that need to be disabled. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ int num_connectors = 0;
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->new_encoder == encoder) {
+ WARN_ON(!connector->new_encoder->new_crtc);
+ num_connectors++;
+ }
+ }
+
+ if (num_connectors == 0)
+ encoder->new_crtc = NULL;
+ else if (num_connectors > 1)
+ return -EINVAL;
+
+ /* Only now check for crtc changes so we don't miss encoders
+ * that will be disabled. */
+ if (&encoder->new_crtc->base != encoder->base.crtc) {
+ DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ config->mode_changed = true;
+ }
+ }
+ /* Now we've also updated encoder->new_crtc for all encoders. */
+
+ for_each_intel_crtc(dev, crtc) {
+ crtc->new_enabled = false;
+
+ list_for_each_entry(encoder,
+ &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->new_crtc == crtc) {
+ crtc->new_enabled = true;
+ break;
+ }
+ }
+
+ if (crtc->new_enabled != crtc->base.enabled) {
+ DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
+ crtc->new_enabled ? "en" : "dis");
+ config->mode_changed = true;
+ }
+
+ if (crtc->new_enabled)
+ crtc->new_config = &crtc->config;
+ else
+ crtc->new_config = NULL;
+ }
+
+ return 0;
+}
+
+static void disable_crtc_nofb(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
+ pipe_name(crtc->pipe));
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ if (connector->new_encoder &&
+ connector->new_encoder->new_crtc == crtc)
+ connector->new_encoder = NULL;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ if (encoder->new_crtc == crtc)
+ encoder->new_crtc = NULL;
+ }
+
+ crtc->new_enabled = false;
+ crtc->new_config = NULL;
+}
+
+static int intel_crtc_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev;
+ struct drm_mode_set save_set;
+ struct intel_set_config *config;
+ int ret;
+
+ BUG_ON(!set);
+ BUG_ON(!set->crtc);
+ BUG_ON(!set->crtc->helper_private);
+
+ /* Enforce sane interface api - has been abused by the fb helper. */
+ BUG_ON(!set->mode && set->fb);
+ BUG_ON(set->fb && set->num_connectors == 0);
+
+ if (set->fb) {
+ DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
+ } else {
+ DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+ }
+
+ dev = set->crtc->dev;
+
+ ret = -ENOMEM;
+ config = kzalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ goto out_config;
+
+ ret = intel_set_config_save_state(dev, config);
+ if (ret)
+ goto out_config;
+
+ save_set.crtc = set->crtc;
+ save_set.mode = &set->crtc->mode;
+ save_set.x = set->crtc->x;
+ save_set.y = set->crtc->y;
+ save_set.fb = set->crtc->primary->fb;
+
+ /* Compute whether we need a full modeset, only an fb base update or no
+ * change at all. In the future we might also check whether only the
+ * mode changed, e.g. for LVDS where we only change the panel fitter in
+ * such cases. */
+ intel_set_config_compute_mode_changes(set, config);
+
+ ret = intel_modeset_stage_output_state(dev, set, config);
+ if (ret)
+ goto fail;
+
+ if (config->mode_changed) {
+ ret = intel_set_mode(set->crtc, set->mode,
+ set->x, set->y, set->fb);
+ } else if (config->fb_changed) {
+ intel_crtc_wait_for_pending_flips(set->crtc);
+
+ ret = intel_pipe_set_base(set->crtc,
+ set->x, set->y, set->fb);
+ /*
+ * In the fastboot case this may be our only check of the
+ * state after boot. It would be better to only do it on
+ * the first update, but we don't have a nice way of doing that
+ * (and really, set_config isn't used much for high freq page
+ * flipping, so increasing its cost here shouldn't be a big
+ * deal).
+ */
+ if (i915.fastboot && ret == 0)
+ intel_modeset_check_state(set->crtc->dev);
+ }
+
+ if (ret) {
+ DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
+ set->crtc->base.id, ret);
+fail:
+ intel_set_config_restore_state(dev, config);
+
+ /*
+ * HACK: if the pipe was on, but we didn't have a framebuffer,
+ * force the pipe off to avoid oopsing in the modeset code
+ * due to fb==NULL. This should only happen during boot since
+ * we don't yet reconstruct the FB from the hardware state.
+ */
+ if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
+ disable_crtc_nofb(to_intel_crtc(save_set.crtc));
+
+ /* Try to restore the config */
+ if (config->mode_changed &&
+ intel_set_mode(save_set.crtc, save_set.mode,
+ save_set.x, save_set.y, save_set.fb))
+ DRM_ERROR("failed to restore config after modeset failure\n");
+ }
+
+out_config:
+ intel_set_config_free(config);
+ return ret;
+}
+
static const struct drm_crtc_funcs intel_crtc_funcs = {
.cursor_set = intel_crtc_cursor_set,
.cursor_move = intel_crtc_cursor_move,
.gamma_set = intel_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
+ .set_config = intel_crtc_set_config,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
};
+static void intel_cpu_pll_init(struct drm_device *dev)
+{
+ if (HAS_DDI(dev))
+ intel_ddi_pll_init(dev);
+}
+
+static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+
+ val = I915_READ(PCH_DPLL(pll->id));
+ hw_state->dpll = val;
+ hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
+ hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
+
+ return val & DPLL_VCO_ENABLE;
+}
+
+static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
+ I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
+}
+
+static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ /* PCH refclock must be enabled first */
+ ibx_assert_pch_refclk_enabled(dev_priv);
+
+ I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(PCH_DPLL(pll->id));
+ udelay(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+ POSTING_READ(PCH_DPLL(pll->id));
+ udelay(200);
+}
+
+static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_crtc *crtc;
+
+ /* Make sure no transcoder is still depending on us. */
+ for_each_intel_crtc(dev, crtc) {
+ if (intel_crtc_to_shared_dpll(crtc) == pll)
+ assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
+ }
+
+ I915_WRITE(PCH_DPLL(pll->id), 0);
+ POSTING_READ(PCH_DPLL(pll->id));
+ udelay(200);
+}
+
+static char *ibx_pch_dpll_names[] = {
+ "PCH DPLL A",
+ "PCH DPLL B",
+};
+
+static void ibx_pch_dpll_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ dev_priv->num_shared_dpll = 2;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ dev_priv->shared_dplls[i].id = i;
+ dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
+ dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
+ dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
+ dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
+ dev_priv->shared_dplls[i].get_hw_state =
+ ibx_pch_dpll_get_hw_state;
+ }
+}
+
+static void intel_shared_dpll_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ ibx_pch_dpll_init(dev);
+ else
+ dev_priv->num_shared_dpll = 0;
+
+ BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
+}
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
int i;
- intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+ intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
if (intel_crtc == NULL)
return;
drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
- intel_crtc->pipe = pipe;
- intel_crtc->plane = pipe;
for (i = 0; i < 256; i++) {
intel_crtc->lut_r[i] = i;
intel_crtc->lut_g[i] = i;
intel_crtc->lut_b[i] = i;
}
- /* Swap pipes & planes for FBC on pre-965 */
+ /*
+ * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
+ * are hooked to plane B. Hence we want plane A feeding pipe B.
+ */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
- if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
+ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
- intel_crtc->plane = ((pipe == 0) ? 1 : 0);
+ intel_crtc->plane = !pipe;
}
+ intel_crtc->cursor_base = ~0;
+ intel_crtc->cursor_cntl = ~0;
+
+ init_waitqueue_head(&intel_crtc->vbl_wait);
+
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
- intel_crtc->cursor_addr = 0;
- intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
- intel_crtc->busy = false;
+ WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
+}
- setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
- (unsigned long)intel_crtc);
+enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
+{
+ struct drm_encoder *encoder = connector->base.encoder;
+ struct drm_device *dev = connector->base.dev;
+
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+ if (!encoder)
+ return INVALID_PIPE;
+
+ return to_intel_crtc(encoder->crtc)->pipe;
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
struct drm_mode_object *drmmode_obj;
struct intel_crtc *crtc;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!drmmode_obj) {
DRM_ERROR("no such CRTC id\n");
- return -EINVAL;
+ return -ENOENT;
}
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
@@ -4231,154 +11041,230 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
return 0;
}
-struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
-{
- struct drm_crtc *crtc = NULL;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->pipe == pipe)
- break;
- }
- return crtc;
-}
-
-static int intel_connector_clones(struct drm_device *dev, int type_mask)
+static int intel_encoder_clones(struct intel_encoder *encoder)
{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_encoder *source_encoder;
int index_mask = 0;
- struct drm_connector *connector;
int entry = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct intel_output *intel_output = to_intel_output(connector);
- if (type_mask & intel_output->clone_mask)
+ list_for_each_entry(source_encoder,
+ &dev->mode_config.encoder_list, base.head) {
+ if (encoders_cloneable(encoder, source_encoder))
index_mask |= (1 << entry);
+
entry++;
}
+
return index_mask;
}
+static bool has_edp_a(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_MOBILE(dev))
+ return false;
+
+ if ((I915_READ(DP_A) & DP_DETECTED) == 0)
+ return false;
+
+ if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+ return false;
+
+ return true;
+}
+
+const char *intel_output_name(int output)
+{
+ static const char *names[] = {
+ [INTEL_OUTPUT_UNUSED] = "Unused",
+ [INTEL_OUTPUT_ANALOG] = "Analog",
+ [INTEL_OUTPUT_DVO] = "DVO",
+ [INTEL_OUTPUT_SDVO] = "SDVO",
+ [INTEL_OUTPUT_LVDS] = "LVDS",
+ [INTEL_OUTPUT_TVOUT] = "TV",
+ [INTEL_OUTPUT_HDMI] = "HDMI",
+ [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
+ [INTEL_OUTPUT_EDP] = "eDP",
+ [INTEL_OUTPUT_DSI] = "DSI",
+ [INTEL_OUTPUT_UNKNOWN] = "Unknown",
+ };
+
+ if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
+ return "Invalid";
+
+ return names[output];
+}
+
+static bool intel_crt_present(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_ULT(dev))
+ return false;
+
+ if (IS_CHERRYVIEW(dev))
+ return false;
+
+ if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
+ return false;
+
+ return true;
+}
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
+ struct intel_encoder *encoder;
+ bool dpd_is_edp = false;
- intel_crt_init(dev);
+ intel_lvds_init(dev);
- /* Set up integrated LVDS */
- if (IS_MOBILE(dev) && !IS_I830(dev))
- intel_lvds_init(dev);
+ if (intel_crt_present(dev))
+ intel_crt_init(dev);
+
+ if (HAS_DDI(dev)) {
+ int found;
- if (IS_IRONLAKE(dev)) {
+ /* Haswell uses DDI functions to detect digital outputs */
+ found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
+ /* DDI A only supports eDP */
+ if (found)
+ intel_ddi_init(dev, PORT_A);
+
+ /* DDI B, C and D detection is indicated by the SFUSE_STRAP
+ * register */
+ found = I915_READ(SFUSE_STRAP);
+
+ if (found & SFUSE_STRAP_DDIB_DETECTED)
+ intel_ddi_init(dev, PORT_B);
+ if (found & SFUSE_STRAP_DDIC_DETECTED)
+ intel_ddi_init(dev, PORT_C);
+ if (found & SFUSE_STRAP_DDID_DETECTED)
+ intel_ddi_init(dev, PORT_D);
+ } else if (HAS_PCH_SPLIT(dev)) {
int found;
+ dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
- if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
- intel_dp_init(dev, DP_A);
+ if (has_edp_a(dev))
+ intel_dp_init(dev, DP_A, PORT_A);
- if (I915_READ(HDMIB) & PORT_DETECTED) {
- /* check SDVOB */
- /* found = intel_sdvo_init(dev, HDMIB); */
- found = 0;
+ if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
+ /* PCH SDVOB multiplex with HDMIB */
+ found = intel_sdvo_init(dev, PCH_SDVOB, true);
if (!found)
- intel_hdmi_init(dev, HDMIB);
+ intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_B);
+ intel_dp_init(dev, PCH_DP_B, PORT_B);
}
- if (I915_READ(HDMIC) & PORT_DETECTED)
- intel_hdmi_init(dev, HDMIC);
+ if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
+ intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
- if (I915_READ(HDMID) & PORT_DETECTED)
- intel_hdmi_init(dev, HDMID);
+ if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
+ intel_hdmi_init(dev, PCH_HDMID, PORT_D);
if (I915_READ(PCH_DP_C) & DP_DETECTED)
- intel_dp_init(dev, PCH_DP_C);
+ intel_dp_init(dev, PCH_DP_C, PORT_C);
if (I915_READ(PCH_DP_D) & DP_DETECTED)
- intel_dp_init(dev, PCH_DP_D);
+ intel_dp_init(dev, PCH_DP_D, PORT_D);
+ } else if (IS_VALLEYVIEW(dev)) {
+ if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
+ PORT_B);
+ if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
+ }
+
+ if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
+ PORT_C);
+ if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
+ }
+ if (IS_CHERRYVIEW(dev)) {
+ if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) {
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
+ PORT_D);
+ if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
+ }
+ }
+
+ intel_dsi_init(dev);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
- if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
- found = intel_sdvo_init(dev, SDVOB);
+ found = intel_sdvo_init(dev, GEN3_SDVOB, true);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
- intel_hdmi_init(dev, SDVOB);
+ intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
}
- if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
- DRM_DEBUG_KMS("probing DP_B\n");
- intel_dp_init(dev, DP_B);
- }
+ if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ intel_dp_init(dev, DP_B, PORT_B);
}
/* Before G4X SDVOC doesn't have its own detect register */
- if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOC\n");
- found = intel_sdvo_init(dev, SDVOC);
+ found = intel_sdvo_init(dev, GEN3_SDVOC, false);
}
- if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
+ if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
if (SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
- intel_hdmi_init(dev, SDVOC);
- }
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- DRM_DEBUG_KMS("probing DP_C\n");
- intel_dp_init(dev, DP_C);
+ intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
}
+ if (SUPPORTS_INTEGRATED_DP(dev))
+ intel_dp_init(dev, DP_C, PORT_C);
}
if (SUPPORTS_INTEGRATED_DP(dev) &&
- (I915_READ(DP_D) & DP_DETECTED)) {
- DRM_DEBUG_KMS("probing DP_D\n");
- intel_dp_init(dev, DP_D);
- }
- } else if (IS_I8XX(dev))
+ (I915_READ(DP_D) & DP_DETECTED))
+ intel_dp_init(dev, DP_D, PORT_D);
+ } else if (IS_GEN2(dev))
intel_dvo_init(dev);
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_encoder *encoder = &intel_output->enc;
-
- encoder->possible_crtcs = intel_output->crtc_mask;
- encoder->possible_clones = intel_connector_clones(dev,
- intel_output->clone_mask);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ encoder->base.possible_crtcs = encoder->crtc_mask;
+ encoder->base.possible_clones =
+ intel_encoder_clones(encoder);
}
+
+ intel_init_pch_refclk(dev);
+
+ drm_helper_move_panel_connectors_to_head(dev);
}
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_device *dev = fb->dev;
-
- if (fb->fbdev)
- intelfb_remove(dev, fb);
drm_framebuffer_cleanup(fb);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(intel_fb->obj);
- mutex_unlock(&dev->struct_mutex);
-
+ WARN_ON(!intel_fb->obj->framebuffer_references--);
+ drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
kfree(intel_fb);
}
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
+ struct drm_file *file,
unsigned int *handle)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_gem_object *object = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb->obj;
- return drm_gem_handle_create(file_priv, object, handle);
+ return drm_gem_handle_create(file, &obj->base, handle);
}
static const struct drm_framebuffer_funcs intel_fb_funcs = {
@@ -4386,209 +11272,220 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
.create_handle = intel_user_framebuffer_create_handle,
};
-int intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_framebuffer **fb,
- struct drm_gem_object *obj)
+static int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *intel_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj)
{
- struct intel_framebuffer *intel_fb;
+ int aligned_height;
+ int pitch_limit;
int ret;
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
- if (!intel_fb)
- return -ENOMEM;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
- if (ret) {
- DRM_ERROR("framebuffer init failed %d\n", ret);
- return ret;
+ if (obj->tiling_mode == I915_TILING_Y) {
+ DRM_DEBUG("hardware does not support tiling Y\n");
+ return -EINVAL;
}
- drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
+ if (mode_cmd->pitches[0] & 63) {
+ DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
+ mode_cmd->pitches[0]);
+ return -EINVAL;
+ }
- intel_fb->obj = obj;
+ if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
+ pitch_limit = 32*1024;
+ } else if (INTEL_INFO(dev)->gen >= 4) {
+ if (obj->tiling_mode)
+ pitch_limit = 16*1024;
+ else
+ pitch_limit = 32*1024;
+ } else if (INTEL_INFO(dev)->gen >= 3) {
+ if (obj->tiling_mode)
+ pitch_limit = 8*1024;
+ else
+ pitch_limit = 16*1024;
+ } else {
+ /* XXX DSPC is limited to 4k tiled */
+ pitch_limit = 8*1024;
+ }
+
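+ /*
+ * Worked example: a hypothetical 4096-pixel-wide XRGB8888 framebuffer
+ * has pitches[0] = 4096 * 4 = 16384 bytes, which passes every linear
+ * limit above but would exceed the 8KiB tiled limit on gen3.
+ */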
+ if (mode_cmd->pitches[0] > pitch_limit) {
+ DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
+ obj->tiling_mode ? "tiled" : "linear",
+ mode_cmd->pitches[0], pitch_limit);
+ return -EINVAL;
+ }
- *fb = &intel_fb->base;
+ if (obj->tiling_mode != I915_TILING_NONE &&
+ mode_cmd->pitches[0] != obj->stride) {
+ DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], obj->stride);
+ return -EINVAL;
+ }
- return 0;
-}
+ /* Reject formats not supported by any plane early. */
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ if (INTEL_INFO(dev)->gen > 3) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format));
+ return -EINVAL;
+ }
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ if (INTEL_INFO(dev)->gen < 4) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format));
+ return -EINVAL;
+ }
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_VYUY:
+ if (INTEL_INFO(dev)->gen < 5) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format));
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format));
+ return -EINVAL;
+ }
+ /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
+ if (mode_cmd->offsets[0] != 0)
+ return -EINVAL;
-static struct drm_framebuffer *
-intel_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *filp,
- struct drm_mode_fb_cmd *mode_cmd)
-{
- struct drm_gem_object *obj;
- struct drm_framebuffer *fb;
- int ret;
+ aligned_height = intel_align_height(dev, mode_cmd->height,
+ obj->tiling_mode);
+ /* FIXME drm helper for size checks (especially planar formats)? */
+ if (obj->base.size < aligned_height * mode_cmd->pitches[0])
+ return -EINVAL;
- obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
- if (!obj)
- return NULL;
+ drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
+ intel_fb->obj = obj;
+ intel_fb->obj->framebuffer_references++;
- ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
+ ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return NULL;
+ DRM_ERROR("framebuffer init failed %d\n", ret);
+ return ret;
}
- return fb;
+ return 0;
}
-static const struct drm_mode_config_funcs intel_mode_funcs = {
- .fb_create = intel_user_framebuffer_create,
- .fb_changed = intelfb_probe,
-};
-
-static struct drm_gem_object *
-intel_alloc_power_context(struct drm_device *dev)
+static struct drm_framebuffer *
+intel_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_gem_object *pwrctx;
- int ret;
-
- pwrctx = drm_gem_object_alloc(dev, 4096);
- if (!pwrctx) {
- DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
- return NULL;
- }
+ struct drm_i915_gem_object *obj;
- mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(pwrctx, 4096);
- if (ret) {
- DRM_ERROR("failed to pin power context: %d\n", ret);
- goto err_unref;
- }
+ obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
+ mode_cmd->handles[0]));
+ if (&obj->base == NULL)
+ return ERR_PTR(-ENOENT);
- ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
- if (ret) {
- DRM_ERROR("failed to set-domain on power context: %d\n", ret);
- goto err_unpin;
- }
- mutex_unlock(&dev->struct_mutex);
-
- return pwrctx;
-
-err_unpin:
- i915_gem_object_unpin(pwrctx);
-err_unref:
- drm_gem_object_unreference(pwrctx);
- mutex_unlock(&dev->struct_mutex);
- return NULL;
+ return intel_framebuffer_create(dev, mode_cmd, obj);
}
-void intel_init_clock_gating(struct drm_device *dev)
+#ifndef CONFIG_DRM_I915_FBDEV
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /*
- * Disable clock gating reported to work incorrectly according to the
- * specs, but enable as much else as we can.
- */
- if (IS_IRONLAKE(dev)) {
- return;
- } else if (IS_G4X(dev)) {
- uint32_t dspclk_gate;
- I915_WRITE(RENCLK_GATE_D1, 0);
- I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
- GS_UNIT_CLOCK_GATE_DISABLE |
- CL_UNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(RAMCLK_GATE_D, 0);
- dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
- OVRUNIT_CLOCK_GATE_DISABLE |
- OVCUNIT_CLOCK_GATE_DISABLE;
- if (IS_GM45(dev))
- dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
- } else if (IS_I965GM(dev)) {
- I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
- I915_WRITE(RENCLK_GATE_D2, 0);
- I915_WRITE(DSPCLK_GATE_D, 0);
- I915_WRITE(RAMCLK_GATE_D, 0);
- I915_WRITE16(DEUC, 0);
- } else if (IS_I965G(dev)) {
- I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
- I965_RCC_CLOCK_GATE_DISABLE |
- I965_RCPB_CLOCK_GATE_DISABLE |
- I965_ISC_CLOCK_GATE_DISABLE |
- I965_FBC_CLOCK_GATE_DISABLE);
- I915_WRITE(RENCLK_GATE_D2, 0);
- } else if (IS_I9XX(dev)) {
- u32 dstate = I915_READ(D_STATE);
-
- dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
- DSTATE_DOT_CLOCK_GATING;
- I915_WRITE(D_STATE, dstate);
- } else if (IS_I85X(dev) || IS_I865G(dev)) {
- I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
- } else if (IS_I830(dev)) {
- I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
- }
-
- /*
- * GPU can automatically power down the render unit if given a page
- * to save state.
- */
- if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct drm_i915_gem_object *obj_priv = NULL;
-
- if (dev_priv->pwrctx) {
- obj_priv = dev_priv->pwrctx->driver_private;
- } else {
- struct drm_gem_object *pwrctx;
-
- pwrctx = intel_alloc_power_context(dev);
- if (pwrctx) {
- dev_priv->pwrctx = pwrctx;
- obj_priv = pwrctx->driver_private;
- }
- }
-
- if (obj_priv) {
- I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
- I915_WRITE(MCHBAR_RENDER_STANDBY,
- I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
- }
- }
}
+#endif
+
+static const struct drm_mode_config_funcs intel_mode_funcs = {
+ .fb_create = intel_user_framebuffer_create,
+ .output_poll_changed = intel_fbdev_output_poll_changed,
+};
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /* We always want a DPMS function */
- if (IS_IRONLAKE(dev))
- dev_priv->display.dpms = ironlake_crtc_dpms;
+ if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
+ dev_priv->display.find_dpll = g4x_find_best_dpll;
+ else if (IS_CHERRYVIEW(dev))
+ dev_priv->display.find_dpll = chv_find_best_dpll;
+ else if (IS_VALLEYVIEW(dev))
+ dev_priv->display.find_dpll = vlv_find_best_dpll;
+ else if (IS_PINEVIEW(dev))
+ dev_priv->display.find_dpll = pnv_find_best_dpll;
else
- dev_priv->display.dpms = i9xx_crtc_dpms;
-
- /* Only mobile has FBC, leave pointers NULL for other chips */
- if (IS_MOBILE(dev)) {
- if (IS_GM45(dev)) {
- dev_priv->display.fbc_enabled = g4x_fbc_enabled;
- dev_priv->display.enable_fbc = g4x_enable_fbc;
- dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) {
- dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
- dev_priv->display.enable_fbc = i8xx_enable_fbc;
- dev_priv->display.disable_fbc = i8xx_disable_fbc;
- }
- /* 855GM needs testing */
+ dev_priv->display.find_dpll = i9xx_find_best_dpll;
+
+ if (HAS_DDI(dev)) {
+ dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+ dev_priv->display.get_plane_config = ironlake_get_plane_config;
+ dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
+ dev_priv->display.crtc_enable = haswell_crtc_enable;
+ dev_priv->display.crtc_disable = haswell_crtc_disable;
+ dev_priv->display.off = haswell_crtc_off;
+ dev_priv->display.update_primary_plane =
+ ironlake_update_primary_plane;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+ dev_priv->display.get_plane_config = ironlake_get_plane_config;
+ dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.crtc_enable = ironlake_crtc_enable;
+ dev_priv->display.crtc_disable = ironlake_crtc_disable;
+ dev_priv->display.off = ironlake_crtc_off;
+ dev_priv->display.update_primary_plane =
+ ironlake_update_primary_plane;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_plane_config = i9xx_get_plane_config;
+ dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.crtc_enable = valleyview_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ dev_priv->display.off = i9xx_crtc_off;
+ dev_priv->display.update_primary_plane =
+ i9xx_update_primary_plane;
+ } else {
+ dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_plane_config = i9xx_get_plane_config;
+ dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.crtc_enable = i9xx_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ dev_priv->display.off = i9xx_crtc_off;
+ dev_priv->display.update_primary_plane =
+ i9xx_update_primary_plane;
}
/* Returns the core display clock speed */
- if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev)))
+ if (IS_VALLEYVIEW(dev))
+ dev_priv->display.get_display_clock_speed =
+ valleyview_get_display_clock_speed;
+ else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_I915G(dev))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
- else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
+ else if (IS_I945GM(dev) || IS_845G(dev))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
+ else if (IS_PINEVIEW(dev))
+ dev_priv->display.get_display_clock_speed =
+ pnv_get_display_clock_speed;
else if (IS_I915GM(dev))
dev_priv->display.get_display_clock_speed =
i915gm_get_display_clock_speed;
@@ -4602,140 +11499,861 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
- /* For FIFO watermark updates */
- if (IS_IRONLAKE(dev))
- dev_priv->display.update_wm = NULL;
- else if (IS_G4X(dev))
- dev_priv->display.update_wm = g4x_update_wm;
- else if (IS_I965G(dev))
- dev_priv->display.update_wm = i965_update_wm;
- else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
- dev_priv->display.update_wm = i9xx_update_wm;
- dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- } else {
- if (IS_I85X(dev))
- dev_priv->display.get_fifo_size = i85x_get_fifo_size;
- else if (IS_845G(dev))
- dev_priv->display.get_fifo_size = i845_get_fifo_size;
- else
- dev_priv->display.get_fifo_size = i830_get_fifo_size;
- dev_priv->display.update_wm = i830_update_wm;
+ if (HAS_PCH_SPLIT(dev)) {
+ if (IS_GEN5(dev)) {
+ dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ } else if (IS_GEN6(dev)) {
+ dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.modeset_global_resources =
+ snb_modeset_global_resources;
+ } else if (IS_IVYBRIDGE(dev)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.modeset_global_resources =
+ ivb_modeset_global_resources;
+ } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
+ dev_priv->display.fdi_link_train = hsw_fdi_link_train;
+ dev_priv->display.write_eld = haswell_write_eld;
+ dev_priv->display.modeset_global_resources =
+ haswell_modeset_global_resources;
+ }
+ } else if (IS_G4X(dev)) {
+ dev_priv->display.write_eld = g4x_write_eld;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.modeset_global_resources =
+ valleyview_modeset_global_resources;
+ dev_priv->display.write_eld = ironlake_write_eld;
}
+
+ /* Default just returns -ENODEV to indicate unsupported */
+ dev_priv->display.queue_flip = intel_default_queue_flip;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ dev_priv->display.queue_flip = intel_gen2_queue_flip;
+ break;
+
+ case 3:
+ dev_priv->display.queue_flip = intel_gen3_queue_flip;
+ break;
+
+ case 4:
+ case 5:
+ dev_priv->display.queue_flip = intel_gen4_queue_flip;
+ break;
+
+ case 6:
+ dev_priv->display.queue_flip = intel_gen6_queue_flip;
+ break;
+ case 7:
+ case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
+ dev_priv->display.queue_flip = intel_gen7_queue_flip;
+ break;
+ }
+
+ intel_panel_init_backlight_funcs(dev);
}
-void intel_modeset_init(struct drm_device *dev)
+/*
+ * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
+ * resume, or other times. This quirk makes sure that's the case for
+ * affected systems.
+ */
+static void quirk_pipea_force(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->quirks |= QUIRK_PIPEA_FORCE;
+ DRM_INFO("applying pipe a force quirk\n");
+}
+
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int num_pipe;
+ dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+ DRM_INFO("applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
+ DRM_INFO("applying inverted panel brightness quirk\n");
+}
+
+/* Some VBT's incorrectly indicate no backlight is present */
+static void quirk_backlight_present(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
+ DRM_INFO("applying backlight present quirk\n");
+}
+
+struct intel_quirk {
+ int device;
+ int subsystem_vendor;
+ int subsystem_device;
+ void (*hook)(struct drm_device *dev);
+};
+
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+ void (*hook)(struct drm_device *dev);
+ const struct dmi_system_id (*dmi_id_list)[];
+};
+
+static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+ DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
+ return 1;
+}
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+ {
+ .dmi_id_list = &(const struct dmi_system_id[]) {
+ {
+ .callback = intel_dmi_reverse_brightness,
+ .ident = "NCR Corporation",
+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
+ },
+ },
+ { } /* terminating entry */
+ },
+ .hook = quirk_invert_brightness,
+ },
+};
+
+static struct intel_quirk intel_quirks[] = {
+ /* HP Mini needs pipe A force quirk (LP: #322104) */
+ { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
+
+ /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
+ { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
+
+ /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
+ { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
+
+ /* Lenovo U160 cannot use SSC on LVDS */
+ { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+ /* Sony Vaio Y cannot use SSC on LVDS */
+ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+ /* Acer Aspire 5734Z must invert backlight brightness */
+ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+ /* Acer/eMachines G725 */
+ { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+ /* Acer/eMachines e725 */
+ { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+ /* Acer Aspire 4736Z */
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+ /* Acer Aspire 5336 */
+ { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
+ /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
+ { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+
+ /* Toshiba CB35 Chromebook (Celeron 2955U) */
+ { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+ /* HP Chromebook 14 (Celeron 2955U) */
+ { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
+};
+
+static void intel_init_quirks(struct drm_device *dev)
+{
+ struct pci_dev *d = dev->pdev;
int i;
+ for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+ struct intel_quirk *q = &intel_quirks[i];
+
+ if (d->device == q->device &&
+ (d->subsystem_vendor == q->subsystem_vendor ||
+ q->subsystem_vendor == PCI_ANY_ID) &&
+ (d->subsystem_device == q->subsystem_device ||
+ q->subsystem_device == PCI_ANY_ID))
+ q->hook(dev);
+ }
+ for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+ if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
+ intel_dmi_quirks[i].hook(dev);
+ }
+}
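
Each table entry is matched against the PCI identity of the GPU: a quirk fires when the device ID matches and each subsystem field either matches exactly or is the PCI_ANY_ID wildcard. A sketch of that predicate, with a hypothetical helper name:

/* Sketch: the match rule applied by intel_init_quirks() above.
 * intel_quirk_matches() is a hypothetical name, not a driver function. */
static bool intel_quirk_matches(const struct intel_quirk *q,
				const struct pci_dev *d)
{
	return d->device == q->device &&
	       (q->subsystem_vendor == PCI_ANY_ID ||
		d->subsystem_vendor == q->subsystem_vendor) &&
	       (q->subsystem_device == PCI_ANY_ID ||
		d->subsystem_device == q->subsystem_device);
}
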
+
+/* Disable the VGA plane that we never use */
+static void i915_disable_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 sr1;
+ u32 vga_reg = i915_vgacntrl_reg(dev);
+
+ /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(SR01, VGA_SR_INDEX);
+ sr1 = inb(VGA_SR_DATA);
+ outb(sr1 | 1<<5, VGA_SR_DATA);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+ udelay(300);
+
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ POSTING_READ(vga_reg);
+}
+
+void intel_modeset_init_hw(struct drm_device *dev)
+{
+ intel_prepare_ddi(dev);
+
+ intel_init_clock_gating(dev);
+
+ intel_reset_dpio(dev);
+
+ intel_enable_gt_powersave(dev);
+}
+
+void intel_modeset_suspend_hw(struct drm_device *dev)
+{
+ intel_suspend_hw(dev);
+}
+
+void intel_modeset_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int sprite, ret;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+
drm_mode_config_init(dev);
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- dev->mode_config.funcs = (void *)&intel_mode_funcs;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
+ dev->mode_config.funcs = &intel_mode_funcs;
+
+ intel_init_quirks(dev);
+
+ intel_init_pm(dev);
+
+ if (INTEL_INFO(dev)->num_pipes == 0)
+ return;
intel_init_display(dev);
- if (IS_I965G(dev)) {
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
- } else if (IS_I9XX(dev)) {
+ if (IS_GEN2(dev)) {
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ } else if (IS_GEN3(dev)) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else {
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
}
- /* set memory base */
- if (IS_I9XX(dev))
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
- else
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
+ if (IS_GEN2(dev)) {
+ dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
+ dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
+ } else {
+ dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
+ dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
+ }
+
+ dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
- if (IS_MOBILE(dev) || IS_I9XX(dev))
- num_pipe = 2;
- else
- num_pipe = 1;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
- num_pipe, num_pipe > 1 ? "s" : "");
+ INTEL_INFO(dev)->num_pipes,
+ INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
+
+ for_each_pipe(pipe) {
+ intel_crtc_init(dev, pipe);
+ for_each_sprite(pipe, sprite) {
+ ret = intel_plane_init(dev, pipe, sprite);
+ if (ret)
+ DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
+ pipe_name(pipe), sprite_name(pipe, sprite), ret);
+ }
+ }
- if (IS_I85X(dev))
- pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock);
- else if (IS_I9XX(dev) || IS_G4X(dev))
- pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock);
+ intel_init_dpio(dev);
+ intel_reset_dpio(dev);
- for (i = 0; i < num_pipe; i++) {
- intel_crtc_init(dev, i);
- }
+ intel_cpu_pll_init(dev);
+ intel_shared_dpll_init(dev);
+ /* Just disable it once at startup */
+ i915_disable_vga(dev);
intel_setup_outputs(dev);
- intel_init_clock_gating(dev);
+ /* Just in case the BIOS is doing something questionable. */
+ intel_disable_fbc(dev);
+
+ drm_modeset_lock_all(dev);
+ intel_modeset_setup_hw_state(dev, false);
+ drm_modeset_unlock_all(dev);
+
+ for_each_intel_crtc(dev, crtc) {
+ if (!crtc->active)
+ continue;
+
+ /*
+ * Note that reserving the BIOS fb up front prevents us
+ * from stuffing other stolen allocations like the ring
+ * on top. This prevents some ugliness at boot time, and
+ * can even allow for smooth boot transitions if the BIOS
+ * fb is large enough for the active pipe configuration.
+ */
+ if (dev_priv->display.get_plane_config) {
+ dev_priv->display.get_plane_config(crtc,
+ &crtc->plane_config);
+ /*
+ * If the fb is shared between multiple heads, we'll
+ * just get the first one.
+ */
+ intel_find_plane_obj(crtc, &crtc->plane_config);
+ }
+ }
+}
+
+static void intel_enable_pipe_a(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+ struct drm_connector *crt = NULL;
+ struct intel_load_detect_pipe load_detect_temp;
+ struct drm_modeset_acquire_ctx ctx;
+
+	/* We can't just switch on pipe A; we need to set things up with a
+	 * proper mode and output configuration. As a gross hack, enable pipe A
+	 * by enabling the load detect pipe once. */
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
+ crt = &connector->base;
+ break;
+ }
+ }
+
+ if (!crt)
+ return;
+
+ if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx))
+ intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx);
- INIT_WORK(&dev_priv->idle_work, intel_idle_update);
- setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
- (unsigned long)dev);
+
+}
+
+static bool
+intel_check_plane_mapping(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg, val;
+
+ if (INTEL_INFO(dev)->num_pipes == 1)
+ return true;
+
+ reg = DSPCNTR(!crtc->plane);
+ val = I915_READ(reg);
+
+ if ((val & DISPLAY_PLANE_ENABLE) &&
+ (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
+ return false;
+
+ return true;
+}
+
+static void intel_sanitize_crtc(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+
+ /* Clear any frame start delays used for debugging left by the BIOS */
+ reg = PIPECONF(crtc->config.cpu_transcoder);
+ I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+
+ /* restore vblank interrupts to correct state */
+ if (crtc->active)
+ drm_vblank_on(dev, crtc->pipe);
+ else
+ drm_vblank_off(dev, crtc->pipe);
+
+ /* We need to sanitize the plane -> pipe mapping first because this will
+ * disable the crtc (and hence change the state) if it is wrong. Note
+ * that gen4+ has a fixed plane -> pipe mapping. */
+ if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
+ struct intel_connector *connector;
+ bool plane;
+
+ DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
+ crtc->base.base.id);
+
+ /* Pipe has the wrong plane attached and the plane is active.
+ * Temporarily change the plane mapping and disable everything
+ * ... */
+ plane = crtc->plane;
+ crtc->plane = !plane;
+ crtc->primary_enabled = true;
+ dev_priv->display.crtc_disable(&crtc->base);
+ crtc->plane = plane;
+
+ /* ... and break all links. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder->base.crtc != &crtc->base)
+ continue;
+
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ }
+ /* multiple connectors may have the same encoder:
+ * handle them and break crtc link separately */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head)
+ if (connector->encoder->base.crtc == &crtc->base) {
+ connector->encoder->base.crtc = NULL;
+ connector->encoder->connectors_active = false;
+ }
+
+ WARN_ON(crtc->active);
+ crtc->base.enabled = false;
+ }
+
+ if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+ crtc->pipe == PIPE_A && !crtc->active) {
+		/* BIOS forgot to enable pipe A, this mostly happens after
+		 * resume. Force-enable the pipe to fix this; the update_dpms
+		 * call below will restore the pipe to the right state, but leave
+		 * the required bits on. */
+ intel_enable_pipe_a(dev);
+ }
+
+ /* Adjust the state of the output pipe according to whether we
+ * have active connectors/encoders. */
+ intel_crtc_update_dpms(&crtc->base);
+
+ if (crtc->active != crtc->base.enabled) {
+ struct intel_encoder *encoder;
+
+ /* This can happen either due to bugs in the get_hw_state
+ * functions or because the pipe is force-enabled due to the
+ * pipe A quirk. */
+ DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
+ crtc->base.base.id,
+ crtc->base.enabled ? "enabled" : "disabled",
+ crtc->active ? "enabled" : "disabled");
+
+ crtc->base.enabled = crtc->active;
+
+ /* Because we only establish the connector -> encoder ->
+ * crtc links if something is active, this means the
+ * crtc is now deactivated. Break the links. connector
+		 * -> encoder links are only established when things are
+ * actually up, hence no need to break them. */
+ WARN_ON(crtc->active);
+
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+ WARN_ON(encoder->connectors_active);
+ encoder->base.crtc = NULL;
+ }
+ }
+
+ if (crtc->active || IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen < 5) {
+ /*
+ * We start out with underrun reporting disabled to avoid races.
+ * For correct bookkeeping mark this on active crtcs.
+ *
+		 * Also on gmch platforms we don't have any hardware bits to
+		 * disable the underrun reporting, which means we need to start
+ * out with underrun reporting disabled also on inactive pipes,
+ * since otherwise we'll complain about the garbage we read when
+ * e.g. coming up after runtime pm.
+ *
+ * No protection against concurrent access is required - at
+ * worst a fifo underrun happens which also sets this to false.
+ */
+ crtc->cpu_fifo_underrun_disabled = true;
+ crtc->pch_fifo_underrun_disabled = true;
+
+ update_scanline_offset(crtc);
+ }
+}
+
+static void intel_sanitize_encoder(struct intel_encoder *encoder)
+{
+ struct intel_connector *connector;
+ struct drm_device *dev = encoder->base.dev;
+
+ /* We need to check both for a crtc link (meaning that the
+ * encoder is active and trying to read from a pipe) and the
+ * pipe itself being active. */
+ bool has_active_crtc = encoder->base.crtc &&
+ to_intel_crtc(encoder->base.crtc)->active;
+
+ if (encoder->connectors_active && !has_active_crtc) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+ encoder->base.base.id,
+ encoder->base.name);
+
+ /* Connector is active, but has no active pipe. This is
+ * fallout from our resume register restoring. Disable
+ * the encoder manually again. */
+ if (encoder->base.crtc) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
+ encoder->base.base.id,
+ encoder->base.name);
+ encoder->disable(encoder);
+ }
+ encoder->base.crtc = NULL;
+ encoder->connectors_active = false;
+
+ /* Inconsistent output/port/pipe state happens presumably due to
+ * a bug in one of the get_hw_state functions. Or someplace else
+ * in our code, like the register restore mess on resume. Clamp
+ * things to off as a safer default. */
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder != encoder)
+ continue;
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ }
+ }
+ /* Enabled encoders without active connectors will be fixed in
+ * the crtc fixup. */
+}
+
+void i915_redisable_vga_power_on(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 vga_reg = i915_vgacntrl_reg(dev);
+
+ if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
+ DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ i915_disable_vga(dev);
+ }
+}
+
+void i915_redisable_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* This function can be called both from intel_modeset_setup_hw_state or
+ * at a very early point in our resume sequence, where the power well
+ * structures are not yet restored. Since this function is at a very
+ * paranoid "someone might have enabled VGA while we were not looking"
+ * level, just check if the power well is enabled instead of trying to
+ * follow the "don't touch the power well if we don't need it" policy
+ * the rest of the driver uses. */
+ if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
+ return;
+
+ i915_redisable_vga_power_on(dev);
+}
+
+static bool primary_get_hw_state(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+ if (!crtc->active)
+ return false;
+
+ return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
+}
+
+static void intel_modeset_readout_hw_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ int i;
+
+ for_each_intel_crtc(dev, crtc) {
+ memset(&crtc->config, 0, sizeof(crtc->config));
+
+ crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
+
+ crtc->active = dev_priv->display.get_pipe_config(crtc,
+ &crtc->config);
+
+ crtc->base.enabled = crtc->active;
+ crtc->primary_enabled = primary_get_hw_state(crtc);
+
+ DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
+ crtc->base.base.id,
+ crtc->active ? "enabled" : "disabled");
+ }
+
+ /* FIXME: Smash this into the new shared dpll infrastructure. */
+ if (HAS_DDI(dev))
+ intel_ddi_setup_hw_pll_state(dev);
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
+ pll->active = 0;
+ for_each_intel_crtc(dev, crtc) {
+ if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
+ pll->active++;
+ }
+ pll->refcount = pll->active;
+
+ DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
+ pll->name, pll->refcount, pll->on);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ pipe = 0;
+
+ if (encoder->get_hw_state(encoder, &pipe)) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ encoder->base.crtc = &crtc->base;
+ encoder->get_config(encoder, &crtc->config);
+ } else {
+ encoder->base.crtc = NULL;
+ }
+
+ encoder->connectors_active = false;
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
+ encoder->base.base.id,
+ encoder->base.name,
+ encoder->base.crtc ? "enabled" : "disabled",
+ pipe_name(pipe));
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->get_hw_state(connector)) {
+ connector->base.dpms = DRM_MODE_DPMS_ON;
+ connector->encoder->connectors_active = true;
+ connector->base.encoder = &connector->encoder->base;
+ } else {
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ }
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
+ connector->base.base.id,
+ connector->base.name,
+ connector->base.encoder ? "enabled" : "disabled");
+ }
+}
+
+/* Scan out the current hw modeset state, sanitize it, and map it into the drm
+ * and i915 state tracking structures. */
+void intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ int i;
+
+ intel_modeset_readout_hw_state(dev);
+
+ /*
+ * Now that we have the config, copy it to each CRTC struct
+ * Note that this could go away if we move to using crtc_config
+ * checking everywhere.
+ */
+ for_each_intel_crtc(dev, crtc) {
+ if (crtc->active && i915.fastboot) {
+ intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
+ DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
+ crtc->base.base.id);
+ drm_mode_debug_printmodeline(&crtc->base.mode);
+ }
+ }
+
+ /* HW state is read out, now we need to sanitize this mess. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ intel_sanitize_encoder(encoder);
+ }
+
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ intel_sanitize_crtc(crtc);
+ intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
+ }
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ if (!pll->on || pll->active)
+ continue;
+
+ DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
+
+ pll->disable(dev_priv, pll);
+ pll->on = false;
+ }
+
+ if (HAS_PCH_SPLIT(dev))
+ ilk_wm_get_hw_state(dev);
+
+ if (force_restore) {
+ i915_redisable_vga(dev);
+
+ /*
+ * We need to use raw interfaces for restoring state to avoid
+ * checking (bogus) intermediate states.
+ */
+ for_each_pipe(pipe) {
+ struct drm_crtc *crtc =
+ dev_priv->pipe_to_crtc_mapping[pipe];
+
+ __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
+ crtc->primary->fb);
+ }
+ } else {
+ intel_modeset_update_staged_output_state(dev);
+ }
+
+ intel_modeset_check_state(dev);
+}
+
+void intel_modeset_gem_init(struct drm_device *dev)
+{
+ struct drm_crtc *c;
+ struct intel_framebuffer *fb;
+
+ mutex_lock(&dev->struct_mutex);
+ intel_init_gt_powersave(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
- if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
- dev_priv->fsb_freq,
- dev_priv->mem_freq))
- DRM_INFO("failed to find known CxSR latency "
- "(found fsb freq %d, mem freq %d), disabling CxSR\n",
- dev_priv->fsb_freq, dev_priv->mem_freq);
+ /*
+ * Make sure any fbs we allocated at startup are properly
+ * pinned & fenced. When we do the allocation it's too early
+ * for this.
+ */
+ mutex_lock(&dev->struct_mutex);
+ for_each_crtc(dev, c) {
+ if (!c->primary->fb)
+ continue;
+
+ fb = to_intel_framebuffer(c->primary->fb);
+ if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) {
+ DRM_ERROR("failed to pin boot fb on pipe %d\n",
+ to_intel_crtc(c)->pipe);
+ drm_framebuffer_unreference(c->primary->fb);
+ c->primary->fb = NULL;
+ }
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
+void intel_connector_unregister(struct intel_connector *intel_connector)
+{
+ struct drm_connector *connector = &intel_connector->base;
+
+ intel_panel_destroy_backlight(connector);
+ drm_sysfs_connector_remove(connector);
}
void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
+ struct drm_connector *connector;
+
+ /*
+	 * Disable interrupts and polling first to avoid creating havoc.
+	 * Too much stuff here (turning off rps, connectors, ...) would
+ * experience fancy races otherwise.
+ */
+ drm_irq_uninstall(dev);
+ cancel_work_sync(&dev_priv->hotplug_work);
+ /*
+ * Due to the hpd irq storm handling the hotplug work can re-arm the
+ * poll handlers. Hence disable polling after hpd handling is shut down.
+ */
+ drm_kms_helper_poll_fini(dev);
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ intel_unregister_dsm_handler();
+
+ for_each_crtc(dev, crtc) {
/* Skip inactive CRTCs */
- if (!crtc->fb)
+ if (!crtc->primary->fb)
continue;
- intel_crtc = to_intel_crtc(crtc);
- intel_increase_pllclock(crtc, false);
- del_timer_sync(&intel_crtc->idle_timer);
+ intel_increase_pllclock(crtc);
}
- del_timer_sync(&dev_priv->idle_timer);
+ intel_disable_fbc(dev);
- if (dev_priv->display.disable_fbc)
- dev_priv->display.disable_fbc(dev);
+ intel_disable_gt_powersave(dev);
- if (dev_priv->pwrctx) {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = dev_priv->pwrctx->driver_private;
- I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
- I915_READ(PWRCTXA);
- i915_gem_object_unpin(dev_priv->pwrctx);
- drm_gem_object_unreference(dev_priv->pwrctx);
- }
+ ironlake_teardown_rc6(dev);
mutex_unlock(&dev->struct_mutex);
+ /* flush any delayed tasks or pending work */
+ flush_scheduled_work();
+
+ /* destroy the backlight and sysfs files before encoders/connectors */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct intel_connector *intel_connector;
+
+ intel_connector = to_intel_connector(connector);
+ intel_connector->unregister(intel_connector);
+ }
+
drm_mode_config_cleanup(dev);
-}
+ intel_cleanup_overlay(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_cleanup_gt_powersave(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
-/* current intel driver doesn't take advantage of encoders
- always give back the encoder for the connector
-*/
+/*
+ * Return the encoder currently attached to the connector.
+ */
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ return &intel_attached_encoder(connector)->base;
+}
- return &intel_output->enc;
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder)
+{
+ connector->encoder = encoder;
+ drm_mode_connector_attach_encoder(&connector->base,
+ &encoder->base);
}
/*
@@ -4744,13 +12362,208 @@ struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
- pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
+ if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
+ DRM_ERROR("failed to read control word\n");
+ return -EIO;
+ }
+
+ if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
+ return 0;
+
if (state)
gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
else
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
- pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
+
+ if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
+ DRM_ERROR("failed to write control word\n");
+ return -EIO;
+ }
+
return 0;
}
+
+struct intel_display_error_state {
+
+ u32 power_well_driver;
+
+ int num_transcoders;
+
+ struct intel_cursor_error_state {
+ u32 control;
+ u32 position;
+ u32 base;
+ u32 size;
+ } cursor[I915_MAX_PIPES];
+
+ struct intel_pipe_error_state {
+ bool power_domain_on;
+ u32 source;
+ u32 stat;
+ } pipe[I915_MAX_PIPES];
+
+ struct intel_plane_error_state {
+ u32 control;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 addr;
+ u32 surface;
+ u32 tile_offset;
+ } plane[I915_MAX_PIPES];
+
+ struct intel_transcoder_error_state {
+ bool power_domain_on;
+ enum transcoder cpu_transcoder;
+
+ u32 conf;
+
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ } transcoder[4];
+};
+
+struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_display_error_state *error;
+ int transcoders[] = {
+ TRANSCODER_A,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP,
+ };
+ int i;
+
+ if (INTEL_INFO(dev)->num_pipes == 0)
+ return NULL;
+
+ error = kzalloc(sizeof(*error), GFP_ATOMIC);
+ if (error == NULL)
+ return NULL;
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
+
+ for_each_pipe(i) {
+ error->pipe[i].power_domain_on =
+ intel_display_power_enabled_unlocked(dev_priv,
+ POWER_DOMAIN_PIPE(i));
+ if (!error->pipe[i].power_domain_on)
+ continue;
+
+ error->cursor[i].control = I915_READ(CURCNTR(i));
+ error->cursor[i].position = I915_READ(CURPOS(i));
+ error->cursor[i].base = I915_READ(CURBASE(i));
+
+ error->plane[i].control = I915_READ(DSPCNTR(i));
+ error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+ if (INTEL_INFO(dev)->gen <= 3) {
+ error->plane[i].size = I915_READ(DSPSIZE(i));
+ error->plane[i].pos = I915_READ(DSPPOS(i));
+ }
+ if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ error->plane[i].addr = I915_READ(DSPADDR(i));
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->plane[i].surface = I915_READ(DSPSURF(i));
+ error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+ }
+
+ error->pipe[i].source = I915_READ(PIPESRC(i));
+
+ if (!HAS_PCH_SPLIT(dev))
+ error->pipe[i].stat = I915_READ(PIPESTAT(i));
+ }
+
+ error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+ if (HAS_DDI(dev_priv->dev))
+ error->num_transcoders++; /* Account for eDP. */
+
+ for (i = 0; i < error->num_transcoders; i++) {
+ enum transcoder cpu_transcoder = transcoders[i];
+
+ error->transcoder[i].power_domain_on =
+ intel_display_power_enabled_unlocked(dev_priv,
+ POWER_DOMAIN_TRANSCODER(cpu_transcoder));
+ if (!error->transcoder[i].power_domain_on)
+ continue;
+
+ error->transcoder[i].cpu_transcoder = cpu_transcoder;
+
+ error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+ error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+ error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+ error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+ error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+ error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+ error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+ }
+
+ return error;
+}
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+
+void
+intel_display_print_error_state(struct drm_i915_error_state_buf *m,
+ struct drm_device *dev,
+ struct intel_display_error_state *error)
+{
+ int i;
+
+ if (!error)
+ return;
+
+ err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ err_printf(m, "PWR_WELL_CTL2: %08x\n",
+ error->power_well_driver);
+ for_each_pipe(i) {
+ err_printf(m, "Pipe [%d]:\n", i);
+ err_printf(m, " Power: %s\n",
+ error->pipe[i].power_domain_on ? "on" : "off");
+ err_printf(m, " SRC: %08x\n", error->pipe[i].source);
+ err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
+
+ err_printf(m, "Plane [%d]:\n", i);
+ err_printf(m, " CNTR: %08x\n", error->plane[i].control);
+ err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
+ if (INTEL_INFO(dev)->gen <= 3) {
+ err_printf(m, " SIZE: %08x\n", error->plane[i].size);
+ err_printf(m, " POS: %08x\n", error->plane[i].pos);
+ }
+ if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ err_printf(m, " SURF: %08x\n", error->plane[i].surface);
+ err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
+ }
+
+ err_printf(m, "Cursor [%d]:\n", i);
+ err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
+ err_printf(m, " POS: %08x\n", error->cursor[i].position);
+ err_printf(m, " BASE: %08x\n", error->cursor[i].base);
+ }
+
+ for (i = 0; i < error->num_transcoders; i++) {
+ err_printf(m, "CPU transcoder: %c\n",
+ transcoder_name(error->transcoder[i].cpu_transcoder));
+ err_printf(m, " Power: %s\n",
+ error->transcoder[i].power_domain_on ? "on" : "off");
+ err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
+ err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
+ err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
+ err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
+ err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
+ err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
+ err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1349d9fd01c..8a1a4fbc06a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -26,124 +26,201 @@
*/
#include <linux/i2c.h>
-#include "drmP.h"
-#include "drm.h"
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
#include "intel_drv.h"
-#include "i915_drm.h"
+#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "drm_dp_helper.h"
-
-#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
-#define DP_LINK_CONFIGURATION_SIZE 9
-
-#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP)
-
-struct intel_dp_priv {
- uint32_t output_reg;
- uint32_t DP;
- uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
- uint32_t save_DP;
- uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
- bool has_audio;
- int dpms_mode;
- uint8_t link_bw;
- uint8_t lane_count;
- uint8_t dpcd[4];
- struct intel_output *intel_output;
- struct i2c_adapter adapter;
- struct i2c_algo_dp_aux_data algo;
+struct dp_link_dpll {
+ int link_bw;
+ struct dpll dpll;
};
-static void
-intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
- uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
+static const struct dp_link_dpll gen4_dpll[] = {
+ { DP_LINK_BW_1_62,
+ { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
+ { DP_LINK_BW_2_7,
+ { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
+};
-static void
-intel_dp_link_down(struct intel_output *intel_output, uint32_t DP);
+static const struct dp_link_dpll pch_dpll[] = {
+ { DP_LINK_BW_1_62,
+ { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
+ { DP_LINK_BW_2_7,
+ { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
+};
-void
-intel_edp_link_config (struct intel_output *intel_output,
- int *lane_num, int *link_bw)
+static const struct dp_link_dpll vlv_dpll[] = {
+ { DP_LINK_BW_1_62,
+ { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
+ { DP_LINK_BW_2_7,
+ { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
+};
+
+/*
+ * CHV supports eDP 1.4, which allows more link rates.
+ * Only the fixed rates are listed below; variable rates are excluded.
+ */
+static const struct dp_link_dpll chv_dpll[] = {
+ /*
+	 * CHV requires programming a fractional division for m2.
+	 * m2 is stored in fixed-point format using the formula below:
+ * (m2_int << 22) | m2_fraction
+ */
+ { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
+ { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
+ { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
+ { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
+ { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
+ { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
+};
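
To make the fixed-point encoding concrete: (32 << 22) | 1677722 == 0x819999a and (27 << 22) | 0 == 0x6c00000, which are exactly the m2 values in the table. A small sketch, assuming a hypothetical helper name:

/* Sketch: CHV stores m2 as (m2_int << 22) | m2_fraction.
 * chv_m2_encode() is a hypothetical helper, not a driver API. */
static inline u32 chv_m2_encode(u32 m2_int, u32 m2_fraction)
{
	return (m2_int << 22) | m2_fraction;
}

/* chv_m2_encode(32, 1677722) == 0x819999a   (DP_LINK_BW_1_62 entry)
 * chv_m2_encode(27, 0)       == 0x6c00000   (2.7 and 5.4 entries) */
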
+
+/**
+ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * @intel_dp: DP struct
+ *
+ * If a CPU or PCH DP output is attached to an eDP panel, this function
+ * will return true, and false otherwise.
+ */
+static bool is_edp(struct intel_dp *intel_dp)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- *lane_num = dp_priv->lane_count;
- if (dp_priv->link_bw == DP_LINK_BW_1_62)
- *link_bw = 162000;
- else if (dp_priv->link_bw == DP_LINK_BW_2_7)
- *link_bw = 270000;
+ return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
-static int
-intel_dp_max_lane_count(struct intel_output *intel_output)
+static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- int max_lane_count = 4;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- if (dp_priv->dpcd[0] >= 0x11) {
- max_lane_count = dp_priv->dpcd[2] & 0x1f;
- switch (max_lane_count) {
- case 1: case 2: case 4:
- break;
- default:
- max_lane_count = 4;
- }
- }
- return max_lane_count;
+ return intel_dig_port->base.base.dev;
}
+static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
+{
+ return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+}
+
+static void intel_dp_link_down(struct intel_dp *intel_dp);
+static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
+static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+
static int
-intel_dp_max_link_bw(struct intel_output *intel_output)
+intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- int max_link_bw = dp_priv->dpcd[1];
+ int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+ struct drm_device *dev = intel_dp->attached_connector->base.dev;
switch (max_link_bw) {
case DP_LINK_BW_1_62:
case DP_LINK_BW_2_7:
break;
+ case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
+ if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
+ INTEL_INFO(dev)->gen >= 8) &&
+ intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
+ max_link_bw = DP_LINK_BW_5_4;
+ else
+ max_link_bw = DP_LINK_BW_2_7;
+ break;
default:
+ WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
+ max_link_bw);
max_link_bw = DP_LINK_BW_1_62;
break;
}
return max_link_bw;
}
-static int
-intel_dp_link_clock(uint8_t link_bw)
+static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
- if (link_bw == DP_LINK_BW_2_7)
- return 270000;
- else
- return 162000;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ u8 source_max, sink_max;
+
+ source_max = 4;
+ if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
+ (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
+ source_max = 2;
+
+ sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
+
+ return min(source_max, sink_max);
}
-/* I think this is a fiction */
+/*
+ * The units on the numbers in the next two are... bizarre. Examples will
+ * make it clearer; this one parallels an example in the eDP spec.
+ *
+ * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
+ *
+ * 270000 * 1 * 8 / 10 == 216000
+ *
+ * The actual data capacity of that configuration is 2.16Gbit/s, so the
+ * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
+ * or equivalently, kilopixels per second - so for 1680x1050R it'd be
+ * 119000. At 18bpp that's 2142000 kilobits per second.
+ *
+ * Thus the strange-looking division by 10 in intel_dp_link_required, to
+ * get the result in decakilobits instead of kilobits.
+ */
+
static int
-intel_dp_link_required(int pixel_clock)
+intel_dp_link_required(int pixel_clock, int bpp)
{
- return pixel_clock * 3;
+ return (pixel_clock * bpp + 9) / 10;
}
static int
+intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+{
+ return (max_link_clock * max_lanes * 8) / 10;
+}
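
Working the comment's example through both helpers: 1680x1050R has ->clock == 119000, so intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10 == 214200 decakilobits, while intel_dp_max_data_rate(270000, 1) == 216000, so that mode just fits on a single 2.7GHz lane. The same arithmetic as a self-contained sketch:

/* Sketch: the bandwidth comparison performed by intel_dp_mode_valid(). */
#include <stdio.h>

static int link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;		/* decakilobits */
}

static int max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;	/* 8b/10b overhead */
}

int main(void)
{
	/* 1680x1050R at 18bpp on one 2.7GHz lane: 214200 <= 216000, fits */
	printf("%d <= %d\n", link_required(119000, 18),
	       max_data_rate(270000, 1));
	return 0;
}
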
+
+static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_output *intel_output = to_intel_output(connector);
- int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
- int max_lanes = intel_dp_max_lane_count(intel_output);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ int target_clock = mode->clock;
+ int max_rate, mode_rate, max_lanes, max_link_clock;
+
+ if (is_edp(intel_dp) && fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+
+ target_clock = fixed_mode->clock;
+ }
- if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes)
+ max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
+ max_lanes = intel_dp_max_lane_count(intel_dp);
+
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+ mode_rate = intel_dp_link_required(target_clock, 18);
+
+ if (mode_rate > max_rate)
return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+
return MODE_OK;
}
@@ -177,6 +254,10 @@ intel_hrawclk(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t clkcfg;
+ /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
+ if (IS_VALLEYVIEW(dev))
+ return 200;
+
clkcfg = I915_READ(CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
@@ -200,76 +281,332 @@ intel_hrawclk(struct drm_device *dev)
}
}
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *out);
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *out);
+
+static enum pipe
+vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ enum pipe pipe;
+
+ /* modeset should have pipe */
+ if (crtc)
+ return to_intel_crtc(crtc)->pipe;
+
+ /* init time, try to find a pipe with this port selected */
+ for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
+ u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
+ PANEL_PORT_SELECT_MASK;
+ if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
+ return pipe;
+ if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
+ return pipe;
+ }
+
+ /* shrug */
+ return PIPE_A;
+}
+
+static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ if (HAS_PCH_SPLIT(dev))
+ return PCH_PP_CONTROL;
+ else
+ return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
+}
+
+static u32 _pp_stat_reg(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ if (HAS_PCH_SPLIT(dev))
+ return PCH_PP_STATUS;
+ else
+ return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
+}
+
+/* Reboot notifier handler to shut down panel power and guarantee T12 timing.
+   This function is only applicable when the panel PM state is not tracked. */
+static int edp_notify_handler(struct notifier_block *this, unsigned long code,
+ void *unused)
+{
+ struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
+ edp_notifier);
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_div;
+ u32 pp_ctrl_reg, pp_div_reg;
+ enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+ if (!is_edp(intel_dp) || code != SYS_RESTART)
+ return 0;
+
+ if (IS_VALLEYVIEW(dev)) {
+ pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
+ pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
+ pp_div = I915_READ(pp_div_reg);
+ pp_div &= PP_REFERENCE_DIVIDER_MASK;
+
+ /* 0x1F write to PP_DIV_REG sets max cycle delay */
+ I915_WRITE(pp_div_reg, pp_div | 0x1F);
+ I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
+ msleep(intel_dp->panel_power_cycle_delay);
+ }
+
+ return 0;
+}
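
For the handler above to run, the notifier_block must be registered on the kernel's reboot notifier chain (hence the new <linux/reboot.h> include). The registration site is not part of this hunk, so the following is only a sketch of the usual wiring:

/* Sketch: typical reboot-notifier wiring for edp_notify_handler().
 * Where intel_dp->edp_notifier is actually registered is not shown here. */
intel_dp->edp_notifier.notifier_call = edp_notify_handler;
register_reboot_notifier(&intel_dp->edp_notifier);

/* ... and symmetrically on teardown: */
unregister_reboot_notifier(&intel_dp->edp_notifier);
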
+
+static bool edp_have_panel_power(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
+}
+
+static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ enum intel_display_power_domain power_domain;
+
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ return intel_display_power_enabled(dev_priv, power_domain) &&
+ (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
+}
+
+static void
+intel_dp_check_edp(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
+ WARN(1, "eDP powered off while attempting aux channel communication.\n");
+ DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
+ I915_READ(_pp_stat_reg(intel_dp)),
+ I915_READ(_pp_ctrl_reg(intel_dp)));
+ }
+}
+
+static uint32_t
+intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+ uint32_t status;
+ bool done;
+
+#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ if (has_aux_irq)
+ done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+ msecs_to_jiffies_timeout(10));
+ else
+ done = wait_for_atomic(C, 10) == 0;
+ if (!done)
+ DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
+ has_aux_irq);
+#undef C
+
+ return status;
+}
+
+static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+
+ /*
+	 * The clock divider is based on the hrawclk and should run at
+	 * 2MHz. So take the hrawclk value, divide by 2, and use that.
+ */
+ return index ? 0 : intel_hrawclk(dev) / 2;
+}
+
+static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+
+ if (index)
+ return 0;
+
+ if (intel_dig_port->port == PORT_A) {
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ return 200; /* SNB & IVB eDP input clock at 400Mhz */
+ else
+ return 225; /* eDP input clock at 450Mhz */
+ } else {
+ return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+ }
+}
+
+static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (intel_dig_port->port == PORT_A) {
+ if (index)
+ return 0;
+ return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
+ } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+ /* Workaround for non-ULT HSW */
+ switch (index) {
+ case 0: return 63;
+ case 1: return 72;
+ default: return 0;
+ }
+ } else {
+ return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+ }
+}
+
+static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ return index ? 0 : 100;
+}
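
All four callbacks share one contract: called with index 0, 1, 2, ... they return the next candidate AUX clock divider, and 0 once the list is exhausted; the caller retries the transfer once per divider. A sketch of the loop shape (the real loop appears in intel_dp_aux_ch() below), where hsw on a non-ULT LPT PCH, for example, yields 63, then 72, then 0:

/* Sketch: the divider-iteration contract of get_aux_clock_divider(). */
int clock = 0;
uint32_t aux_clock_divider;

while ((aux_clock_divider =
		intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
	/* build send_ctl with this divider and attempt the AUX transfer;
	 * on failure, fall through and try the next divider */
}
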
+
+static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
+ bool has_aux_irq,
+ int send_bytes,
+ uint32_t aux_clock_divider)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ uint32_t precharge, timeout;
+
+ if (IS_GEN6(dev))
+ precharge = 3;
+ else
+ precharge = 5;
+
+ if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
+ timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
+ else
+ timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
+
+ return DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ timeout |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
+}
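+
+/*
+ * The send-control word built above packs everything one AUX transaction
+ * needs: the busy/done/interrupt flags, the hardware timeout, the message
+ * size, the 2us precharge count and the 2x bit-clock divider.
+ */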
+
static int
-intel_dp_aux_ch(struct intel_output *intel_output,
+intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- uint32_t output_reg = dp_priv->output_reg;
- struct drm_device *dev = intel_output->base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t ch_ctl = output_reg + 0x10;
+ uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t ch_data = ch_ctl + 4;
- int i;
- int recv_bytes;
- uint32_t ctl;
- uint32_t status;
uint32_t aux_clock_divider;
- int try;
+ int i, ret, recv_bytes;
+ uint32_t status;
+ int try, clock = 0;
+ bool has_aux_irq = HAS_AUX_IRQ(dev);
+ bool vdd;
- /* The clock divider is based off the hrawclk,
- * and would like to run at 2MHz. So, take the
- * hrawclk value and divide by 2 and use that
+ vdd = _edp_panel_vdd_on(intel_dp);
+
+ /* dp aux is extremely sensitive to irq latency, hence request the
+ * lowest possible wakeup latency and so prevent the cpu from going into
+ * deep sleep states.
*/
- if (IS_eDP(intel_output))
- aux_clock_divider = 225; /* eDP input clock at 450Mhz */
- else if (IS_IRONLAKE(dev))
- aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
- else
- aux_clock_divider = intel_hrawclk(dev) / 2;
-
- /* Must try at least 3 times according to DP spec */
- for (try = 0; try < 5; try++) {
- /* Load the send data into the aux channel data registers */
- for (i = 0; i < send_bytes; i += 4) {
- uint32_t d = pack_aux(send + i, send_bytes - i);
-
- I915_WRITE(ch_data + i, d);
- }
-
- ctl = (DP_AUX_CH_CTL_SEND_BUSY |
- DP_AUX_CH_CTL_TIME_OUT_400us |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
-
- /* Send the command and wait for it to complete */
- I915_WRITE(ch_ctl, ctl);
- (void) I915_READ(ch_ctl);
- for (;;) {
- udelay(100);
- status = I915_READ(ch_ctl);
- if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ pm_qos_update_request(&dev_priv->pm_qos, 0);
+
+ intel_dp_check_edp(intel_dp);
+
+ intel_aux_display_runtime_get(dev_priv);
+
+ /* Try to wait for any previous AUX channel activity */
+ for (try = 0; try < 3; try++) {
+ status = I915_READ_NOTRACE(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (try == 3) {
+ WARN(1, "dp_aux_ch not started status 0x%08x\n",
+ I915_READ(ch_ctl));
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Only 5 data registers! */
+ if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
+ ret = -E2BIG;
+ goto out;
+ }
+
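+	/*
+	 * Try each candidate AUX clock divider in turn; the platform callback
+	 * returns 0 once its list is exhausted. For every divider value, the
+	 * DP spec requires at least 3 attempts (we allow 5) before giving up.
+	 */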
+ while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
+ u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
+ has_aux_irq,
+ send_bytes,
+ aux_clock_divider);
+
+ /* Must try at least 3 times according to DP spec */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4)
+ I915_WRITE(ch_data + i,
+ pack_aux(send + i, send_bytes - i));
+
+ /* Send the command and wait for it to complete */
+ I915_WRITE(ch_ctl, send_ctl);
+
+ status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
+
+ /* Clear done status and any errors */
+ I915_WRITE(ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR))
+ continue;
+ if (status & DP_AUX_CH_CTL_DONE)
break;
}
-
- /* Clear done status and any errors */
- I915_WRITE(ch_ctl, (status |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR));
- (void) I915_READ(ch_ctl);
- if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0)
+ if (status & DP_AUX_CH_CTL_DONE)
break;
}
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
/* Check for timeout or receive error.
@@ -277,557 +614,1941 @@ intel_dp_aux_ch(struct intel_output *intel_output,
*/
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
/* Timeouts occur when the device isn't connected, so they're
* "normal" -- don't fill the kernel log with these */
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto out;
}
/* Unload any bytes sent back from the other side */
recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
-
if (recv_bytes > recv_size)
recv_bytes = recv_size;
-
- for (i = 0; i < recv_bytes; i += 4) {
- uint32_t d = I915_READ(ch_data + i);
- unpack_aux(d, recv + i, recv_bytes - i);
- }
+ for (i = 0; i < recv_bytes; i += 4)
+ unpack_aux(I915_READ(ch_data + i),
+ recv + i, recv_bytes - i);
+
+ ret = recv_bytes;
+out:
+ pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
+ intel_aux_display_runtime_put(dev_priv);
+
+ if (vdd)
+ edp_panel_vdd_off(intel_dp, false);
- return recv_bytes;
+ return ret;
}
-/* Write data to the aux channel in native mode */
-static int
-intel_dp_aux_native_write(struct intel_output *intel_output,
- uint16_t address, uint8_t *send, int send_bytes)
+#define BARE_ADDRESS_SIZE 3
+#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
+static ssize_t
+intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
+ struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
+ uint8_t txbuf[20], rxbuf[20];
+ size_t txsize, rxsize;
int ret;
- uint8_t msg[20];
- int msg_bytes;
- uint8_t ack;
-
- if (send_bytes > 16)
- return -1;
- msg[0] = AUX_NATIVE_WRITE << 4;
- msg[1] = address >> 8;
- msg[2] = address & 0xff;
- msg[3] = send_bytes - 1;
- memcpy(&msg[4], send, send_bytes);
- msg_bytes = send_bytes + 4;
- for (;;) {
- ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1);
- if (ret < 0)
- return ret;
- if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
- break;
- else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
- udelay(100);
- else
- return -EIO;
- }
- return send_bytes;
-}
-/* Write a single byte to the aux channel in native mode */
-static int
-intel_dp_aux_native_write_1(struct intel_output *intel_output,
- uint16_t address, uint8_t byte)
-{
- return intel_dp_aux_native_write(intel_output, address, &byte, 1);
-}
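+	/*
+	 * Pack the AUX message header: the 4-bit request code goes in the
+	 * high nibble of byte 0, the low 16 bits of the address in bytes 1-2,
+	 * and the transfer size minus one in byte 3.
+	 */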
+ txbuf[0] = msg->request << 4;
+ txbuf[1] = msg->address >> 8;
+ txbuf[2] = msg->address & 0xff;
+ txbuf[3] = msg->size - 1;
-/* read bytes from a native aux channel */
-static int
-intel_dp_aux_native_read(struct intel_output *intel_output,
- uint16_t address, uint8_t *recv, int recv_bytes)
-{
- uint8_t msg[4];
- int msg_bytes;
- uint8_t reply[20];
- int reply_bytes;
- uint8_t ack;
- int ret;
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
+ rxsize = 1;
- msg[0] = AUX_NATIVE_READ << 4;
- msg[1] = address >> 8;
- msg[2] = address & 0xff;
- msg[3] = recv_bytes - 1;
+ if (WARN_ON(txsize > 20))
+ return -E2BIG;
- msg_bytes = 4;
- reply_bytes = recv_bytes + 1;
+ memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
- for (;;) {
- ret = intel_dp_aux_ch(intel_output, msg, msg_bytes,
- reply, reply_bytes);
- if (ret == 0)
- return -EPROTO;
- if (ret < 0)
- return ret;
- ack = reply[0];
- if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
- memcpy(recv, reply + 1, ret - 1);
- return ret - 1;
+ ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
+ if (ret > 0) {
+ msg->reply = rxbuf[0] >> 4;
+
+ /* Return payload size. */
+ ret = msg->size;
}
- else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
- udelay(100);
- else
- return -EIO;
- }
-}
+ break;
-static int
-intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
- uint8_t write_byte, uint8_t *read_byte)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- struct intel_dp_priv *dp_priv = container_of(adapter,
- struct intel_dp_priv,
- adapter);
- struct intel_output *intel_output = dp_priv->intel_output;
- uint16_t address = algo_data->address;
- uint8_t msg[5];
- uint8_t reply[2];
- int msg_bytes;
- int reply_bytes;
- int ret;
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
+ rxsize = msg->size + 1;
+
+ if (WARN_ON(rxsize > 20))
+ return -E2BIG;
+
+ ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
+ if (ret > 0) {
+ msg->reply = rxbuf[0] >> 4;
+ /*
+ * Assume happy day, and copy the data. The caller is
+ * expected to check msg->reply before touching it.
+ *
+ * Return payload size.
+ */
+ ret--;
+ memcpy(msg->buffer, rxbuf + 1, ret);
+ }
+ break;
- /* Set up the command byte */
- if (mode & MODE_I2C_READ)
- msg[0] = AUX_I2C_READ << 4;
- else
- msg[0] = AUX_I2C_WRITE << 4;
+ default:
+ ret = -EINVAL;
+ break;
+ }
- if (!(mode & MODE_I2C_STOP))
- msg[0] |= AUX_I2C_MOT << 4;
+ return ret;
+}
- msg[1] = address >> 8;
- msg[2] = address;
+static void
+intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ enum port port = intel_dig_port->port;
+ const char *name = NULL;
+ int ret;
- switch (mode) {
- case MODE_I2C_WRITE:
- msg[3] = 0;
- msg[4] = write_byte;
- msg_bytes = 5;
- reply_bytes = 1;
+ switch (port) {
+ case PORT_A:
+ intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
+ name = "DPDDC-A";
break;
- case MODE_I2C_READ:
- msg[3] = 0;
- msg_bytes = 4;
- reply_bytes = 2;
+ case PORT_B:
+ intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
+ name = "DPDDC-B";
break;
- default:
- msg_bytes = 3;
- reply_bytes = 1;
+ case PORT_C:
+ intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
+ name = "DPDDC-C";
+ break;
+ case PORT_D:
+ intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
+ name = "DPDDC-D";
break;
+ default:
+ BUG();
}
- for (;;) {
- ret = intel_dp_aux_ch(intel_output,
- msg, msg_bytes,
- reply, reply_bytes);
- if (ret < 0) {
- DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
- return ret;
- }
- switch (reply[0] & AUX_I2C_REPLY_MASK) {
- case AUX_I2C_REPLY_ACK:
- if (mode == MODE_I2C_READ) {
- *read_byte = reply[1];
+ if (!HAS_DDI(dev))
+ intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
+
+ intel_dp->aux.name = name;
+ intel_dp->aux.dev = dev->dev;
+ intel_dp->aux.transfer = intel_dp_aux_transfer;
+
+ DRM_DEBUG_KMS("registering %s bus for %s\n", name,
+ connector->base.kdev->kobj.name);
+
+ ret = drm_dp_aux_register(&intel_dp->aux);
+ if (ret < 0) {
+ DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
+ name, ret);
+ return;
+ }
+
+ ret = sysfs_create_link(&connector->base.kdev->kobj,
+ &intel_dp->aux.ddc.dev.kobj,
+ intel_dp->aux.ddc.dev.kobj.name);
+ if (ret < 0) {
+ DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
+ drm_dp_aux_unregister(&intel_dp->aux);
+ }
+}
+
+static void
+intel_dp_connector_unregister(struct intel_connector *intel_connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
+
+ sysfs_remove_link(&intel_connector->base.kdev->kobj,
+ intel_dp->aux.ddc.dev.kobj.name);
+ intel_connector_unregister(intel_connector);
+}
+
+static void
+intel_dp_set_clock(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config, int link_bw)
+{
+ struct drm_device *dev = encoder->base.dev;
+ const struct dp_link_dpll *divisor = NULL;
+ int i, count = 0;
+
+ if (IS_G4X(dev)) {
+ divisor = gen4_dpll;
+ count = ARRAY_SIZE(gen4_dpll);
+ } else if (IS_HASWELL(dev)) {
+ /* Haswell has special-purpose DP DDI clocks. */
+ } else if (HAS_PCH_SPLIT(dev)) {
+ divisor = pch_dpll;
+ count = ARRAY_SIZE(pch_dpll);
+ } else if (IS_CHERRYVIEW(dev)) {
+ divisor = chv_dpll;
+ count = ARRAY_SIZE(chv_dpll);
+ } else if (IS_VALLEYVIEW(dev)) {
+ divisor = vlv_dpll;
+ count = ARRAY_SIZE(vlv_dpll);
+ }
+
+ if (divisor && count) {
+ for (i = 0; i < count; i++) {
+ if (link_bw == divisor[i].link_bw) {
+ pipe_config->dpll = divisor[i].dpll;
+ pipe_config->clock_set = true;
+ break;
}
- return reply_bytes - 1;
- case AUX_I2C_REPLY_NACK:
- DRM_DEBUG_KMS("aux_ch nack\n");
- return -EREMOTEIO;
- case AUX_I2C_REPLY_DEFER:
- DRM_DEBUG_KMS("aux_ch defer\n");
- udelay(100);
- break;
- default:
- DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
- return -EREMOTEIO;
}
}
}
-static int
-intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
+static void
+intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
-
- DRM_DEBUG_KMS("i2c_init %s\n", name);
- dp_priv->algo.running = false;
- dp_priv->algo.address = 0;
- dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder transcoder = crtc->config.cpu_transcoder;
- memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter));
- dp_priv->adapter.owner = THIS_MODULE;
- dp_priv->adapter.class = I2C_CLASS_DDC;
- strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
- dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
- dp_priv->adapter.algo_data = &dp_priv->algo;
- dp_priv->adapter.dev.parent = &intel_output->base.kdev;
-
- return i2c_dp_aux_add_bus(&dp_priv->adapter);
+ I915_WRITE(PIPE_DATA_M2(transcoder),
+ TU_SIZE(m_n->tu) | m_n->gmch_m);
+ I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
+ I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
+ I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
}
-static bool
-intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+bool
+intel_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ enum port port = dp_to_dig_port(intel_dp)->port;
+ struct intel_crtc *intel_crtc = encoder->new_crtc;
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
- int max_lane_count = intel_dp_max_lane_count(intel_output);
- int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0;
- static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
-
- for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
- for (clock = 0; clock <= max_clock; clock++) {
- int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
-
- if (intel_dp_link_required(mode->clock) <= link_avail) {
- dp_priv->link_bw = bws[clock];
- dp_priv->lane_count = lane_count;
- adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
- DRM_DEBUG_KMS("Display port link bw %02x lane "
- "count %d clock %d\n",
- dp_priv->link_bw, dp_priv->lane_count,
- adjusted_mode->clock);
- return true;
+ int min_lane_count = 1;
+ int max_lane_count = intel_dp_max_lane_count(intel_dp);
+	/* Conveniently, the link BW constants become indices with a shift... */
+ int min_clock = 0;
+ int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
+ int bpp, mode_rate;
+ static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
+ int link_avail, link_clock;
+
+ if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
+ pipe_config->has_pch_encoder = true;
+
+ pipe_config->has_dp_encoder = true;
+ pipe_config->has_audio = intel_dp->has_audio;
+
+ if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+ adjusted_mode);
+ if (!HAS_PCH_SPLIT(dev))
+ intel_gmch_panel_fitting(intel_crtc, pipe_config,
+ intel_connector->panel.fitting_mode);
+ else
+ intel_pch_panel_fitting(intel_crtc, pipe_config,
+ intel_connector->panel.fitting_mode);
+ }
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return false;
+
+ DRM_DEBUG_KMS("DP link computation with max lane count %i "
+ "max bw %02x pixel clock %iKHz\n",
+ max_lane_count, bws[max_clock],
+ adjusted_mode->crtc_clock);
+
+ /* Walk through all bpp values. Luckily they're all nicely spaced with 2
+ * bpc in between. */
+ bpp = pipe_config->pipe_bpp;
+ if (is_edp(intel_dp)) {
+ if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
+ DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
+ dev_priv->vbt.edp_bpp);
+ bpp = dev_priv->vbt.edp_bpp;
+ }
+
+ if (IS_BROADWELL(dev)) {
+ /* Yes, it's an ugly hack. */
+ min_lane_count = max_lane_count;
+ DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
+ min_lane_count);
+ } else if (dev_priv->vbt.edp_lanes) {
+ min_lane_count = min(dev_priv->vbt.edp_lanes,
+ max_lane_count);
+ DRM_DEBUG_KMS("using min %u lanes per VBT\n",
+ min_lane_count);
+ }
+
+ if (dev_priv->vbt.edp_rate) {
+ min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
+ DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
+ bws[min_clock]);
+ }
+ }
+
+ for (; bpp >= 6*3; bpp -= 2*3) {
+ mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+ bpp);
+
+ for (clock = min_clock; clock <= max_clock; clock++) {
+ for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
+ link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
+ link_avail = intel_dp_max_data_rate(link_clock,
+ lane_count);
+
+ if (mode_rate <= link_avail) {
+ goto found;
+ }
}
}
}
+
return false;
-}
-struct intel_dp_m_n {
- uint32_t tu;
- uint32_t gmch_m;
- uint32_t gmch_n;
- uint32_t link_m;
- uint32_t link_n;
-};
+found:
+ if (intel_dp->color_range_auto) {
+ /*
+ * See:
+ * CEA-861-E - 5.1 Default Encoding Parameters
+ * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
+ */
+ if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
+ intel_dp->color_range = DP_COLOR_RANGE_16_235;
+ else
+ intel_dp->color_range = 0;
+ }
-static void
-intel_reduce_ratio(uint32_t *num, uint32_t *den)
-{
- while (*num > 0xffffff || *den > 0xffffff) {
- *num >>= 1;
- *den >>= 1;
+ if (intel_dp->color_range)
+ pipe_config->limited_color_range = true;
+
+ intel_dp->link_bw = bws[clock];
+ intel_dp->lane_count = lane_count;
+ pipe_config->pipe_bpp = bpp;
+ pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+
+ DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ pipe_config->port_clock, bpp);
+ DRM_DEBUG_KMS("DP link bw required %i available %i\n",
+ mode_rate, link_avail);
+
+ intel_link_compute_m_n(bpp, lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+
+ if (intel_connector->panel.downclock_mode != NULL &&
+ intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
+ intel_link_compute_m_n(bpp, lane_count,
+ intel_connector->panel.downclock_mode->clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m2_n2);
}
+
+ intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+
+ return true;
}
-static void
-intel_dp_compute_m_n(int bytes_per_pixel,
- int nlanes,
- int pixel_clock,
- int link_clock,
- struct intel_dp_m_n *m_n)
+static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
- m_n->tu = 64;
- m_n->gmch_m = pixel_clock * bytes_per_pixel;
- m_n->gmch_n = link_clock * nlanes;
- intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
- m_n->link_m = pixel_clock;
- m_n->link_n = link_clock;
- intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl &= ~DP_PLL_FREQ_MASK;
+
+ if (crtc->config.port_clock == 162000) {
+		/* For a long time we've carried around an ILK-DevA w/a for the
+ * 160MHz clock. If we're really unlucky, it's still required.
+ */
+ DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
+ dpa_ctl |= DP_PLL_FREQ_160MHZ;
+ intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+ } else {
+ dpa_ctl |= DP_PLL_FREQ_270MHZ;
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ }
+
+ I915_WRITE(DP_A, dpa_ctl);
+
+ POSTING_READ(DP_A);
+ udelay(500);
}
-void
-intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_dp_prepare(struct intel_encoder *encoder)
{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int lane_count = 4;
- struct intel_dp_m_n m_n;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ enum port port = dp_to_dig_port(intel_dp)->port;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
/*
- * Find the lane count in the intel_output private
+ * There are four kinds of DP registers:
+ *
+ * IBX PCH
+ * SNB CPU
+ * IVB CPU
+ * CPT PCH
+ *
+ * IBX PCH and CPU are the same for almost everything,
+ * except that the CPU DP PLL is configured in this
+ * register
+ *
+ * CPT PCH is quite different, having many bits moved
+ * to the TRANS_DP_CTL register instead. That
+ * configuration happens (oddly) in ironlake_pch_enable
*/
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- if (!connector->encoder || connector->encoder->crtc != crtc)
- continue;
+ /* Preserve the BIOS-computed detected bit. This is
+ * supposed to be read-only.
+ */
+ intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
- if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) {
- lane_count = dp_priv->lane_count;
- break;
- }
+ /* Handle DP bits in common between all three register formats */
+ intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
+
+ if (crtc->config.has_audio) {
+ DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
+ pipe_name(crtc->pipe));
+ intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+ intel_write_eld(&encoder->base, adjusted_mode);
}
- /*
- * Compute the GMCH and Link ratios. The '3' here is
- * the number of bytes_per_pixel post-LUT, which we always
- * set up for 8-bits of R/G/B, or 3 bytes total.
- */
- intel_dp_compute_m_n(3, lane_count,
- mode->clock, adjusted_mode->clock, &m_n);
-
- if (IS_IRONLAKE(dev)) {
- if (intel_crtc->pipe == 0) {
- I915_WRITE(TRANSA_DATA_M1,
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
- I915_WRITE(TRANSA_DATA_N1, m_n.gmch_n);
- I915_WRITE(TRANSA_DP_LINK_M1, m_n.link_m);
- I915_WRITE(TRANSA_DP_LINK_N1, m_n.link_n);
+ /* Split out the IBX/CPU vs CPT settings */
+
+ if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ intel_dp->DP |= crtc->pipe << 29;
+ } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
+ if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
+ intel_dp->DP |= intel_dp->color_range;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ if (!IS_CHERRYVIEW(dev)) {
+ if (crtc->pipe == 1)
+ intel_dp->DP |= DP_PIPEB_SELECT;
} else {
- I915_WRITE(TRANSB_DATA_M1,
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
- I915_WRITE(TRANSB_DATA_N1, m_n.gmch_n);
- I915_WRITE(TRANSB_DP_LINK_M1, m_n.link_m);
- I915_WRITE(TRANSB_DP_LINK_N1, m_n.link_n);
+ intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
}
} else {
- if (intel_crtc->pipe == 0) {
- I915_WRITE(PIPEA_GMCH_DATA_M,
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
- I915_WRITE(PIPEA_GMCH_DATA_N,
- m_n.gmch_n);
- I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m);
- I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n);
- } else {
- I915_WRITE(PIPEB_GMCH_DATA_M,
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
- I915_WRITE(PIPEB_GMCH_DATA_N,
- m_n.gmch_n);
- I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m);
- I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n);
- }
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
}
-static void
-intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
+#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
+
+#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
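+
+/*
+ * Each IDLE_*_MASK/VALUE pair above names the PP_STATUS bits that describe
+ * one panel power-sequencer state; the literal 0 terms mark fields that must
+ * read back as zero. wait_panel_status() polls until
+ * (PP_STATUS & mask) == value.
+ */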
+
+static void wait_panel_status(struct intel_dp *intel_dp,
+ u32 mask,
+ u32 value)
{
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- struct drm_crtc *crtc = intel_output->enc.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_stat_reg, pp_ctrl_reg;
- dp_priv->DP = (DP_LINK_TRAIN_OFF |
- DP_VOLTAGE_0_4 |
- DP_PRE_EMPHASIS_0 |
- DP_SYNC_VS_HIGH |
- DP_SYNC_HS_HIGH);
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- switch (dp_priv->lane_count) {
- case 1:
- dp_priv->DP |= DP_PORT_WIDTH_1;
- break;
- case 2:
- dp_priv->DP |= DP_PORT_WIDTH_2;
- break;
- case 4:
- dp_priv->DP |= DP_PORT_WIDTH_4;
- break;
+ DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
+ mask, value,
+ I915_READ(pp_stat_reg),
+ I915_READ(pp_ctrl_reg));
+
+ if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
+ DRM_ERROR("Panel status timeout: status %08x control %08x\n",
+ I915_READ(pp_stat_reg),
+ I915_READ(pp_ctrl_reg));
}
- if (dp_priv->has_audio)
- dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE;
- memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
- dp_priv->link_configuration[0] = dp_priv->link_bw;
- dp_priv->link_configuration[1] = dp_priv->lane_count;
+ DRM_DEBUG_KMS("Wait complete\n");
+}
+
+static void wait_panel_on(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power on\n");
+ wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+static void wait_panel_off(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power off time\n");
+ wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+static void wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power cycle\n");
+
+	/* When we disable the VDD override bit last, we have to do the
+	 * manual wait. */
+ wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
+ intel_dp->panel_power_cycle_delay);
+
+ wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+static void wait_backlight_on(struct intel_dp *intel_dp)
+{
+ wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
+ intel_dp->backlight_on_delay);
+}
+
+static void edp_wait_backlight_off(struct intel_dp *intel_dp)
+{
+ wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
+ intel_dp->backlight_off_delay);
+}
+
+/* Read the current pp_control value, unlocking the register if it
+ * is locked
+ */
+
+static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 control;
+
+ control = I915_READ(_pp_ctrl_reg(intel_dp));
+ control &= ~PANEL_UNLOCK_MASK;
+ control |= PANEL_UNLOCK_REGS;
+ return control;
+}
+
+static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
+ u32 pp;
+ u32 pp_stat_reg, pp_ctrl_reg;
+ bool need_to_disable = !intel_dp->want_panel_vdd;
+
+ if (!is_edp(intel_dp))
+ return false;
+
+ intel_dp->want_panel_vdd = true;
+
+ if (edp_have_panel_vdd(intel_dp))
+ return need_to_disable;
+
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
+ DRM_DEBUG_KMS("Turning eDP VDD on\n");
+
+ if (!edp_have_panel_power(intel_dp))
+ wait_panel_power_cycle(intel_dp);
+ pp = ironlake_get_pp_control(intel_dp);
+ pp |= EDP_FORCE_VDD;
+
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+ DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
/*
- * Check for DPCD version > 1.1,
- * enable enahanced frame stuff in that case
+ * If the panel wasn't on, delay before accessing aux channel
*/
- if (dp_priv->dpcd[0] >= 0x11) {
- dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- dp_priv->DP |= DP_ENHANCED_FRAMING;
+ if (!edp_have_panel_power(intel_dp)) {
+ DRM_DEBUG_KMS("eDP was not running\n");
+ msleep(intel_dp->panel_power_up_delay);
}
- if (intel_crtc->pipe == 1)
- dp_priv->DP |= DP_PIPEB_SELECT;
+ return need_to_disable;
+}
+
+void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
+{
+ if (is_edp(intel_dp)) {
+ bool vdd = _edp_panel_vdd_on(intel_dp);
- if (IS_eDP(intel_output)) {
- /* don't miss out required setting for eDP */
- dp_priv->DP |= DP_PLL_ENABLE;
- if (adjusted_mode->clock < 200000)
- dp_priv->DP |= DP_PLL_FREQ_160MHZ;
- else
- dp_priv->DP |= DP_PLL_FREQ_270MHZ;
+ WARN(!vdd, "eDP VDD already requested on\n");
}
}
-static void ironlake_edp_backlight_on (struct drm_device *dev)
+static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
+ u32 pp_stat_reg, pp_ctrl_reg;
+
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+ if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
+ struct intel_digital_port *intel_dig_port =
+ dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ enum intel_display_power_domain power_domain;
+
+ DRM_DEBUG_KMS("Turning eDP VDD off\n");
+
+ pp = ironlake_get_pp_control(intel_dp);
+ pp &= ~EDP_FORCE_VDD;
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+
+ /* Make sure sequencer is idle before allowing subsequent activity */
+ DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
+
+ if ((pp & POWER_TARGET_ON) == 0)
+ intel_dp->last_power_cycle = jiffies;
+
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_put(dev_priv, power_domain);
+ }
+}
+
+static void edp_panel_vdd_work(struct work_struct *__work)
+{
+ struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
+ struct intel_dp, panel_vdd_work);
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ edp_panel_vdd_off_sync(intel_dp);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+{
+ if (!is_edp(intel_dp))
+ return;
+
+ WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
+
+ intel_dp->want_panel_vdd = false;
+
+ if (sync) {
+ edp_panel_vdd_off_sync(intel_dp);
+ } else {
+ /*
+ * Queue the timer to fire a long
+ * time from now (relative to the power down delay)
+ * to keep the panel power up across a sequence of operations
+ */
+ schedule_delayed_work(&intel_dp->panel_vdd_work,
+ msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
+ }
+}
+
+void intel_edp_panel_on(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+ u32 pp_ctrl_reg;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ DRM_DEBUG_KMS("Turn eDP power on\n");
+
+ if (edp_have_panel_power(intel_dp)) {
+ DRM_DEBUG_KMS("eDP power already on\n");
+ return;
+ }
+
+ wait_panel_power_cycle(intel_dp);
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ pp = ironlake_get_pp_control(intel_dp);
+ if (IS_GEN5(dev)) {
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+ }
+
+ pp |= POWER_TARGET_ON;
+ if (!IS_GEN5(dev))
+ pp |= PANEL_POWER_RESET;
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+
+ wait_panel_on(intel_dp);
+ intel_dp->last_power_on = jiffies;
+
+ if (IS_GEN5(dev)) {
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+ }
+}
+
+void intel_edp_panel_off(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
+ u32 pp;
+ u32 pp_ctrl_reg;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ DRM_DEBUG_KMS("Turn eDP power off\n");
+
+ edp_wait_backlight_off(intel_dp);
+
+ WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
+
+ pp = ironlake_get_pp_control(intel_dp);
+ /* We need to switch off panel power _and_ force vdd, for otherwise some
+ * panels get very unhappy and cease to work. */
+ pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
+ EDP_BLC_ENABLE);
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ intel_dp->want_panel_vdd = false;
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+
+ intel_dp->last_power_cycle = jiffies;
+ wait_panel_off(intel_dp);
+
+ /* We got a reference when we enabled the VDD. */
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_put(dev_priv, power_domain);
+}
+
+void intel_edp_backlight_on(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+ u32 pp_ctrl_reg;
+
+ if (!is_edp(intel_dp))
+ return;
DRM_DEBUG_KMS("\n");
- pp = I915_READ(PCH_PP_CONTROL);
+ /*
+ * If we enable the backlight right away following a panel power
+ * on, we may see slight flicker as the panel syncs with the eDP
+ * link. So delay a bit to make sure the image is solid before
+ * allowing it to appear.
+ */
+ wait_backlight_on(intel_dp);
+ pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
- I915_WRITE(PCH_PP_CONTROL, pp);
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+
+ intel_panel_enable_backlight(intel_dp->attached_connector);
}
-static void ironlake_edp_backlight_off (struct drm_device *dev)
+void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
+ u32 pp_ctrl_reg;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ intel_panel_disable_backlight(intel_dp->attached_connector);
DRM_DEBUG_KMS("\n");
- pp = I915_READ(PCH_PP_CONTROL);
+ pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
- I915_WRITE(PCH_PP_CONTROL, pp);
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+ intel_dp->last_backlight_off = jiffies;
}
-static void
-intel_dp_dpms(struct drm_encoder *encoder, int mode)
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- struct drm_device *dev = intel_output->base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(dp_priv->output_reg);
+ u32 dpa_ctl;
+
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
+ DRM_DEBUG_KMS("\n");
+ dpa_ctl = I915_READ(DP_A);
+ WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
+ WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+ /* We don't adjust intel_dp->DP while tearing down the link, to
+ * facilitate link retraining (e.g. after hotplug). Hence clear all
+ * enable bits here to ensure that we don't enable too much. */
+ intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ intel_dp->DP |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, intel_dp->DP);
+ POSTING_READ(DP_A);
+ udelay(200);
+}
+
+static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
+ dpa_ctl = I915_READ(DP_A);
+ WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
+ "dp pll off, should be on\n");
+ WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+ /* We can't rely on the value tracked for the DP register in
+ * intel_dp->DP because link_down must not change that (otherwise link
+	 * re-training will fail). */
+ dpa_ctl &= ~DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
+ POSTING_READ(DP_A);
+ udelay(200);
+}
+
+/* If the sink supports it, try to set the power state appropriately */
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+{
+ int ret, i;
+
+ /* Should have a valid DPCD by this point */
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
if (mode != DRM_MODE_DPMS_ON) {
- if (dp_reg & DP_PORT_EN) {
- intel_dp_link_down(intel_output, dp_priv->DP);
- if (IS_eDP(intel_output))
- ironlake_edp_backlight_off(dev);
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
+ DP_SET_POWER_D3);
+ if (ret != 1)
+ DRM_DEBUG_DRIVER("failed to write sink power state\n");
+ } else {
+ /*
+ * When turning on, we need to retry for 1ms to give the sink
+ * time to wake up.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
+ DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ msleep(1);
}
+ }
+}
+
+static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ enum port port = dp_to_dig_port(intel_dp)->port;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
+ u32 tmp;
+
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
+ tmp = I915_READ(intel_dp->output_reg);
+
+ if (!(tmp & DP_PORT_EN))
+ return false;
+
+ if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ } else if (IS_CHERRYVIEW(dev)) {
+ *pipe = DP_PORT_TO_PIPE_CHV(tmp);
+ } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
+ *pipe = PORT_TO_PIPE(tmp);
} else {
- if (!(dp_reg & DP_PORT_EN)) {
- intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
- if (IS_eDP(intel_output))
- ironlake_edp_backlight_on(dev);
+ u32 trans_sel;
+ u32 trans_dp;
+ int i;
+
+ switch (intel_dp->output_reg) {
+ case PCH_DP_B:
+ trans_sel = TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ trans_sel = TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ trans_sel = TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ return true;
}
+
+ for_each_pipe(i) {
+ trans_dp = I915_READ(TRANS_DP_CTL(i));
+ if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
+ *pipe = i;
+ return true;
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
+ intel_dp->output_reg);
}
- dp_priv->dpms_mode = mode;
+
+ return true;
}
-/*
- * Fetch AUX CH registers 0x202 - 0x207 which contain
- * link status information
- */
-static bool
-intel_dp_get_link_status(struct intel_output *intel_output,
- uint8_t link_status[DP_LINK_STATUS_SIZE])
+static void intel_dp_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
- int ret;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ u32 tmp, flags = 0;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = dp_to_dig_port(intel_dp)->port;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ int dotclock;
- ret = intel_dp_aux_native_read(intel_output,
- DP_LANE0_1_STATUS,
- link_status, DP_LINK_STATUS_SIZE);
- if (ret != DP_LINK_STATUS_SIZE)
+ tmp = I915_READ(intel_dp->output_reg);
+ if (tmp & DP_AUDIO_OUTPUT_ENABLE)
+ pipe_config->has_audio = true;
+
+ if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
+ if (tmp & DP_SYNC_HS_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (tmp & DP_SYNC_VS_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+ } else {
+ tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+ if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+ }
+
+ pipe_config->adjusted_mode.flags |= flags;
+
+ pipe_config->has_dp_encoder = true;
+
+ intel_dp_get_m_n(crtc, pipe_config);
+
+ if (port == PORT_A) {
+ if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
+ pipe_config->port_clock = 162000;
+ else
+ pipe_config->port_clock = 270000;
+ }
+
+ dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+
+ if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
+ ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+ pipe_config->adjusted_mode.crtc_clock = dotclock;
+
+ if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
+ pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+ /*
+ * This is a big fat ugly hack.
+ *
+ * Some machines in UEFI boot mode provide us a VBT that has 18
+ * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+ * unknown we fail to light up. Yet the same BIOS boots up with
+ * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+ * max, not what it tells us to use.
+ *
+ * Note: This will still be broken if the eDP panel is not lit
+ * up by the BIOS, and thus we can't get the mode at module
+ * load.
+ */
+ DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
+ dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+ }
+}
+
+static bool is_edp_psr(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return dev_priv->psr.sink_support;
+}
+
+static bool intel_edp_is_psr_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_PSR(dev))
return false;
+
+ return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+}
+
+static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
+ struct edp_vsc_psr *vsc_psr)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
+ u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
+ uint32_t *data = (uint32_t *) vsc_psr;
+ unsigned int i;
+
+	/* As per BSpec (Pipe Video Data Island Packet), we need to disable
+	   the video DIP before programming the video DIP data buffer
+	   registers. */
+ I915_WRITE(ctl_reg, 0);
+ POSTING_READ(ctl_reg);
+
+ for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
+ if (i < sizeof(struct edp_vsc_psr))
+ I915_WRITE(data_reg + i, *data++);
+ else
+ I915_WRITE(data_reg + i, 0);
+ }
+
+ I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
+ POSTING_READ(ctl_reg);
+}
+
+static void intel_edp_psr_setup(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct edp_vsc_psr psr_vsc;
+
+ if (intel_dp->psr_setup_done)
+ return;
+
+	/* Prepare VSC packet as per eDP 1.3 spec, Table 3.10 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
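+	/* HB1 = packet type (VSC), HB2 = revision, HB3 = number of valid
+	 * data bytes, per the eDP 1.3 VSC SDP header layout. */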
+ intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
+
+ /* Avoid continuous PSR exit by masking memup and hpd */
+ I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+
+ intel_dp->psr_setup_done = true;
+}
+
+static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t aux_clock_divider;
+ int precharge = 0x3;
+ int msg_size = 5; /* Header(4) + Message(1) */
+
+ aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+
+ /* Enable PSR in sink */
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
+ else
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+
+ /* Setup AUX registers */
+ I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
+ I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
+ I915_WRITE(EDP_PSR_AUX_CTL(dev),
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+}
+
+static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t max_sleep_time = 0x1f;
+ uint32_t idle_frames = 1;
+ uint32_t val = 0x0;
+ const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
+ val |= EDP_PSR_LINK_STANDBY;
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
+ val |= EDP_PSR_TP1_TIME_0us;
+ val |= EDP_PSR_SKIP_AUX_EXIT;
+	} else {
+		val |= EDP_PSR_LINK_DISABLE;
+	}
+
+ I915_WRITE(EDP_PSR_CTL(dev), val |
+ (IS_BROADWELL(dev) ? 0 : link_entry_time) |
+ max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
+ idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
+ EDP_PSR_ENABLE);
+}
+
+static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj;
+ struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+
+ dev_priv->psr.source_ok = false;
+
+ if (!HAS_PSR(dev)) {
+ DRM_DEBUG_KMS("PSR not supported on this platform\n");
+ return false;
+ }
+
+ if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
+ (dig_port->port != PORT_A)) {
+ DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
+ return false;
+ }
+
+ if (!i915.enable_psr) {
+		DRM_DEBUG_KMS("PSR disabled by flag\n");
+ return false;
+ }
+
+ crtc = dig_port->base.base.crtc;
+ if (crtc == NULL) {
+ DRM_DEBUG_KMS("crtc not active for PSR\n");
+ return false;
+ }
+
+ intel_crtc = to_intel_crtc(crtc);
+ if (!intel_crtc_active(crtc)) {
+ DRM_DEBUG_KMS("crtc not active for PSR\n");
+ return false;
+ }
+
+ obj = to_intel_framebuffer(crtc->primary->fb)->obj;
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
+ return false;
+ }
+
+ if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
+ DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
+ return false;
+ }
+
+ if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
+ S3D_ENABLE) {
+ DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+ return false;
+ }
+
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
+ return false;
+ }
+
+ dev_priv->psr.source_ok = true;
return true;
}
-static uint8_t
-intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
- int r)
+static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
- return link_status[r - DP_LANE0_1_STATUS];
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ if (!intel_edp_psr_match_conditions(intel_dp) ||
+ intel_edp_is_psr_enabled(dev))
+ return;
+
+ /* Setup PSR once */
+ intel_edp_psr_setup(intel_dp);
+
+ /* Enable PSR on the panel */
+ intel_edp_psr_enable_sink(intel_dp);
+
+ /* Enable PSR on the host */
+ intel_edp_psr_enable_source(intel_dp);
}
-static void
-intel_dp_save(struct drm_connector *connector)
+void intel_edp_psr_enable(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ if (intel_edp_psr_match_conditions(intel_dp) &&
+ !intel_edp_is_psr_enabled(dev))
+ intel_edp_psr_do_enable(intel_dp);
+}
+
+void intel_edp_psr_disable(struct intel_dp *intel_dp)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_device *dev = intel_output->base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- dp_priv->save_DP = I915_READ(dp_priv->output_reg);
- intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET,
- dp_priv->save_link_configuration,
- sizeof (dp_priv->save_link_configuration));
+ if (!intel_edp_is_psr_enabled(dev))
+ return;
+
+ I915_WRITE(EDP_PSR_CTL(dev),
+ I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+
+ /* Wait till PSR is idle */
+ if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+ DRM_ERROR("Timed out waiting for PSR Idle State\n");
}
-static uint8_t
-intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
- int lane)
+void intel_edp_psr_update(struct drm_device *dev)
{
- int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
- int s = ((lane & 1) ?
- DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
- DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
- uint8_t l = intel_dp_link_status(link_status, i);
+ struct intel_encoder *encoder;
+ struct intel_dp *intel_dp = NULL;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
+ if (encoder->type == INTEL_OUTPUT_EDP) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
- return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ if (!is_edp_psr(dev))
+ return;
+
+ if (!intel_edp_psr_match_conditions(intel_dp))
+ intel_edp_psr_disable(intel_dp);
+		else if (!intel_edp_is_psr_enabled(dev))
+			intel_edp_psr_do_enable(intel_dp);
+ }
}
-static uint8_t
-intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
- int lane)
+static void intel_disable_dp(struct intel_encoder *encoder)
{
- int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
- int s = ((lane & 1) ?
- DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
- DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
- uint8_t l = intel_dp_link_status(link_status, i);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ enum port port = dp_to_dig_port(intel_dp)->port;
+ struct drm_device *dev = encoder->base.dev;
+
+ /* Make sure the panel is off before trying to change the mode. But also
+ * ensure that we have vdd while we switch off the panel. */
+ intel_edp_panel_vdd_on(intel_dp);
+ intel_edp_backlight_off(intel_dp);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ intel_edp_panel_off(intel_dp);
+
+	/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
+ if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
+ intel_dp_link_down(intel_dp);
+}
- return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+static void g4x_post_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ enum port port = dp_to_dig_port(intel_dp)->port;
+
+ if (port != PORT_A)
+ return;
+
+ intel_dp_link_down(intel_dp);
+ ironlake_edp_pll_off(intel_dp);
}
+static void vlv_post_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-#if 0
-static char *voltage_names[] = {
- "0.4V", "0.6V", "0.8V", "1.2V"
-};
-static char *pre_emph_names[] = {
- "0dB", "3.5dB", "6dB", "9.5dB"
-};
-static char *link_train_names[] = {
- "pattern 1", "pattern 2", "idle", "off"
-};
-#endif
+ intel_dp_link_down(intel_dp);
+}
+
+static void chv_post_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+
+ intel_dp_link_down(intel_dp);
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Propagate soft reset to data lane reset */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void intel_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+ if (WARN_ON(dp_reg & DP_PORT_EN))
+ return;
+
+ intel_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_start_link_train(intel_dp);
+ intel_edp_panel_on(intel_dp);
+ edp_panel_vdd_off(intel_dp, true);
+ intel_dp_complete_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+}
+
+static void g4x_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ intel_enable_dp(encoder);
+ intel_edp_backlight_on(intel_dp);
+}
+
+static void vlv_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ intel_edp_backlight_on(intel_dp);
+}
+
+static void g4x_pre_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+
+ intel_dp_prepare(encoder);
+
+ /* Only ilk+ has port A */
+ if (dport->port == PORT_A) {
+ ironlake_set_pll_cpu_edp(intel_dp);
+ ironlake_edp_pll_on(intel_dp);
+ }
+}
+
+static void vlv_pre_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ struct edp_power_seq power_seq;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
+ val = 0;
+ if (pipe)
+ val |= (1<<21);
+ else
+ val &= ~(1<<21);
+ val |= 0x001000c4;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ if (is_edp(intel_dp)) {
+ /* init power sequencer on this pipe and port */
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+ }
+
+ intel_enable_dp(encoder);
+
+ vlv_wait_port_ready(dev_priv, dport);
+}
+
+static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+
+ intel_dp_prepare(encoder);
+
+ /* Program Tx lane resets to default */
+ mutex_lock(&dev_priv->dpio_lock);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
+ DPIO_PCS_TX_LANE2_RESET |
+ DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
+ DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+ DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+ (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
+ DPIO_PCS_CLK_SOFT_RESET);
+
+ /* Fix up inter-pair skew failure */
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void chv_pre_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct edp_power_seq power_seq;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ int data, i;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+	/* Deassert soft data lane reset */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+
+	/* Program Tx lane latency optimal setting */
+ for (i = 0; i < 4; i++) {
+ /* Set the latency optimal bit */
+ data = (i == 1) ? 0x0 : 0x6;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
+ data << DPIO_FRC_LATENCY_SHFIT);
+
+ /* Set the upar bit */
+ data = (i == 1) ? 0x0 : 0x1;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+ data << DPIO_UPAR_SHIFT);
+ }
+
+ /* Data lane stagger programming */
+ /* FIXME: Fix up value only after power analysis */
+
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ if (is_edp(intel_dp)) {
+ /* init power sequencer on this pipe and port */
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+ }
+
+ intel_enable_dp(encoder);
+
+ vlv_wait_port_ready(dev_priv, dport);
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ *
+ * Sinks are *supposed* to come up within 1ms from an off state, but we're also
+ * supposed to retry 3 times per the spec.
+ */
+static ssize_t
+intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
+ void *buffer, size_t size)
+{
+ ssize_t ret;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ ret = drm_dp_dpcd_read(aux, offset, buffer, size);
+ if (ret == size)
+ return ret;
+ msleep(1);
+ }
+
+ return ret;
+}
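
For reference while reviewing: a minimal user-space model of the retry-with-wake pattern above. The stub transfer function stands in for drm_dp_dpcd_read() and usleep() for msleep(); everything here is illustrative, not driver code.

#include <stdio.h>
#include <unistd.h>

/* Stub standing in for drm_dp_dpcd_read(); returns bytes read,
 * failing the first two attempts to model a sleeping sink. */
static long stub_dpcd_read(unsigned int offset, void *buf, size_t size)
{
	static int attempts;
	return (++attempts < 3) ? -1 : (long)size;
}

/* Retry up to 3 times, sleeping 1 ms between attempts, as the
 * kernel helper above does for sinks that are slow to wake. */
static long dpcd_read_wake(unsigned int offset, void *buf, size_t size)
{
	long ret = -1;
	int i;

	for (i = 0; i < 3; i++) {
		ret = stub_dpcd_read(offset, buf, size);
		if (ret == (long)size)
			return ret;
		usleep(1000);	/* msleep(1) equivalent */
	}
	return ret;
}

int main(void)
{
	unsigned char rev;
	long ret = dpcd_read_wake(0x000 /* DP_DPCD_REV */, &rev, 1);

	printf("read %s after retries\n", ret == 1 ? "succeeded" : "failed");
	return 0;
}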
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+static bool
+intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+ return intel_dp_dpcd_read_wake(&intel_dp->aux,
+ DP_LANE0_1_STATUS,
+ link_status,
+ DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
+}
/*
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
-#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
static uint8_t
-intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ enum port port = dp_to_dig_port(intel_dp)->port;
+
+ if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
+ return DP_TRAIN_VOLTAGE_SWING_1200;
+ else if (IS_GEN7(dev) && port == PORT_A)
+ return DP_TRAIN_VOLTAGE_SWING_800;
+ else if (HAS_PCH_CPT(dev) && port != PORT_A)
+ return DP_TRAIN_VOLTAGE_SWING_1200;
+ else
+ return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ enum port port = dp_to_dig_port(intel_dp)->port;
+
+ if (IS_BROADWELL(dev)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else if (IS_HASWELL(dev)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_9_5;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else if (IS_VALLEYVIEW(dev)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_9_5;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else if (IS_GEN7(dev) && port == PORT_A) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ }
+}
+
+static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dport->base.base.crtc);
+ unsigned long demph_reg_value, preemph_reg_value,
+ uniqtranscale_reg_value;
+ uint8_t train_set = intel_dp->train_set[0];
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPHASIS_0:
+ preemph_reg_value = 0x0004000;
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ demph_reg_value = 0x2B405555;
+ uniqtranscale_reg_value = 0x552AB83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ demph_reg_value = 0x2B404040;
+ uniqtranscale_reg_value = 0x5548B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ demph_reg_value = 0x2B245555;
+ uniqtranscale_reg_value = 0x5560B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ demph_reg_value = 0x2B405555;
+ uniqtranscale_reg_value = 0x5598DA3A;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_3_5:
+ preemph_reg_value = 0x0002000;
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ demph_reg_value = 0x2B404040;
+ uniqtranscale_reg_value = 0x5552B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ demph_reg_value = 0x2B404848;
+ uniqtranscale_reg_value = 0x5580B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ demph_reg_value = 0x2B404040;
+ uniqtranscale_reg_value = 0x55ADDA3A;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_6:
+ preemph_reg_value = 0x0000000;
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ demph_reg_value = 0x2B305555;
+ uniqtranscale_reg_value = 0x5570B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ demph_reg_value = 0x2B2B4040;
+ uniqtranscale_reg_value = 0x55ADDA3A;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_9_5:
+ preemph_reg_value = 0x0006000;
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ demph_reg_value = 0x1B405555;
+ uniqtranscale_reg_value = 0x55ADDA3A;
+ break;
+ default:
+ return 0;
+ }
+ break;
default:
- return DP_TRAIN_PRE_EMPHASIS_0;
+ return 0;
}
+
+ mutex_lock(&dev_priv->dpio_lock);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
+ uniqtranscale_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ return 0;
+}
+
+static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
+ u32 deemph_reg_value, margin_reg_value, val;
+ uint8_t train_set = intel_dp->train_set[0];
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ int i;
+
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPHASIS_0:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ deemph_reg_value = 128;
+ margin_reg_value = 52;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ deemph_reg_value = 128;
+ margin_reg_value = 77;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ deemph_reg_value = 128;
+ margin_reg_value = 102;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ deemph_reg_value = 128;
+ margin_reg_value = 154;
+ /* FIXME extra to set for 1200 */
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_3_5:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ deemph_reg_value = 85;
+ margin_reg_value = 78;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ deemph_reg_value = 85;
+ margin_reg_value = 116;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ deemph_reg_value = 85;
+ margin_reg_value = 154;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_6:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ deemph_reg_value = 64;
+ margin_reg_value = 104;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ deemph_reg_value = 64;
+ margin_reg_value = 154;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_9_5:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ deemph_reg_value = 43;
+ margin_reg_value = 154;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ default:
+ return 0;
+ }
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Clear calc init */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+ /* Program swing deemph */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+ val &= ~DPIO_SWING_DEEMPH9P5_MASK;
+ val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+ }
+
+ /* Program swing margin */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+ val &= ~DPIO_SWING_MARGIN_MASK;
+ val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+ }
+
+ /* Disable unique transition scale */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+ }
+
+ if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
+ == DP_TRAIN_PRE_EMPHASIS_0) &&
+ ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
+ == DP_TRAIN_VOLTAGE_SWING_1200)) {
+
+		/*
+		 * The documentation says to set bit 27 for ch0 and bit 26
+		 * for ch1, which may be a typo in the doc. For now, set
+		 * bit 27 for both ch0 and ch1 for this unique transition
+		 * scale selection.
+		 */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+ val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+ val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+ }
+ }
+
+ /* Start swing calculation */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+ /* LRC Bypass */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+ val |= DPIO_LRC_BYPASS;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ return 0;
}
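
All of the per-lane writes above share one read-modify-write idiom: clear the field under its mask, then OR in the shifted value. A small standalone sketch of that shape, with a hypothetical field geometry in place of the real DPIO_SWING_* masks:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field geometry standing in for e.g. the kernel's
 * DPIO_SWING_DEEMPH9P5_MASK/SHIFT pair; the mask-then-or shape,
 * not the values, is what the loops above rely on. */
#define FIELD_SHIFT	24
#define FIELD_MASK	(0xffu << FIELD_SHIFT)

static uint32_t rmw_field(uint32_t reg, uint32_t value)
{
	reg &= ~FIELD_MASK;		/* clear the field */
	reg |= value << FIELD_SHIFT;	/* insert the new value */
	return reg;
}

int main(void)
{
	/* Four per-lane registers, as in the swing/deemph loops. */
	uint32_t lane_reg[4] = { 0x11111111, 0x22222222,
				 0x33333333, 0x44444444 };
	int i;

	for (i = 0; i < 4; i++) {
		lane_reg[i] = rmw_field(lane_reg[i], 128);
		printf("lane %d: 0x%08x\n", i, lane_reg[i]);
	}
	return 0;
}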
static void
-intel_get_adjust_train(struct intel_output *intel_output,
- uint8_t link_status[DP_LINK_STATUS_SIZE],
- int lane_count,
- uint8_t train_set[4])
+intel_get_adjust_train(struct intel_dp *intel_dp,
+ const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
uint8_t v = 0;
uint8_t p = 0;
int lane;
+ uint8_t voltage_max;
+ uint8_t preemph_max;
- for (lane = 0; lane < lane_count; lane++) {
- uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane);
- uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane);
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+ uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
if (this_v > v)
v = this_v;
@@ -835,18 +2556,20 @@ intel_get_adjust_train(struct intel_output *intel_output,
p = this_p;
}
- if (v >= I830_DP_VOLTAGE_MAX)
- v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+ voltage_max = intel_dp_voltage_max(intel_dp);
+ if (v >= voltage_max)
+ v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
- if (p >= intel_dp_pre_emphasis_max(v))
- p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+ if (p >= preemph_max)
+ p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
for (lane = 0; lane < 4; lane++)
- train_set[lane] = v | p;
+ intel_dp->train_set[lane] = v | p;
}
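
The clamping above is two saturating comparisons plus a "limit reached" flag advertised back to the sink. A self-contained sketch using the DP spec field encodings (as in drm_dp_helper.h); the chosen maxima are illustrative, standing in for intel_dp_voltage_max()/intel_dp_pre_emphasis_max():

#include <stdio.h>
#include <stdint.h>

/* Field encodings from the DisplayPort spec (drm_dp_helper.h). */
#define DP_TRAIN_VOLTAGE_SWING_800		(2 << 0)
#define DP_TRAIN_MAX_SWING_REACHED		(1 << 2)
#define DP_TRAIN_PRE_EMPHASIS_3_5		(1 << 3)
#define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED	(1 << 5)

/* Clamp the highest per-lane request to the source's limits and
 * advertise that the limit was hit, as intel_get_adjust_train() does. */
static uint8_t clamp_train_set(uint8_t v, uint8_t p,
			       uint8_t voltage_max, uint8_t preemph_max)
{
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
	return v | p;
}

int main(void)
{
	/* Sink asks for more than an 800 mV / 3.5 dB source can give. */
	uint8_t ts = clamp_train_set(3 << 0, 2 << 3,
				     DP_TRAIN_VOLTAGE_SWING_800,
				     DP_TRAIN_PRE_EMPHASIS_3_5);

	printf("train_set = 0x%02x\n", ts);
	return 0;
}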
static uint32_t
-intel_dp_signal_levels(uint8_t train_set, int lane_count)
+intel_gen4_signal_levels(uint8_t train_set)
{
uint32_t signal_levels = 0;
@@ -883,226 +2606,688 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
return signal_levels;
}
-static uint8_t
-intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
- int lane)
+/* Gen6's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen6_edp_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
+ default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      " 0x%x\n", signal_levels);
+ return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+ }
+}
+
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
{
- int i = DP_LANE0_1_STATUS + (lane >> 1);
- int s = (lane & 1) * 4;
- uint8_t l = intel_dp_link_status(link_status, i);
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_600MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
- return (l >> s) & 0xf;
+ default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      " 0x%x\n", signal_levels);
+ return EDP_LINK_TRAIN_500MV_0DB_IVB;
+ }
}
-/* Check for clock recovery is done on all channels */
-static bool
-intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_hsw_signal_levels(uint8_t train_set)
{
- int lane;
- uint8_t lane_status;
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_400MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_400MV_3_5DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_400MV_6DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
+ return DDI_BUF_EMP_400MV_9_5DB_HSW;
+
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_600MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_600MV_3_5DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_600MV_6DB_HSW;
+
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_800MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_800MV_3_5DB_HSW;
+ default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      " 0x%x\n", signal_levels);
+ return DDI_BUF_EMP_400MV_0DB_HSW;
+ }
+}
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = intel_get_lane_status(link_status, lane);
- if ((lane_status & DP_LANE_CR_DONE) == 0)
- return false;
+static uint32_t
+intel_bdw_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */
+
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */
+
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */
+
+ case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */
+
+ default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      " 0x%x\n", signal_levels);
+ return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
}
- return true;
}
-/* Check to see if channel eq is done on all channels */
-#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
- DP_LANE_CHANNEL_EQ_DONE|\
- DP_LANE_SYMBOL_LOCKED)
+/* Properly updates "DP" with the correct signal levels. */
+static void
+intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ enum port port = intel_dig_port->port;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ uint32_t signal_levels, mask;
+ uint8_t train_set = intel_dp->train_set[0];
+
+ if (IS_BROADWELL(dev)) {
+ signal_levels = intel_bdw_signal_levels(train_set);
+ mask = DDI_BUF_EMP_MASK;
+ } else if (IS_HASWELL(dev)) {
+ signal_levels = intel_hsw_signal_levels(train_set);
+ mask = DDI_BUF_EMP_MASK;
+ } else if (IS_CHERRYVIEW(dev)) {
+ signal_levels = intel_chv_signal_levels(intel_dp);
+ mask = 0;
+ } else if (IS_VALLEYVIEW(dev)) {
+ signal_levels = intel_vlv_signal_levels(intel_dp);
+ mask = 0;
+ } else if (IS_GEN7(dev) && port == PORT_A) {
+ signal_levels = intel_gen7_edp_signal_levels(train_set);
+ mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
+ } else if (IS_GEN6(dev) && port == PORT_A) {
+ signal_levels = intel_gen6_edp_signal_levels(train_set);
+ mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
+ } else {
+ signal_levels = intel_gen4_signal_levels(train_set);
+ mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
+ }
+
+ DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
+
+ *DP = (*DP & ~mask) | signal_levels;
+}
+
static bool
-intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+intel_dp_set_link_train(struct intel_dp *intel_dp,
+ uint32_t *DP,
+ uint8_t dp_train_pat)
{
- uint8_t lane_align;
- uint8_t lane_status;
- int lane;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ uint8_t buf[sizeof(intel_dp->train_set) + 1];
+ int ret, len;
- lane_align = intel_dp_link_status(link_status,
- DP_LANE_ALIGN_STATUS_UPDATED);
- if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
- return false;
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = intel_get_lane_status(link_status, lane);
- if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
- return false;
+ if (HAS_DDI(dev)) {
+ uint32_t temp = I915_READ(DP_TP_CTL(port));
+
+ if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+ temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+ else
+ temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+
+ break;
+ case DP_TRAINING_PATTERN_1:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+ break;
+ }
+ I915_WRITE(DP_TP_CTL(port), temp);
+
+ } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
+ *DP &= ~DP_LINK_TRAIN_MASK_CPT;
+
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ *DP |= DP_LINK_TRAIN_OFF_CPT;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ *DP |= DP_LINK_TRAIN_PAT_1_CPT;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ *DP |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ DRM_ERROR("DP training pattern 3 not supported\n");
+ *DP |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ }
+
+ } else {
+ *DP &= ~DP_LINK_TRAIN_MASK;
+
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ *DP |= DP_LINK_TRAIN_OFF;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ *DP |= DP_LINK_TRAIN_PAT_1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ *DP |= DP_LINK_TRAIN_PAT_2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ DRM_ERROR("DP training pattern 3 not supported\n");
+ *DP |= DP_LINK_TRAIN_PAT_2;
+ break;
+ }
}
- return true;
+
+ I915_WRITE(intel_dp->output_reg, *DP);
+ POSTING_READ(intel_dp->output_reg);
+
+ buf[0] = dp_train_pat;
+ if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
+ DP_TRAINING_PATTERN_DISABLE) {
+ /* don't write DP_TRAINING_LANEx_SET on disable */
+ len = 1;
+ } else {
+		/* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
+ memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
+ len = intel_dp->lane_count + 1;
+ }
+
+ ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
+ buf, len);
+
+ return ret == len;
+}
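
The AUX write at the end of the function relies on DP_TRAINING_LANEx_SET sitting directly after DP_TRAINING_PATTERN_SET (0x102) in the DPCD map, so the pattern byte and the per-lane drive settings go out in a single transfer. A hedged user-space sketch of just that buffer construction, with a stub in place of drm_dp_dpcd_write():

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define DP_TRAINING_PATTERN_MASK	0x3
#define DP_TRAINING_PATTERN_DISABLE	0

/* Stub standing in for drm_dp_dpcd_write(); returns bytes written. */
static long stub_dpcd_write(unsigned int offset, const void *buf, size_t len)
{
	return (long)len;
}

/* Build pattern byte + per-lane settings exactly as
 * intel_dp_set_link_train() does before the AUX write. */
static int send_training(uint8_t dp_train_pat,
			 const uint8_t *train_set, int lane_count)
{
	uint8_t buf[4 + 1];
	size_t len;

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		len = 1;	/* don't write lane settings on disable */
	} else {
		memcpy(buf + 1, train_set, lane_count);
		len = lane_count + 1;
	}

	return stub_dpcd_write(0x102 /* DP_TRAINING_PATTERN_SET */,
			       buf, len) == (long)len;
}

int main(void)
{
	uint8_t train_set[4] = { 0x02, 0x02, 0x02, 0x02 };

	/* 0x21 = pattern 1 | scrambling disable (0x20) */
	printf("sent: %d\n", send_training(0x21, train_set, 4));
	return 0;
}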
+
+static bool
+intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
+ uint8_t dp_train_pat)
+{
+ memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+ intel_dp_set_signal_levels(intel_dp, DP);
+ return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
static bool
-intel_dp_set_link_train(struct intel_output *intel_output,
- uint32_t dp_reg_value,
- uint8_t dp_train_pat,
- uint8_t train_set[4],
- bool first)
+intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
+ const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
- struct drm_device *dev = intel_output->base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
int ret;
- I915_WRITE(dp_priv->output_reg, dp_reg_value);
- POSTING_READ(dp_priv->output_reg);
- if (first)
- intel_wait_for_vblank(dev);
+ intel_get_adjust_train(intel_dp, link_status);
+ intel_dp_set_signal_levels(intel_dp, DP);
- intel_dp_aux_native_write_1(intel_output,
- DP_TRAINING_PATTERN_SET,
- dp_train_pat);
+ I915_WRITE(intel_dp->output_reg, *DP);
+ POSTING_READ(intel_dp->output_reg);
- ret = intel_dp_aux_native_write(intel_output,
- DP_TRAINING_LANE0_SET, train_set, 4);
- if (ret != 4)
- return false;
+ ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
+ intel_dp->train_set, intel_dp->lane_count);
- return true;
+ return ret == intel_dp->lane_count;
}
-static void
-intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
- uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
+static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_output->base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- uint8_t train_set[4];
- uint8_t link_status[DP_LINK_STATUS_SIZE];
+ enum port port = intel_dig_port->port;
+ uint32_t val;
+
+ if (!HAS_DDI(dev))
+ return;
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ val |= DP_TP_CTL_LINK_TRAIN_IDLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+
+ /*
+ * On PORT_A we can have only eDP in SST mode. There the only reason
+ * we need to set idle transmission mode is to work around a HW issue
+ * where we enable the pipe while not in idle link-training mode.
+	 * In this case there is a requirement to wait for a minimum number of
+ * idle patterns to be sent.
+ */
+ if (port == PORT_A)
+ return;
+
+ if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
+ 1))
+ DRM_ERROR("Timed out waiting for DP idle patterns\n");
+}
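
The wait_for() above is i915's poll-with-timeout macro; a rough user-space model of its shape follows. The fixed 100 us poll interval and the stubbed status read are both assumptions for illustration, not the macro's actual implementation.

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

/* Stub register read: reports idle-done after a few polls. */
static bool idle_done(void)
{
	static int polls;
	return ++polls >= 3;
}

/* Poll-with-timeout shape of the kernel's wait_for() above:
 * ~1 ms budget, checking every 100 us. */
static bool wait_for_idle(int timeout_us)
{
	int waited = 0;

	while (!idle_done()) {
		if (waited >= timeout_us)
			return false;	/* timed out */
		usleep(100);
		waited += 100;
	}
	return true;
}

int main(void)
{
	if (!wait_for_idle(1000))
		printf("Timed out waiting for DP idle patterns\n");
	else
		printf("idle patterns sent\n");
	return 0;
}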
+
+/* Enable corresponding port and start training pattern 1 */
+void
+intel_dp_start_link_train(struct intel_dp *intel_dp)
+{
+ struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
+ struct drm_device *dev = encoder->dev;
int i;
uint8_t voltage;
- bool clock_recovery = false;
- bool channel_eq = false;
- bool first = true;
- int tries;
+ int voltage_tries, loop_tries;
+ uint32_t DP = intel_dp->DP;
+ uint8_t link_config[2];
+
+ if (HAS_DDI(dev))
+ intel_ddi_prepare_link_retrain(encoder);
/* Write the link configuration data */
- intel_dp_aux_native_write(intel_output, 0x100,
- link_configuration, DP_LINK_CONFIGURATION_SIZE);
+ link_config[0] = intel_dp->link_bw;
+ link_config[1] = intel_dp->lane_count;
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+
+ link_config[0] = 0;
+ link_config[1] = DP_SET_ANSI_8B10B;
+ drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
DP |= DP_PORT_EN;
- DP &= ~DP_LINK_TRAIN_MASK;
- memset(train_set, 0, 4);
+
+ /* clock recovery */
+ if (!intel_dp_reset_link_train(intel_dp, &DP,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE)) {
+ DRM_ERROR("failed to enable link training\n");
+ return;
+ }
+
voltage = 0xff;
- tries = 0;
- clock_recovery = false;
+ voltage_tries = 0;
+ loop_tries = 0;
for (;;) {
- /* Use train_set[0] to set the voltage and pre emphasis values */
- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
-
- if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1,
- DP_TRAINING_PATTERN_1, train_set, first))
- break;
- first = false;
- /* Set training pattern 1 */
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
- udelay(100);
- if (!intel_dp_get_link_status(intel_output, link_status))
+ drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ DRM_ERROR("failed to get link status\n");
break;
+ }
- if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
- clock_recovery = true;
+ if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ DRM_DEBUG_KMS("clock recovery OK\n");
break;
}
/* Check to see if we've tried the max voltage */
- for (i = 0; i < dp_priv->lane_count; i++)
- if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ for (i = 0; i < intel_dp->lane_count; i++)
+ if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
- if (i == dp_priv->lane_count)
- break;
+ if (i == intel_dp->lane_count) {
+ ++loop_tries;
+ if (loop_tries == 5) {
+ DRM_ERROR("too many full retries, give up\n");
+ break;
+ }
+ intel_dp_reset_link_train(intel_dp, &DP,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE);
+ voltage_tries = 0;
+ continue;
+ }
/* Check to see if we've tried the same voltage 5 times */
- if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
- ++tries;
- if (tries == 5)
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++voltage_tries;
+ if (voltage_tries == 5) {
+ DRM_ERROR("too many voltage retries, give up\n");
break;
+ }
} else
- tries = 0;
- voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ voltage_tries = 0;
+ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- /* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+ /* Update training set as requested by target */
+ if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
+ DRM_ERROR("failed to update link training\n");
+ break;
+ }
}
+ intel_dp->DP = DP;
+}
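
The loop above juggles two counters: voltage_tries counts repeats at an unchanged swing level, loop_tries counts full training resets once every lane reports max swing. A compact user-space model of that control flow with stubbed link checks; the stubs never let clock recovery lock, so the give-up paths are what get exercised.

#include <stdio.h>
#include <stdbool.h>

/* Stubs modelling the sink's responses. */
static bool clock_recovery_ok(void) { return false; }
static bool all_lanes_max_swing(int pass) { return pass % 7 == 6; }
static int  requested_voltage(int pass) { return (pass / 2) & 3; }

int main(void)
{
	int voltage = -1, voltage_tries = 0, loop_tries = 0, pass = 0;

	for (;;) {
		if (clock_recovery_ok()) {
			printf("clock recovery OK\n");
			break;
		}
		/* Every lane already at max swing: reset and retrain. */
		if (all_lanes_max_swing(pass)) {
			if (++loop_tries == 5) {
				printf("too many full retries, give up\n");
				break;
			}
			voltage_tries = 0;
			pass++;
			continue;
		}
		/* Same swing level requested again: count it. */
		if (requested_voltage(pass) == voltage) {
			if (++voltage_tries == 5) {
				printf("too many voltage retries, give up\n");
				break;
			}
		} else {
			voltage_tries = 0;
		}
		voltage = requested_voltage(pass);
		pass++;
	}
	return 0;
}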
+
+void
+intel_dp_complete_link_train(struct intel_dp *intel_dp)
+{
+ bool channel_eq = false;
+ int tries, cr_tries;
+ uint32_t DP = intel_dp->DP;
+ uint32_t training_pattern = DP_TRAINING_PATTERN_2;
+
+	/* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
+ if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
+ training_pattern = DP_TRAINING_PATTERN_3;
+
/* channel equalization */
+ if (!intel_dp_set_link_train(intel_dp, &DP,
+ training_pattern |
+ DP_LINK_SCRAMBLING_DISABLE)) {
+ DRM_ERROR("failed to start channel equalization\n");
+ return;
+ }
+
tries = 0;
+ cr_tries = 0;
channel_eq = false;
for (;;) {
- /* Use train_set[0] to set the voltage and pre emphasis values */
- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
-
- /* channel eq pattern */
- if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2,
- DP_TRAINING_PATTERN_2, train_set,
- false))
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+ if (cr_tries > 5) {
+ DRM_ERROR("failed to train DP, aborting\n");
break;
+ }
- udelay(400);
- if (!intel_dp_get_link_status(intel_output, link_status))
+ drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ DRM_ERROR("failed to get link status\n");
break;
+ }
+
+ /* Make sure clock is still ok */
+ if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_set_link_train(intel_dp, &DP,
+ training_pattern |
+ DP_LINK_SCRAMBLING_DISABLE);
+ cr_tries++;
+ continue;
+ }
- if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
+ if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
channel_eq = true;
break;
}
- /* Try 5 times */
- if (tries > 5)
- break;
+ /* Try 5 times, then try clock recovery if that fails */
+ if (tries > 5) {
+ intel_dp_link_down(intel_dp);
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_set_link_train(intel_dp, &DP,
+ training_pattern |
+ DP_LINK_SCRAMBLING_DISABLE);
+ tries = 0;
+ cr_tries++;
+ continue;
+ }
- /* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+ /* Update training set as requested by target */
+ if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
+ DRM_ERROR("failed to update link training\n");
+ break;
+ }
++tries;
}
- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
- POSTING_READ(dp_priv->output_reg);
- intel_dp_aux_native_write_1(intel_output,
- DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+ intel_dp_set_idle_link_train(intel_dp);
+
+ intel_dp->DP = DP;
+
+ if (channel_eq)
+ DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
+}
+
+void intel_dp_stop_link_train(struct intel_dp *intel_dp)
+{
+ intel_dp_set_link_train(intel_dp, &intel_dp->DP,
+ DP_TRAINING_PATTERN_DISABLE);
}
static void
-intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
+intel_dp_link_down(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_output->base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ enum port port = intel_dig_port->port;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(intel_dig_port->base.base.crtc);
+ uint32_t DP = intel_dp->DP;
+
+ if (WARN_ON(HAS_DDI(dev)))
+ return;
+
+ if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
+ return;
DRM_DEBUG_KMS("\n");
- if (IS_eDP(intel_output)) {
- DP &= ~DP_PLL_ENABLE;
- I915_WRITE(dp_priv->output_reg, DP);
- POSTING_READ(dp_priv->output_reg);
- udelay(100);
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+ } else {
+ DP &= ~DP_LINK_TRAIN_MASK;
+ I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
}
+ POSTING_READ(intel_dp->output_reg);
+
+ if (HAS_PCH_IBX(dev) &&
+ I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+
+ /* Hardware workaround: leaving our transcoder select
+ * set to transcoder B while it's off will prevent the
+ * corresponding HDMI output on transcoder A.
+ *
+ * Combine this with another hardware workaround:
+ * transcoder select bit can only be cleared while the
+ * port is enabled.
+ */
+ DP &= ~DP_PIPEB_SELECT;
+ I915_WRITE(intel_dp->output_reg, DP);
- DP &= ~DP_LINK_TRAIN_MASK;
- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
- POSTING_READ(dp_priv->output_reg);
+ /* Changes to enable or select take place the vblank
+ * after being written.
+ */
+ if (WARN_ON(crtc == NULL)) {
+ /* We should never try to disable a port without a crtc
+ * attached. For paranoia keep the code around for a
+ * bit. */
+ POSTING_READ(intel_dp->output_reg);
+ msleep(50);
+ } else
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ }
+
+ DP &= ~DP_AUDIO_OUTPUT_ENABLE;
+ I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(intel_dp->output_reg);
+ msleep(intel_dp->panel_power_down_delay);
+}
+
+static bool
+intel_dp_get_dpcd(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
+
+ if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
+ sizeof(intel_dp->dpcd)) < 0)
+ return false; /* aux transfer failed */
- udelay(17000);
+ hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
+ 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+ DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
- if (IS_eDP(intel_output))
- DP |= DP_LINK_TRAIN_OFF;
- I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
- POSTING_READ(dp_priv->output_reg);
+ if (intel_dp->dpcd[DP_DPCD_REV] == 0)
+ return false; /* DPCD not present */
+
+ /* Check if the panel supports PSR */
+ memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
+ if (is_edp(intel_dp)) {
+ intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
+ intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+ if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+ dev_priv->psr.sink_support = true;
+ DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+ }
+ }
+
+ /* Training Pattern 3 support */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
+ intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
+ intel_dp->use_tps3 = true;
+		DRM_DEBUG_KMS("DisplayPort TPS3 supported\n");
+ } else
+ intel_dp->use_tps3 = false;
+
+ if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_PRESENT))
+ return true; /* native DP sink */
+
+ if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
+ return true; /* no per-port downstream info */
+
+ if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
+ intel_dp->downstream_ports,
+ DP_MAX_DOWNSTREAM_PORTS) < 0)
+ return false; /* downstream port status fetch failed */
+
+ return true;
}
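
The capability tests above are plain byte checks against the cached DPCD. A self-contained sketch of the TPS3 and downstream-port tests, using the spec offsets and bits (DP_DPCD_REV 0x000, DP_MAX_LANE_COUNT 0x002 with TPS3 in bit 6, DP_DOWNSTREAMPORT_PRESENT 0x005); the sample DPCD bytes are made up:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define DP_DPCD_REV			0x000
#define DP_MAX_LANE_COUNT		0x002
#define  DP_TPS3_SUPPORTED		(1 << 6)
#define DP_DOWNSTREAMPORT_PRESENT	0x005
#define  DP_DWN_STRM_PORT_PRESENT	(1 << 0)

int main(void)
{
	/* First bytes of a hypothetical DPCD: DP 1.2, 4 lanes + TPS3,
	 * no downstream port. */
	uint8_t dpcd[16] = { [DP_DPCD_REV] = 0x12,
			     [DP_MAX_LANE_COUNT] = 0x04 | DP_TPS3_SUPPORTED };

	bool use_tps3 = dpcd[DP_DPCD_REV] >= 0x12 &&
			(dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED);
	bool native_sink = !(dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			     DP_DWN_STRM_PORT_PRESENT);

	printf("TPS3: %s, native DP sink: %s\n",
	       use_tps3 ? "yes" : "no", native_sink ? "yes" : "no");
	return 0;
}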
static void
-intel_dp_restore(struct drm_connector *connector)
+intel_dp_probe_oui(struct intel_dp *intel_dp)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ u8 buf[3];
- if (dp_priv->save_DP & DP_PORT_EN)
- intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration);
- else
- intel_dp_link_down(intel_output, dp_priv->save_DP);
+ if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+ return;
+
+ intel_edp_panel_vdd_on(intel_dp);
+
+ if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
+ DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
+ DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ edp_panel_vdd_off(intel_dp, false);
+}
+
+int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(intel_dig_port->base.base.crtc);
+ u8 buf[1];
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
+ return -EAGAIN;
+
+ if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
+ return -ENOTTY;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
+ DP_TEST_SINK_START) < 0)
+ return -EAGAIN;
+
+ /* Wait 2 vblanks to be sure we will have the correct CRC value */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
+ return -EAGAIN;
+
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
+ return 0;
+}
+
+static bool
+intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+{
+ return intel_dp_dpcd_read_wake(&intel_dp->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector, 1) == 1;
+}
+
+static void
+intel_dp_handle_test_request(struct intel_dp *intel_dp)
+{
+ /* NAK by default */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
/*
@@ -1114,122 +3299,296 @@ intel_dp_restore(struct drm_connector *connector)
* 4. Check link status on receipt of hot-plug interrupt
*/
-static void
-intel_dp_check_link_status(struct intel_output *intel_output)
+void
+intel_dp_check_link_status(struct intel_dp *intel_dp)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- uint8_t link_status[DP_LINK_STATUS_SIZE];
+ struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+ u8 sink_irq_vector;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ /* FIXME: This access isn't protected by any locks. */
+ if (!intel_encoder->connectors_active)
+ return;
+
+ if (WARN_ON(!intel_encoder->base.crtc))
+ return;
- if (!intel_output->enc.crtc)
+ /* Try to read receiver status if the link appears to be up */
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
return;
+ }
- if (!intel_dp_get_link_status(intel_output, link_status)) {
- intel_dp_link_down(intel_output, dp_priv->DP);
+ /* Now read the DPCD to see if it's actually running */
+ if (!intel_dp_get_dpcd(intel_dp)) {
return;
}
- if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
- intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
+ /* Try to read the source of the interrupt */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
+ /* Clear interrupt source */
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector);
+
+ if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
+ intel_dp_handle_test_request(intel_dp);
+ if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
+ DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
+ }
+
+ if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
+ DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
+ intel_encoder->base.name);
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+ }
}
+/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
-ironlake_dp_detect(struct drm_connector *connector)
+intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- enum drm_connector_status status;
+ uint8_t *dpcd = intel_dp->dpcd;
+ uint8_t type;
- status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_output,
- 0x000, dp_priv->dpcd,
- sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
- {
- if (dp_priv->dpcd[0] != 0)
- status = connector_status_connected;
+ if (!intel_dp_get_dpcd(intel_dp))
+ return connector_status_disconnected;
+
+ /* if there's no downstream port, we're done */
+ if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
+ return connector_status_connected;
+
+ /* If we're HPD-aware, SINK_COUNT changes dynamically */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
+ uint8_t reg;
+
+ if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
+ &reg, 1) < 0)
+ return connector_status_unknown;
+
+ return DP_GET_SINK_COUNT(reg) ? connector_status_connected
+ : connector_status_disconnected;
}
- return status;
+
+ /* If no HPD, poke DDC gently */
+ if (drm_probe_ddc(&intel_dp->aux.ddc))
+ return connector_status_connected;
+
+ /* Well we tried, say unknown for unreliable port types */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+ type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+ if (type == DP_DS_PORT_TYPE_VGA ||
+ type == DP_DS_PORT_TYPE_NON_EDID)
+ return connector_status_unknown;
+ } else {
+ type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_TYPE_MASK;
+ if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
+ type == DP_DWN_STRM_PORT_TYPE_OTHER)
+ return connector_status_unknown;
+ }
+
+ /* Anything else is out of spec, warn and ignore */
+ DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
+ return connector_status_disconnected;
}
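
For HPD-aware branch devices the deciding value is SINK_COUNT, whose bits are split across the register; DP_GET_SINK_COUNT() in drm_dp_helper.h reassembles them. A tiny sketch of that decode:

#include <stdio.h>
#include <stdint.h>

#define DP_SINK_COUNT		0x200
#define DP_GET_SINK_COUNT(x)	((((x) & 0x80) >> 2) | ((x) & 0x3f))

int main(void)
{
	/* Zero sinks means "disconnected" even though the branch
	 * device itself answers on AUX. */
	uint8_t reg = 0x01;

	printf("status: %s\n", DP_GET_SINK_COUNT(reg) ?
	       "connected" : "disconnected");
	return 0;
}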
-/**
- * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
- *
- * \return true if DP port is connected.
- * \return false if DP port is disconnected.
- */
static enum drm_connector_status
-intel_dp_detect(struct drm_connector *connector)
+ironlake_dp_detect(struct intel_dp *intel_dp)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_device *dev = intel_output->base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- uint32_t temp, bit;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum drm_connector_status status;
- dp_priv->has_audio = false;
+ /* Can't disconnect eDP, but you can close the lid... */
+ if (is_edp(intel_dp)) {
+ status = intel_panel_detect(dev);
+ if (status == connector_status_unknown)
+ status = connector_status_connected;
+ return status;
+ }
- if (IS_IRONLAKE(dev))
- return ironlake_dp_detect(connector);
+ if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+ return connector_status_disconnected;
- temp = I915_READ(PORT_HOTPLUG_EN);
+ return intel_dp_detect_dpcd(intel_dp);
+}
- I915_WRITE(PORT_HOTPLUG_EN,
- temp |
- DPB_HOTPLUG_INT_EN |
- DPC_HOTPLUG_INT_EN |
- DPD_HOTPLUG_INT_EN);
+static enum drm_connector_status
+g4x_dp_detect(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ uint32_t bit;
- POSTING_READ(PORT_HOTPLUG_EN);
+ /* Can't disconnect eDP, but you can close the lid... */
+ if (is_edp(intel_dp)) {
+ enum drm_connector_status status;
- switch (dp_priv->output_reg) {
- case DP_B:
- bit = DPB_HOTPLUG_INT_STATUS;
- break;
- case DP_C:
- bit = DPC_HOTPLUG_INT_STATUS;
- break;
- case DP_D:
- bit = DPD_HOTPLUG_INT_STATUS;
- break;
- default:
- return connector_status_unknown;
+ status = intel_panel_detect(dev);
+ if (status == connector_status_unknown)
+ status = connector_status_connected;
+ return status;
}
- temp = I915_READ(PORT_HOTPLUG_STAT);
+ if (IS_VALLEYVIEW(dev)) {
+ switch (intel_dig_port->port) {
+ case PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ case PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ case PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ default:
+ return connector_status_unknown;
+ }
+ } else {
+ switch (intel_dig_port->port) {
+ case PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ case PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ case PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ default:
+ return connector_status_unknown;
+ }
+ }
- if ((temp & bit) == 0)
+ if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
return connector_status_disconnected;
- status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_output,
- 0x000, dp_priv->dpcd,
- sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
- {
- if (dp_priv->dpcd[0] != 0)
- status = connector_status_connected;
+ return intel_dp_detect_dpcd(intel_dp);
+}
+
+static struct edid *
+intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ /* use cached edid if we have one */
+ if (intel_connector->edid) {
+ /* invalid edid */
+ if (IS_ERR(intel_connector->edid))
+ return NULL;
+
+ return drm_edid_duplicate(intel_connector->edid);
}
+
+ return drm_get_edid(connector, adapter);
+}
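
The cache above has three states: no EDID (probe DDC), a valid cached EDID (duplicate it), and an error pointer recording that a previous probe returned garbage (fail fast, don't re-probe). A user-space model with a sentinel standing in for IS_ERR() and stubs for drm_get_edid()/drm_edid_duplicate():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct edid { unsigned char blob[128]; };

/* Sentinel modelling IS_ERR(): the cache can hold "we probed and
 * the EDID was invalid", which must not trigger a re-probe. */
static struct edid *const EDID_INVALID = (struct edid *)-1;

static struct edid *probe_ddc_edid(void)
{
	return calloc(1, sizeof(struct edid));	/* drm_get_edid() stand-in */
}

static struct edid *get_edid(struct edid *cached)
{
	if (cached) {
		if (cached == EDID_INVALID)
			return NULL;	/* known-bad panel EDID */

		struct edid *dup = malloc(sizeof(*dup));
		if (dup)
			memcpy(dup, cached, sizeof(*dup));
		return dup;	/* drm_edid_duplicate() stand-in */
	}
	return probe_ddc_edid();
}

int main(void)
{
	struct edid *e = get_edid(EDID_INVALID);

	printf("invalid cache -> %p\n", (void *)e);
	e = get_edid(NULL);
	printf("no cache -> probed %p\n", (void *)e);
	free(e);
	return 0;
}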
+
+static int
+intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ /* use cached edid if we have one */
+ if (intel_connector->edid) {
+ /* invalid edid */
+ if (IS_ERR(intel_connector->edid))
+ return 0;
+
+ return intel_connector_update_modes(connector,
+ intel_connector->edid);
+ }
+
+ return intel_ddc_get_modes(connector, adapter);
+}
+
+static enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector, bool force)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum drm_connector_status status;
+ enum intel_display_power_domain power_domain;
+ struct edid *edid = NULL;
+
+ intel_runtime_pm_get(dev_priv);
+
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
+ intel_dp->has_audio = false;
+
+ if (HAS_PCH_SPLIT(dev))
+ status = ironlake_dp_detect(intel_dp);
+ else
+ status = g4x_dp_detect(intel_dp);
+
+ if (status != connector_status_connected)
+ goto out;
+
+ intel_dp_probe_oui(intel_dp);
+
+ if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
+ intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
+ } else {
+ edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
+ if (edid) {
+ intel_dp->has_audio = drm_detect_monitor_audio(edid);
+ kfree(edid);
+ }
+ }
+
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ status = connector_status_connected;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ intel_runtime_pm_put(dev_priv);
+
return status;
}
static int intel_dp_get_modes(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_device *dev = intel_output->base.dev;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
- ret = intel_ddc_get_modes(intel_output);
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
+ ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc);
+ intel_display_power_put(dev_priv, power_domain);
if (ret)
return ret;
- /* if eDP has no EDID, try to use fixed panel mode from VBT */
- if (IS_eDP(intel_output)) {
- if (dev_priv->panel_fixed_mode != NULL) {
- struct drm_display_mode *mode;
- mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
+ /* if eDP has no EDID, fall back to fixed mode */
+ if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(dev,
+ intel_connector->panel.fixed_mode);
+ if (mode) {
drm_mode_probed_add(connector, mode);
return 1;
}
@@ -1237,33 +3596,164 @@ static int intel_dp_get_modes(struct drm_connector *connector)
return 0;
}
+static bool
+intel_dp_detect_audio(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
+ struct edid *edid;
+ bool has_audio = false;
+
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
+ edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
+ if (edid) {
+ has_audio = drm_detect_monitor_audio(edid);
+ kfree(edid);
+ }
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return has_audio;
+}
+
+static int
+intel_dp_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ int ret;
+
+ ret = drm_object_property_set_value(&connector->base, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ int i = val;
+ bool has_audio;
+
+ if (i == intel_dp->force_audio)
+ return 0;
+
+ intel_dp->force_audio = i;
+
+ if (i == HDMI_AUDIO_AUTO)
+ has_audio = intel_dp_detect_audio(connector);
+ else
+ has_audio = (i == HDMI_AUDIO_ON);
+
+ if (has_audio == intel_dp->has_audio)
+ return 0;
+
+ intel_dp->has_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ bool old_auto = intel_dp->color_range_auto;
+ uint32_t old_range = intel_dp->color_range;
+
+ switch (val) {
+ case INTEL_BROADCAST_RGB_AUTO:
+ intel_dp->color_range_auto = true;
+ break;
+ case INTEL_BROADCAST_RGB_FULL:
+ intel_dp->color_range_auto = false;
+ intel_dp->color_range = 0;
+ break;
+ case INTEL_BROADCAST_RGB_LIMITED:
+ intel_dp->color_range_auto = false;
+ intel_dp->color_range = DP_COLOR_RANGE_16_235;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (old_auto == intel_dp->color_range_auto &&
+ old_range == intel_dp->color_range)
+ return 0;
+
+ goto done;
+ }
+
+ if (is_edp(intel_dp) &&
+ property == connector->dev->mode_config.scaling_mode_property) {
+ if (val == DRM_MODE_SCALE_NONE) {
+ DRM_DEBUG_KMS("no scaling not supported\n");
+ return -EINVAL;
+ }
+
+ if (intel_connector->panel.fitting_mode == val) {
+ /* the eDP scaling property is not changed */
+ return 0;
+ }
+ intel_connector->panel.fitting_mode = val;
+
+ goto done;
+ }
+
+ return -EINVAL;
+
+done:
+ if (intel_encoder->base.crtc)
+ intel_crtc_restore_mode(intel_encoder->base.crtc);
+
+ return 0;
+}
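
The handler above follows one early-out discipline for every property: persist the value in the DRM object first, return 0 as soon as the cached state shows nothing changed, and only fall through to "done" (and the expensive intel_crtc_restore_mode()) when hardware-visible state really moved. A condensed sketch of that shape, with a hypothetical cached_val standing in for force_audio/color_range/fitting_mode:

	static int example_set_property(struct drm_connector *connector,
					struct drm_property *property,
					uint64_t val, uint64_t *cached_val,
					struct drm_crtc *crtc)
	{
		int ret;

		/* persist the new value in the drm object first */
		ret = drm_object_property_set_value(&connector->base,
						    property, val);
		if (ret)
			return ret;

		if (*cached_val == val)
			return 0;	/* no change, skip the modeset */

		*cached_val = val;
		if (crtc)		/* only restore when bound to a crtc */
			intel_crtc_restore_mode(crtc);
		return 0;
	}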
+
static void
-intel_dp_destroy (struct drm_connector *connector)
+intel_dp_connector_destroy(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ kfree(intel_connector->edid);
+
+ /* Can't call is_edp() since the encoder may have been destroyed
+ * already. */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ intel_panel_fini(&intel_connector->panel);
- if (intel_output->i2c_bus)
- intel_i2c_destroy(intel_output->i2c_bus);
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_output);
+ kfree(connector);
}
-static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
- .dpms = intel_dp_dpms,
- .mode_fixup = intel_dp_mode_fixup,
- .prepare = intel_encoder_prepare,
- .mode_set = intel_dp_mode_set,
- .commit = intel_encoder_commit,
-};
+void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ drm_dp_aux_unregister(&intel_dp->aux);
+ drm_encoder_cleanup(encoder);
+ if (is_edp(intel_dp)) {
+ cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ edp_panel_vdd_off_sync(intel_dp);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ if (intel_dp->edp_notifier.notifier_call) {
+ unregister_reboot_notifier(&intel_dp->edp_notifier);
+ intel_dp->edp_notifier.notifier_call = NULL;
+ }
+ }
+ kfree(intel_dig_port);
+}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .save = intel_dp_save,
- .restore = intel_dp_restore,
+ .dpms = intel_connector_dpms,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = intel_dp_destroy,
+ .set_property = intel_dp_set_property,
+ .destroy = intel_dp_connector_destroy,
};
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
@@ -1272,171 +3762,582 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
.best_encoder = intel_best_encoder,
};
-static void intel_dp_enc_destroy(struct drm_encoder *encoder)
+static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+ .destroy = intel_dp_encoder_destroy,
+};
+
+static void
+intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
- drm_encoder_cleanup(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+ intel_dp_check_link_status(intel_dp);
}
-static const struct drm_encoder_funcs intel_dp_enc_funcs = {
- .destroy = intel_dp_enc_destroy,
-};
+/* Return which DP Port should be selected for Transcoder DP control */
+int
+intel_trans_dp_port_sel(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *intel_encoder;
+ struct intel_dp *intel_dp;
-void
-intel_dp_hot_plug(struct intel_output *intel_output)
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
+ return intel_dp->output_reg;
+ }
+
+ return -1;
+}
+
+/* check the VBT to see whether the eDP is on DP-D port */
+bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ union child_device_config *p_child;
+ int i;
+ static const short port_mapping[] = {
+ [PORT_B] = PORT_IDPB,
+ [PORT_C] = PORT_IDPC,
+ [PORT_D] = PORT_IDPD,
+ };
+
+ if (port == PORT_A)
+ return true;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ p_child = dev_priv->vbt.child_dev + i;
- if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
- intel_dp_check_link_status(intel_output);
+ if (p_child->common.dvo_port == port_mapping[port] &&
+ (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
+ (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
+ return true;
+ }
+ return false;
}
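
The masked compare in the loop is the usual "test only the bits that matter" idiom: device_type matches when all of its eDP-relevant bits agree with the canonical DEVICE_TYPE_eDP value, whatever the other bits say. Written out as a helper (illustrative only):

	/* true iff the eDP-relevant bits of type match DEVICE_TYPE_eDP */
	static bool example_type_is_edp(u16 type)
	{
		return (type & DEVICE_TYPE_eDP_BITS) ==
		       (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS);
	}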
-/*
- * Enumerate the child dev array parsed from VBT to check whether
- * the given DP is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it is assumed that the given
- * DP is present.
- */
-static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg)
+
+static void
+intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ intel_attach_force_audio_property(connector);
+ intel_attach_broadcast_rgb_property(connector);
+ intel_dp->color_range_auto = true;
+
+ if (is_edp(intel_dp)) {
+ drm_mode_create_scaling_mode_property(connector->dev);
+ drm_object_attach_property(
+ &connector->base,
+ connector->dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_ASPECT);
+ intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+ }
+}
+
+static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
+{
+ intel_dp->last_power_cycle = jiffies;
+ intel_dp->last_power_on = jiffies;
+ intel_dp->last_backlight_off = jiffies;
+}
+
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *out)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
- int i, dp_port, ret;
-
- if (!dev_priv->child_dev_num)
- return 1;
-
- dp_port = 0;
- if (dp_reg == DP_B || dp_reg == PCH_DP_B)
- dp_port = PORT_IDPB;
- else if (dp_reg == DP_C || dp_reg == PCH_DP_C)
- dp_port = PORT_IDPC;
- else if (dp_reg == DP_D || dp_reg == PCH_DP_D)
- dp_port = PORT_IDPD;
-
- ret = 0;
- for (i = 0; i < dev_priv->child_dev_num; i++) {
- p_child = dev_priv->child_dev + i;
- /*
- * If the device type is not DP, continue.
- */
- if (p_child->device_type != DEVICE_TYPE_DP &&
- p_child->device_type != DEVICE_TYPE_eDP)
- continue;
- /* Find the eDP port */
- if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) {
- ret = 1;
- break;
- }
- /* Find the DP port */
- if (p_child->dvo_port == dp_port) {
- ret = 1;
- break;
- }
+ struct edp_power_seq cur, vbt, spec, final;
+ u32 pp_on, pp_off, pp_div, pp;
+ int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ pp_ctrl_reg = PCH_PP_CONTROL;
+ pp_on_reg = PCH_PP_ON_DELAYS;
+ pp_off_reg = PCH_PP_OFF_DELAYS;
+ pp_div_reg = PCH_PP_DIVISOR;
+ } else {
+ enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+ pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
+ pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+ pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
+ pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
}
- return ret;
+
+ /* Workaround: Need to write PP_CONTROL with the unlock key as
+ * the very first thing. */
+ pp = ironlake_get_pp_control(intel_dp);
+ I915_WRITE(pp_ctrl_reg, pp);
+
+ pp_on = I915_READ(pp_on_reg);
+ pp_off = I915_READ(pp_off_reg);
+ pp_div = I915_READ(pp_div_reg);
+
+ /* Pull timing values out of registers */
+ cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+ PANEL_POWER_UP_DELAY_SHIFT;
+
+ cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+ PANEL_LIGHT_ON_DELAY_SHIFT;
+
+ cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+ PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+ cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+ PANEL_POWER_DOWN_DELAY_SHIFT;
+
+ cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+
+ DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+ vbt = dev_priv->vbt.edp_pps;
+
+ /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+ * our hw here, which are all in 100usec. */
+ spec.t1_t3 = 210 * 10;
+ spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+ spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+ spec.t10 = 500 * 10;
+	/* This one is special: it is in units of 100 ms and zero-based
+	 * in the hw (so we need to add 100 ms). But the sw vbt table
+	 * multiplies it by 1000 to make it units of 100 us, too. */
+ spec.t11_t12 = (510 + 100) * 10;
+
+ DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+
+ /* Use the max of the register settings and vbt. If both are
+ * unset, fall back to the spec limits. */
+#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
+ spec.field : \
+ max(cur.field, vbt.field))
+ assign_final(t1_t3);
+ assign_final(t8);
+ assign_final(t9);
+ assign_final(t10);
+ assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
+ intel_dp->panel_power_up_delay = get_delay(t1_t3);
+ intel_dp->backlight_on_delay = get_delay(t8);
+ intel_dp->backlight_off_delay = get_delay(t9);
+ intel_dp->panel_power_down_delay = get_delay(t10);
+ intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
+
+ DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
+
+ DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+ if (out)
+ *out = final;
}
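
The assign_final()/get_delay() macro pair above encodes a simple per-field policy: take the larger of the current register value and the VBT value, fall back to the eDP spec limit only when both are zero, then convert from the hardware's 100 us units to milliseconds, rounding up. Open-coded for a single field, the policy looks like this (values in 100 us units, as above):

	static int example_pick_delay_ms(u32 cur, u32 vbt, u32 spec)
	{
		u32 final = max(cur, vbt);

		if (final == 0)		/* neither hw nor VBT set: use spec */
			final = spec;

		return DIV_ROUND_UP(final, 10);	/* 100 us units -> ms */
	}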
-void
-intel_dp_init(struct drm_device *dev, int output_reg)
+
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *seq)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
- struct intel_output *intel_output;
- struct intel_dp_priv *dp_priv;
- const char *name = NULL;
+ u32 pp_on, pp_off, pp_div, port_sel = 0;
+ int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
+ int pp_on_reg, pp_off_reg, pp_div_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ pp_on_reg = PCH_PP_ON_DELAYS;
+ pp_off_reg = PCH_PP_OFF_DELAYS;
+ pp_div_reg = PCH_PP_DIVISOR;
+ } else {
+ enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+ pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+ pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
+ pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
+ }
- if (!dp_is_present_in_vbt(dev, output_reg)) {
- DRM_DEBUG_KMS("DP is not present. Ignore it\n");
+ /*
+ * And finally store the new values in the power sequencer. The
+ * backlight delays are set to 1 because we do manual waits on them. For
+ * T8, even BSpec recommends doing it. For T9, if we don't do this,
+ * we'll end up waiting for the backlight off delay twice: once when we
+ * do the manual sleep, and once when we disable the panel and wait for
+ * the PP_STATUS bit to become zero.
+ */
+ pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
+ (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
+ pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+ (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
+ /* Compute the divisor for the pp clock, simply match the Bspec
+ * formula. */
+ pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
+ pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
+ << PANEL_POWER_CYCLE_DELAY_SHIFT);
+
+ /* Haswell doesn't have any port selection bits for the panel
+ * power sequencer any more. */
+ if (IS_VALLEYVIEW(dev)) {
+ if (dp_to_dig_port(intel_dp)->port == PORT_B)
+ port_sel = PANEL_PORT_SELECT_DPB_VLV;
+ else
+ port_sel = PANEL_PORT_SELECT_DPC_VLV;
+ } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ if (dp_to_dig_port(intel_dp)->port == PORT_A)
+ port_sel = PANEL_PORT_SELECT_DPA;
+ else
+ port_sel = PANEL_PORT_SELECT_DPD;
+ }
+
+ pp_on |= port_sel;
+
+ I915_WRITE(pp_on_reg, pp_on);
+ I915_WRITE(pp_off_reg, pp_off);
+ I915_WRITE(pp_div_reg, pp_div);
+
+ DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+ I915_READ(pp_on_reg),
+ I915_READ(pp_off_reg),
+ I915_READ(pp_div_reg));
+}
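
A worked example of the divisor math above, under assumed inputs (div = 125, i.e. a 125 MHz raw clock, and seq->t11_t12 = 6100, i.e. 610 ms in 100 us units):

	/*
	 * reference divider = (100 * 125) / 2 - 1      = 6249
	 * power cycle field = DIV_ROUND_UP(6100, 1000) = 7
	 *
	 * pp_div then packs 6249 << PP_REFERENCE_DIVIDER_SHIFT together
	 * with 7 << PANEL_POWER_CYCLE_DELAY_SHIFT.
	 */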
+
+void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *encoder;
+ struct intel_dp *intel_dp = NULL;
+ struct intel_crtc_config *config = NULL;
+ struct intel_crtc *intel_crtc = NULL;
+ struct intel_connector *intel_connector = dev_priv->drrs.connector;
+ u32 reg, val;
+ enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;
+
+ if (refresh_rate <= 0) {
+ DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
return;
}
- intel_output = kcalloc(sizeof(struct intel_output) +
- sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
- if (!intel_output)
+
+ if (intel_connector == NULL) {
+ DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
return;
+ }
- dp_priv = (struct intel_dp_priv *)(intel_output + 1);
+ if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
+ DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
+ return;
+ }
- connector = &intel_output->base;
- drm_connector_init(dev, connector, &intel_dp_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
- drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+ encoder = intel_attached_encoder(&intel_connector->base);
+ intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_crtc = encoder->new_crtc;
+
+ if (!intel_crtc) {
+ DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
+ return;
+ }
+
+ config = &intel_crtc->config;
+
+ if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
+ DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
+ return;
+ }
+
+ if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
+ index = DRRS_LOW_RR;
+
+ if (index == intel_dp->drrs_state.refresh_rate_type) {
+ DRM_DEBUG_KMS(
+ "DRRS requested for previously set RR...ignoring\n");
+ return;
+ }
+
+ if (!intel_crtc->active) {
+ DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
+ return;
+ }
+
+ if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
+ reg = PIPECONF(intel_crtc->config.cpu_transcoder);
+ val = I915_READ(reg);
+ if (index > DRRS_HIGH_RR) {
+ val |= PIPECONF_EDP_RR_MODE_SWITCH;
+ intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
+ } else {
+ val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
+ }
+ I915_WRITE(reg, val);
+ }
+
+ /*
+	 * mutex taken to ensure that there is no race between different
+	 * drrs calls trying to update the refresh rate. This scenario may
+	 * occur in the future, once idleness-detection-based DRRS in the
+	 * kernel and user-space calls setting a different RR can race.
+ */
+
+ mutex_lock(&intel_dp->drrs_state.mutex);
+
+ intel_dp->drrs_state.refresh_rate_type = index;
+
+ mutex_unlock(&intel_dp->drrs_state.mutex);
+
+ DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
+}
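
The refresh-rate classification above is a two-way choice: only a rate that exactly matches the panel's downclocked mode selects DRRS_LOW_RR; everything else stays at the default DRRS_HIGH_RR. As a minimal sketch:

	static enum edp_drrs_refresh_rate_type
	example_classify_rr(struct intel_connector *conn, int refresh_rate)
	{
		if (conn->panel.downclock_mode &&
		    conn->panel.downclock_mode->vrefresh == refresh_rate)
			return DRRS_LOW_RR;

		return DRRS_HIGH_RR;	/* default: full rate */
	}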
+
+static struct drm_display_mode *
+intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector,
+ struct drm_display_mode *fixed_mode)
+{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *downclock_mode = NULL;
+
+ if (INTEL_INFO(dev)->gen <= 6) {
+ DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
+ return NULL;
+ }
+
+ if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
+ DRM_INFO("VBT doesn't support DRRS\n");
+ return NULL;
+ }
+
+ downclock_mode = intel_find_panel_downclock
+ (dev, fixed_mode, connector);
+
+ if (!downclock_mode) {
+ DRM_INFO("DRRS not supported\n");
+ return NULL;
+ }
+
+ dev_priv->drrs.connector = intel_connector;
+
+ mutex_init(&intel_dp->drrs_state.mutex);
+
+ intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
+
+ intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
+ DRM_INFO("seamless DRRS supported for eDP panel.\n");
+ return downclock_mode;
+}
+
+static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ struct intel_connector *intel_connector,
+ struct edp_power_seq *power_seq)
+{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *fixed_mode = NULL;
+ struct drm_display_mode *downclock_mode = NULL;
+ bool has_dpcd;
+ struct drm_display_mode *scan;
+ struct edid *edid;
+
+ intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
+
+ if (!is_edp(intel_dp))
+ return true;
+
+ /* The VDD bit needs a power domain reference, so if the bit is already
+ * enabled when we boot, grab this reference. */
+ if (edp_have_panel_vdd(intel_dp)) {
+ enum intel_display_power_domain power_domain;
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+ }
- if (output_reg == DP_A)
- intel_output->type = INTEL_OUTPUT_EDP;
+ /* Cache DPCD and EDID for edp. */
+ intel_edp_panel_vdd_on(intel_dp);
+ has_dpcd = intel_dp_get_dpcd(intel_dp);
+ edp_panel_vdd_off(intel_dp, false);
+
+ if (has_dpcd) {
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+ dev_priv->no_aux_handshake =
+ intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
+ DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
+ } else {
+ /* if this fails, presume the device is a ghost */
+ DRM_INFO("failed to retrieve link info, disabling eDP\n");
+ return false;
+ }
+
+ /* We now know it's not a ghost, init power sequence regs. */
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
+
+ mutex_lock(&dev->mode_config.mutex);
+ edid = drm_get_edid(connector, &intel_dp->aux.ddc);
+ if (edid) {
+ if (drm_add_edid_modes(connector, edid)) {
+ drm_mode_connector_update_edid_property(connector,
+ edid);
+ drm_edid_to_eld(connector, edid);
+ } else {
+ kfree(edid);
+ edid = ERR_PTR(-EINVAL);
+ }
+ } else {
+ edid = ERR_PTR(-ENOENT);
+ }
+ intel_connector->edid = edid;
+
+ /* prefer fixed mode from EDID if available */
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ downclock_mode = intel_dp_drrs_init(
+ intel_dig_port,
+ intel_connector, fixed_mode);
+ break;
+ }
+ }
+
+ /* fallback to VBT if available for eDP */
+ if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
+ fixed_mode = drm_mode_duplicate(dev,
+ dev_priv->vbt.lfp_lvds_vbt_mode);
+ if (fixed_mode)
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (IS_VALLEYVIEW(dev)) {
+ intel_dp->edp_notifier.notifier_call = edp_notify_handler;
+ register_reboot_notifier(&intel_dp->edp_notifier);
+ }
+
+ intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+ intel_panel_setup_backlight(connector);
+
+ return true;
+}
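
Note the ERR_PTR convention used for the cached EDID above: -EINVAL marks an EDID that was read but failed to parse, -ENOENT marks a panel that returned none, and a plain pointer is a valid cache. A consumer can then distinguish the cases without touching the hardware again; a sketch (example_cached_edid() is hypothetical):

	static struct edid *example_cached_edid(struct intel_connector *conn)
	{
		/* NULL or ERR_PTR-encoded errno: nothing usable cached */
		if (IS_ERR_OR_NULL(conn->edid))
			return NULL;

		return conn->edid;	/* valid EDID, cached at init */
	}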
+
+bool
+intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector)
+{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ struct edp_power_seq power_seq = { 0 };
+ int type;
+
+ /* intel_dp vfuncs */
+ if (IS_VALLEYVIEW(dev))
+ intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
+ else if (HAS_PCH_SPLIT(dev))
+ intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
- intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
-
- if (output_reg == DP_B || output_reg == PCH_DP_B)
- intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
- else if (output_reg == DP_C || output_reg == PCH_DP_C)
- intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
- else if (output_reg == DP_D || output_reg == PCH_DP_D)
- intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
-
- if (IS_eDP(intel_output)) {
- intel_output->crtc_mask = (1 << 1);
- intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
- } else
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
+ intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
+
+ intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
+
+ /* Preserve the current hw state. */
+ intel_dp->DP = I915_READ(intel_dp->output_reg);
+ intel_dp->attached_connector = intel_connector;
+
+ if (intel_dp_is_edp(dev, port))
+ type = DRM_MODE_CONNECTOR_eDP;
+ else
+ type = DRM_MODE_CONNECTOR_DisplayPort;
+
+ /*
+ * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
+ * for DP the encoder type can be set by the caller to
+ * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
+ */
+ if (type == DRM_MODE_CONNECTOR_eDP)
+ intel_encoder->type = INTEL_OUTPUT_EDP;
+
+ DRM_DEBUG_KMS("Adding %s connector on port %c\n",
+ type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
+ port_name(port));
+
+ drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
+ drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- dp_priv->intel_output = intel_output;
- dp_priv->output_reg = output_reg;
- dp_priv->has_audio = false;
- dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
- intel_output->dev_priv = dp_priv;
+ INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+ edp_panel_vdd_work);
- drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs);
-
- drm_mode_connector_attach_encoder(&intel_output->base,
- &intel_output->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
- /* Set up the DDC bus. */
- switch (output_reg) {
- case DP_A:
- name = "DPDDC-A";
- break;
- case DP_B:
- case PCH_DP_B:
- dev_priv->hotplug_supported_mask |=
- HDMIB_HOTPLUG_INT_STATUS;
- name = "DPDDC-B";
- break;
- case DP_C:
- case PCH_DP_C:
- dev_priv->hotplug_supported_mask |=
- HDMIC_HOTPLUG_INT_STATUS;
- name = "DPDDC-C";
- break;
- case DP_D:
- case PCH_DP_D:
- dev_priv->hotplug_supported_mask |=
- HDMID_HOTPLUG_INT_STATUS;
- name = "DPDDC-D";
- break;
+ if (HAS_DDI(dev))
+ intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ else
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->unregister = intel_dp_connector_unregister;
+
+ /* Set up the hotplug pin. */
+ switch (port) {
+ case PORT_A:
+ intel_encoder->hpd_pin = HPD_PORT_A;
+ break;
+ case PORT_B:
+ intel_encoder->hpd_pin = HPD_PORT_B;
+ break;
+ case PORT_C:
+ intel_encoder->hpd_pin = HPD_PORT_C;
+ break;
+ case PORT_D:
+ intel_encoder->hpd_pin = HPD_PORT_D;
+ break;
+ default:
+ BUG();
}
- intel_dp_i2c_init(intel_output, name);
+ if (is_edp(intel_dp)) {
+ intel_dp_init_panel_power_timestamps(intel_dp);
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+ }
- intel_output->ddc_bus = &dp_priv->adapter;
- intel_output->hot_plug = intel_dp_hot_plug;
+ intel_dp_aux_init(intel_dp, intel_connector);
- if (output_reg == DP_A) {
- /* initialize panel mode from VBT if available for eDP */
- if (dev_priv->lfp_lvds_vbt_mode) {
- dev_priv->panel_fixed_mode =
- drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (dev_priv->panel_fixed_mode) {
- dev_priv->panel_fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
- }
+ intel_dp->psr_setup_done = false;
+
+ if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
+ drm_dp_aux_unregister(&intel_dp->aux);
+ if (is_edp(intel_dp)) {
+ cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ edp_panel_vdd_off_sync(intel_dp);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ return false;
}
+ intel_dp_add_properties(intel_dp, connector);
+
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
@@ -1445,4 +4346,71 @@ intel_dp_init(struct drm_device *dev, int output_reg)
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
+
+ return true;
+}
+
+void
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+
+ intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
+ if (!intel_dig_port)
+ return;
+
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_dig_port);
+ return;
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ intel_encoder->compute_config = intel_dp_compute_config;
+ intel_encoder->disable = intel_disable_dp;
+ intel_encoder->get_hw_state = intel_dp_get_hw_state;
+ intel_encoder->get_config = intel_dp_get_config;
+ if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->pre_enable = chv_pre_enable_dp;
+ intel_encoder->enable = vlv_enable_dp;
+ intel_encoder->post_disable = chv_post_disable_dp;
+ } else if (IS_VALLEYVIEW(dev)) {
+ intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
+ intel_encoder->pre_enable = vlv_pre_enable_dp;
+ intel_encoder->enable = vlv_enable_dp;
+ intel_encoder->post_disable = vlv_post_disable_dp;
+ } else {
+ intel_encoder->pre_enable = g4x_pre_enable_dp;
+ intel_encoder->enable = g4x_enable_dp;
+ intel_encoder->post_disable = g4x_post_disable_dp;
+ }
+
+ intel_dig_port->port = port;
+ intel_dig_port->dp.output_reg = output_reg;
+
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ if (IS_CHERRYVIEW(dev)) {
+ if (port == PORT_D)
+ intel_encoder->crtc_mask = 1 << 2;
+ else
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ } else {
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ }
+ intel_encoder->cloneable = 0;
+ intel_encoder->hot_plug = intel_dp_hot_plug;
+
+ if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
+ drm_encoder_cleanup(encoder);
+ kfree(intel_dig_port);
+ kfree(intel_connector);
+ }
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a51573da1ff..f67340ed2c1 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -26,12 +26,48 @@
#define __INTEL_DRV_H__
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
-#include <linux/i2c-algo-bit.h>
+#include <linux/hdmi.h>
+#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "drm_crtc.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_dp_helper.h>
+
+/**
+ * _wait_for - magic (register) wait macro
+ *
+ * Does the right thing for modeset paths when run under kgdb or similar atomic
+ * contexts. Note that it's important that we check the condition again after
+ * having timed out, since the timeout could be due to preemption or similar and
+ * we've never had a chance to check the condition before the timeout.
+ */
+#define _wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
+ int ret__ = 0; \
+ while (!(COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ if (!(COND)) \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W && drm_can_sleep()) { \
+ msleep(W); \
+ } else { \
+ cpu_relax(); \
+ } \
+ } \
+ ret__; \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+#define wait_for_atomic_us(COND, US) _wait_for((COND), \
+ DIV_ROUND_UP((US), 1000), 0)
+
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
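
Typical use of these helpers is polling a status register with a timeout; EXAMPLE_REG and EXAMPLE_READY below are placeholders, not real registers:

	/* wait up to 50 ms for a ready bit, sleeping ~1 ms between polls */
	if (wait_for(I915_READ(EXAMPLE_REG) & EXAMPLE_READY, 50))
		DRM_ERROR("timed out waiting for EXAMPLE_READY\n");

	/* same check from atomic context: busy-wait, no sleeping */
	if (wait_for_atomic_us(I915_READ(EXAMPLE_REG) & EXAMPLE_READY, 500))
		DRM_ERROR("timed out waiting for EXAMPLE_READY (atomic)\n");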
-#include "drm_crtc_helper.h"
/*
* Display related stuff
*/
@@ -41,7 +77,12 @@
/* the i915, i945 have a single sDVO i2c bus - which is different */
#define MAX_OUTPUTS 6
/* maximum connectors per crtcs in the mode set */
-#define INTELFB_CONN_LIMIT 4
+
+/* Maximum cursor sizes */
+#define GEN2_CURSOR_WIDTH 64
+#define GEN2_CURSOR_HEIGHT 64
+#define MAX_CURSOR_WIDTH 256
+#define MAX_CURSOR_HEIGHT 256
#define INTEL_I2C_BUS_DVO 1
#define INTEL_I2C_BUS_SDVO 2
@@ -57,174 +98,904 @@
#define INTEL_OUTPUT_HDMI 6
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
-
-/* Intel Pipe Clone Bit */
-#define INTEL_HDMIB_CLONE_BIT 1
-#define INTEL_HDMIC_CLONE_BIT 2
-#define INTEL_HDMID_CLONE_BIT 3
-#define INTEL_HDMIE_CLONE_BIT 4
-#define INTEL_HDMIF_CLONE_BIT 5
-#define INTEL_SDVO_NON_TV_CLONE_BIT 6
-#define INTEL_SDVO_TV_CLONE_BIT 7
-#define INTEL_SDVO_LVDS_CLONE_BIT 8
-#define INTEL_ANALOG_CLONE_BIT 9
-#define INTEL_TV_CLONE_BIT 10
-#define INTEL_DP_B_CLONE_BIT 11
-#define INTEL_DP_C_CLONE_BIT 12
-#define INTEL_DP_D_CLONE_BIT 13
-#define INTEL_LVDS_CLONE_BIT 14
-#define INTEL_DVO_TMDS_CLONE_BIT 15
-#define INTEL_DVO_LVDS_CLONE_BIT 16
-#define INTEL_EDP_CLONE_BIT 17
+#define INTEL_OUTPUT_DSI 9
+#define INTEL_OUTPUT_UNKNOWN 10
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
-struct intel_i2c_chan {
- struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
- u32 reg; /* GPIO reg */
- struct i2c_adapter adapter;
- struct i2c_algo_bit_data algo;
-};
+#define INTEL_DSI_VIDEO_MODE 0
+#define INTEL_DSI_COMMAND_MODE 1
struct intel_framebuffer {
struct drm_framebuffer base;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
};
+struct intel_fbdev {
+ struct drm_fb_helper helper;
+ struct intel_framebuffer *fb;
+ struct list_head fbdev_list;
+ struct drm_display_mode *our_mode;
+ int preferred_bpp;
+};
-struct intel_output {
- struct drm_connector base;
+struct intel_encoder {
+ struct drm_encoder base;
+ /*
+ * The new crtc this encoder will be driven from. Only differs from
+ * base->crtc while a modeset is in progress.
+ */
+ struct intel_crtc *new_crtc;
- struct drm_encoder enc;
int type;
- struct i2c_adapter *i2c_bus;
- struct i2c_adapter *ddc_bus;
- bool load_detect_temp;
- bool needs_tv_clock;
- void *dev_priv;
- void (*hot_plug)(struct intel_output *);
+ unsigned int cloneable;
+ bool connectors_active;
+ void (*hot_plug)(struct intel_encoder *);
+ bool (*compute_config)(struct intel_encoder *,
+ struct intel_crtc_config *);
+ void (*pre_pll_enable)(struct intel_encoder *);
+ void (*pre_enable)(struct intel_encoder *);
+ void (*enable)(struct intel_encoder *);
+ void (*mode_set)(struct intel_encoder *intel_encoder);
+ void (*disable)(struct intel_encoder *);
+ void (*post_disable)(struct intel_encoder *);
+	/* Read out the current hw state of this encoder, returning true if
+	 * the encoder is active. If the encoder is enabled it also sets the
+	 * pipe it is connected to in the pipe parameter. */
+ bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
+ /* Reconstructs the equivalent mode flags for the current hardware
+ * state. This must be called _after_ display->get_pipe_config has
+ * pre-filled the pipe config. Note that intel_encoder->base.crtc must
+ * be set correctly before calling this function. */
+ void (*get_config)(struct intel_encoder *,
+ struct intel_crtc_config *pipe_config);
int crtc_mask;
- int clone_mask;
-};
-
-struct intel_crtc;
-struct intel_overlay {
- struct drm_device *dev;
- struct intel_crtc *crtc;
- struct drm_i915_gem_object *vid_bo;
- struct drm_i915_gem_object *old_vid_bo;
- int active;
- int pfit_active;
- u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
- u32 color_key;
- u32 brightness, contrast, saturation;
- u32 old_xscale, old_yscale;
- /* register access */
- u32 flip_addr;
- struct drm_i915_gem_object *reg_bo;
- void *virt_addr;
- /* flip handling */
- uint32_t last_flip_req;
- int hw_wedged;
-#define HW_WEDGED 1
-#define NEEDS_WAIT_FOR_FLIP 2
-#define RELEASE_OLD_VID 3
-#define SWITCH_OFF_STAGE_1 4
-#define SWITCH_OFF_STAGE_2 5
+ enum hpd_pin hpd_pin;
+};
+
+struct intel_panel {
+ struct drm_display_mode *fixed_mode;
+ struct drm_display_mode *downclock_mode;
+ int fitting_mode;
+
+ /* backlight */
+ struct {
+ bool present;
+ u32 level;
+ u32 max;
+ bool enabled;
+ bool combination_mode; /* gen 2/4 only */
+ bool active_low_pwm;
+ struct backlight_device *device;
+ } backlight;
+};
+
+struct intel_connector {
+ struct drm_connector base;
+ /*
+ * The fixed encoder this connector is connected to.
+ */
+ struct intel_encoder *encoder;
+
+ /*
+	 * The new encoder this connector will be driven by. Only differs from
+ * encoder while a modeset is in progress.
+ */
+ struct intel_encoder *new_encoder;
+
+ /* Reads out the current hw, returning true if the connector is enabled
+ * and active (i.e. dpms ON state). */
+ bool (*get_hw_state)(struct intel_connector *);
+
+ /*
+ * Removes all interfaces through which the connector is accessible
+	 * - like sysfs or debugfs entries - so that no new operations can be
+	 * started on the connector. Also makes sure all currently pending
+	 * operations finish before returning.
+ */
+ void (*unregister)(struct intel_connector *);
+
+ /* Panel info for eDP and LVDS */
+ struct intel_panel panel;
+
+ /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+ struct edid *edid;
+
+	/* Since POLL and HPD connectors may use the same HPD line, keep the
+	 * native state of connector->polled in case hotplug storm detection
+	 * changes it. */
+ u8 polled;
+};
+
+typedef struct dpll {
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
+} intel_clock_t;
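
The derived fields follow from the given divider values; as a rough sketch (generation-specific register biases, e.g. the +2 offsets that older parts apply to some dividers, are deliberately left out here):

	/*
	 *   m   = 5 * m1 + m2
	 *   p   = p1 * p2
	 *   vco = refclk * m / n
	 *   dot = vco / p
	 */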
+
+struct intel_plane_config {
+ bool tiled;
+ int size;
+ u32 base;
+};
+
+struct intel_crtc_config {
+ /**
+ * quirks - bitfield with hw state readout quirks
+ *
+ * For various reasons the hw state readout code might not be able to
+ * completely faithfully read out the current state. These cases are
+ * tracked with quirk flags so that fastboot and state checker can act
+ * accordingly.
+ */
+#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
+ unsigned long quirks;
+
+ /* User requested mode, only valid as a starting point to
+ * compute adjusted_mode, except in the case of (S)DVO where
+ * it's also for the output timings of the (S)DVO chip.
+	 * adjusted_mode will then correspond to the (S)DVO chip's
+ * preferred input timings. */
+ struct drm_display_mode requested_mode;
+	/* Actual pipe timings, i.e. what we program into the pipe timing
+ * registers. adjusted_mode.crtc_clock is the pipe pixel clock. */
+ struct drm_display_mode adjusted_mode;
+
+	/* Pipe source size (i.e. panel fitter input size)
+ * All planes will be positioned inside this space,
+ * and get clipped at the edges. */
+ int pipe_src_w, pipe_src_h;
+
+ /* Whether to set up the PCH/FDI. Note that we never allow sharing
+ * between pch encoders and cpu encoders. */
+ bool has_pch_encoder;
+
+ /* CPU Transcoder for the pipe. Currently this can only differ from the
+ * pipe on Haswell (where we have a special eDP transcoder). */
+ enum transcoder cpu_transcoder;
+
+ /*
+	 * Use reduced/limited/broadcast rgb range, compressing from the full
+ * range fed into the crtcs.
+ */
+ bool limited_color_range;
+
+	/* DP has a bunch of special cases, unfortunately, so mark the pipe
+ * accordingly. */
+ bool has_dp_encoder;
+
+ /* Whether we should send NULL infoframes. Required for audio. */
+ bool has_hdmi_sink;
+
+ /* Audio enabled on this pipe. Only valid if either has_hdmi_sink or
+ * has_dp_encoder is set. */
+ bool has_audio;
+
+ /*
+ * Enable dithering, used when the selected pipe bpp doesn't match the
+ * plane bpp.
+ */
+ bool dither;
+
+ /* Controls for the clock computation, to override various stages. */
+ bool clock_set;
+
+	/* SDVO TV has a bunch of special cases. To make multifunction encoders
+	 * work correctly, we need to track this at runtime. */
+ bool sdvo_tv_clock;
+
+ /*
+ * crtc bandwidth limit, don't increase pipe bpp or clock if not really
+ * required. This is set in the 2nd loop of calling encoder's
+ * ->compute_config if the first pick doesn't work out.
+ */
+ bool bw_constrained;
+
+ /* Settings for the intel dpll used on pretty much everything but
+ * haswell. */
+ struct dpll dpll;
+
+ /* Selected dpll when shared or DPLL_ID_PRIVATE. */
+ enum intel_dpll_id shared_dpll;
+
+ /* Actual register state of the dpll, for shared dpll cross-checking. */
+ struct intel_dpll_hw_state dpll_hw_state;
+
+ int pipe_bpp;
+ struct intel_link_m_n dp_m_n;
+
+ /* m2_n2 for eDP downclock */
+ struct intel_link_m_n dp_m2_n2;
+
+ /*
+	 * Frequency the dpll for the port should run at. Differs from the
+ * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
+ * already multiplied by pixel_multiplier.
+ */
+ int port_clock;
+
+ /* Used by SDVO (and if we ever fix it, HDMI). */
+ unsigned pixel_multiplier;
+
+ /* Panel fitter controls for gen2-gen4 + VLV */
+ struct {
+ u32 control;
+ u32 pgm_ratios;
+ u32 lvds_border_bits;
+ } gmch_pfit;
+
+ /* Panel fitter placement and size for Ironlake+ */
+ struct {
+ u32 pos;
+ u32 size;
+ bool enabled;
+ } pch_pfit;
+
+ /* FDI configuration, only valid if has_pch_encoder is set. */
+ int fdi_lanes;
+ struct intel_link_m_n fdi_m_n;
+
+ bool ips_enabled;
+
+ bool double_wide;
+};
+
+struct intel_pipe_wm {
+ struct intel_wm_level wm[5];
+ uint32_t linetime;
+ bool fbc_wm_enabled;
+ bool pipe_enabled;
+ bool sprites_enabled;
+ bool sprites_scaled;
};
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
enum plane plane;
- struct drm_gem_object *cursor_bo;
- uint32_t cursor_addr;
u8 lut_r[256], lut_g[256], lut_b[256];
- int dpms_mode;
- bool busy; /* is scanout buffer being updated frequently? */
- struct timer_list idle_timer;
+ /*
+	 * Whether the crtc and the connected output pipeline are active. Implies
+ * that crtc->enabled is set, i.e. the current mode configuration has
+ * some outputs connected to this crtc.
+ */
+ bool active;
+ unsigned long enabled_power_domains;
+ bool primary_enabled; /* is the primary plane (partially) visible? */
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
+
+ atomic_t unpin_work_count;
+
+	/* Display surface base address adjustment for pageflips. Note that on
+ * gen4+ this only adjusts up to a tile, offsets within a tile are
+ * handled in the hw itself (with the TILEOFF register). */
+ unsigned long dspaddr_offset;
+
+ struct drm_i915_gem_object *cursor_bo;
+ uint32_t cursor_addr;
+ int16_t cursor_x, cursor_y;
+ int16_t cursor_width, cursor_height;
+ uint32_t cursor_cntl;
+ uint32_t cursor_base;
+
+ struct intel_plane_config plane_config;
+ struct intel_crtc_config config;
+ struct intel_crtc_config *new_config;
+ bool new_enabled;
+
+ uint32_t ddi_pll_sel;
+
+ /* reset counter value when the last flip was submitted */
+ unsigned int reset_counter;
+
+ /* Access to these should be protected by dev_priv->irq_lock. */
+ bool cpu_fifo_underrun_disabled;
+ bool pch_fifo_underrun_disabled;
+
+ /* per-pipe watermark state */
+ struct {
+ /* watermarks currently being used */
+ struct intel_pipe_wm active;
+ } wm;
+
+ wait_queue_head_t vbl_wait;
+
+ int scanline_offset;
+};
+
+struct intel_plane_wm_parameters {
+ uint32_t horiz_pixels;
+ uint8_t bytes_per_pixel;
+ bool enabled;
+ bool scaled;
+};
+
+struct intel_plane {
+ struct drm_plane base;
+ int plane;
+ enum pipe pipe;
+ struct drm_i915_gem_object *obj;
+ bool can_scale;
+ int max_downscale;
+ u32 lut_r[1024], lut_g[1024], lut_b[1024];
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ uint32_t src_x, src_y;
+ uint32_t src_w, src_h;
+
+ /* Since we need to change the watermarks before/after
+ * enabling/disabling the planes, we need to store the parameters here
+ * as the other pieces of the struct may not reflect the values we want
+ * for the watermark calculations. Currently only Haswell uses this.
+ */
+ struct intel_plane_wm_parameters wm;
+
+ void (*update_plane)(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h);
+ void (*disable_plane)(struct drm_plane *plane,
+ struct drm_crtc *crtc);
+ int (*update_colorkey)(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key);
+ void (*get_colorkey)(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key);
+};
+
+struct intel_watermark_params {
+ unsigned long fifo_size;
+ unsigned long max_wm;
+ unsigned long default_wm;
+ unsigned long guard_size;
+ unsigned long cacheline_size;
+};
+
+struct cxsr_latency {
+ int is_desktop;
+ int is_ddr3;
+ unsigned long fsb_freq;
+ unsigned long mem_freq;
+ unsigned long display_sr;
+ unsigned long display_hpll_disable;
+ unsigned long cursor_sr;
+ unsigned long cursor_hpll_disable;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_output(x) container_of(x, struct intel_output, base)
-#define enc_to_intel_output(x) container_of(x, struct intel_output, enc)
+#define to_intel_connector(x) container_of(x, struct intel_connector, base)
+#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define to_intel_plane(x) container_of(x, struct intel_plane, base)
-struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
- const char *name);
-void intel_i2c_destroy(struct i2c_adapter *adapter);
-int intel_ddc_get_modes(struct intel_output *intel_output);
-extern bool intel_ddc_probe(struct intel_output *intel_output);
-void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
-void intel_i2c_reset_gmbus(struct drm_device *dev);
-
-extern void intel_crt_init(struct drm_device *dev);
-extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
-extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
-extern void intel_dvo_init(struct drm_device *dev);
-extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
-extern void intel_lvds_init(struct drm_device *dev);
-extern void intel_dp_init(struct drm_device *dev, int dp_reg);
-void
-intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-extern void intel_edp_link_config (struct intel_output *, int *, int *);
+struct intel_hdmi {
+ u32 hdmi_reg;
+ int ddc_bus;
+ uint32_t color_range;
+ bool color_range_auto;
+ bool has_hdmi_sink;
+ bool has_audio;
+ enum hdmi_force_audio force_audio;
+ bool rgb_quant_range_selectable;
+ void (*write_infoframe)(struct drm_encoder *encoder,
+ enum hdmi_infoframe_type type,
+ const void *frame, ssize_t len);
+ void (*set_infoframes)(struct drm_encoder *encoder,
+ bool enable,
+ struct drm_display_mode *adjusted_mode);
+};
+
+#define DP_MAX_DOWNSTREAM_PORTS 0x10
+
+/**
+ * HIGH_RR is the highest eDP panel refresh rate read from EDID
+ * LOW_RR is the lowest eDP panel refresh rate found from EDID
+ * parsing for the same resolution.
+ */
+enum edp_drrs_refresh_rate_type {
+ DRRS_HIGH_RR,
+ DRRS_LOW_RR,
+ DRRS_MAX_RR, /* RR count */
+};
+
+struct intel_dp {
+ uint32_t output_reg;
+ uint32_t aux_ch_ctl_reg;
+ uint32_t DP;
+ bool has_audio;
+ enum hdmi_force_audio force_audio;
+ uint32_t color_range;
+ bool color_range_auto;
+ uint8_t link_bw;
+ uint8_t lane_count;
+ uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+ uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
+ uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+ struct drm_dp_aux aux;
+ uint8_t train_set[4];
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ struct delayed_work panel_vdd_work;
+ bool want_panel_vdd;
+ unsigned long last_power_cycle;
+ unsigned long last_power_on;
+ unsigned long last_backlight_off;
+ bool psr_setup_done;
+ struct notifier_block edp_notifier;
+
+ bool use_tps3;
+ struct intel_connector *attached_connector;
+
+ uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
+ /*
+ * This function returns the value we have to program the AUX_CTL
+ * register with to kick off an AUX transaction.
+ */
+ uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
+ bool has_aux_irq,
+ int send_bytes,
+ uint32_t aux_clock_divider);
+ struct {
+ enum drrs_support_type type;
+ enum edp_drrs_refresh_rate_type refresh_rate_type;
+ struct mutex mutex;
+ } drrs_state;
+
+};
+
+struct intel_digital_port {
+ struct intel_encoder base;
+ enum port port;
+ u32 saved_port_bits;
+ struct intel_dp dp;
+ struct intel_hdmi hdmi;
+};
+
+static inline int
+vlv_dport_to_channel(struct intel_digital_port *dport)
+{
+ switch (dport->port) {
+ case PORT_B:
+ case PORT_D:
+ return DPIO_CH0;
+ case PORT_C:
+ return DPIO_CH1;
+ default:
+ BUG();
+ }
+}
+
+static inline int
+vlv_pipe_to_channel(enum pipe pipe)
+{
+ switch (pipe) {
+ case PIPE_A:
+ case PIPE_C:
+ return DPIO_CH0;
+ case PIPE_B:
+ return DPIO_CH1;
+ default:
+ BUG();
+ }
+}
+
+static inline struct drm_crtc *
+intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->pipe_to_crtc_mapping[pipe];
+}
+
+static inline struct drm_crtc *
+intel_get_crtc_for_plane(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->plane_to_crtc_mapping[plane];
+}
+
+struct intel_unpin_work {
+ struct work_struct work;
+ struct drm_crtc *crtc;
+ struct drm_i915_gem_object *old_fb_obj;
+ struct drm_i915_gem_object *pending_flip_obj;
+ struct drm_pending_vblank_event *event;
+ atomic_t pending;
+#define INTEL_FLIP_INACTIVE 0
+#define INTEL_FLIP_PENDING 1
+#define INTEL_FLIP_COMPLETE 2
+ u32 flip_count;
+ u32 gtt_offset;
+ bool enable_stall_check;
+};
+
+struct intel_set_config {
+ struct drm_encoder **save_connector_encoders;
+ struct drm_crtc **save_encoder_crtcs;
+ bool *save_crtc_enabled;
+
+ bool fb_changed;
+ bool mode_changed;
+};
+
+struct intel_load_detect_pipe {
+ struct drm_framebuffer *release_fb;
+ bool load_detect_temp;
+ int dpms_mode;
+};
+
+static inline struct intel_encoder *
+intel_attached_encoder(struct drm_connector *connector)
+{
+ return to_intel_connector(connector)->encoder;
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_digital_port, base.base);
+}
+static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+ return &enc_to_dig_port(encoder)->dp;
+}
-extern int intel_panel_fitter_pipe (struct drm_device *dev);
-extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void intel_encoder_prepare (struct drm_encoder *encoder);
-extern void intel_encoder_commit (struct drm_encoder *encoder);
+static inline struct intel_digital_port *
+dp_to_dig_port(struct intel_dp *intel_dp)
+{
+ return container_of(intel_dp, struct intel_digital_port, dp);
+}
-extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+static inline struct intel_digital_port *
+hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
+{
+ return container_of(intel_hdmi, struct intel_digital_port, hdmi);
+}
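
These container_of() helpers let code hop between the embedded structures without storing back-pointers; for instance, going from a bare drm_encoder all the way to the physical port is just a chained lookup:

	/* minimal sketch: drm_encoder -> intel_digital_port -> port */
	static enum port example_encoder_port(struct drm_encoder *encoder)
	{
		return enc_to_dig_port(encoder)->port;
	}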
-extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
- struct drm_crtc *crtc);
+
+/* i915_irq.c */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable);
+bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
+ bool enable);
+void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
+void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
+int intel_get_crtc_scanline(struct intel_crtc *crtc);
+void i9xx_check_fifo_underruns(struct drm_device *dev);
+
+
+/* intel_crt.c */
+void intel_crt_init(struct drm_device *dev);
+
+
+/* intel_ddi.c */
+void intel_prepare_ddi(struct drm_device *dev);
+void hsw_fdi_link_train(struct drm_crtc *crtc);
+void intel_ddi_init(struct drm_device *dev, enum port port);
+enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+void intel_ddi_pll_init(struct drm_device *dev);
+void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder);
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+bool intel_ddi_pll_select(struct intel_crtc *crtc);
+void intel_ddi_pll_enable(struct intel_crtc *crtc);
+void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+void intel_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
+
+
+/* intel_display.c */
+const char *intel_output_name(int output);
+bool intel_has_pending_fb_unpin(struct drm_device *dev);
+int intel_pch_rawclk(struct drm_device *dev);
+int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
+void intel_mark_busy(struct drm_device *dev);
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring);
+void intel_mark_idle(struct drm_device *dev);
+void intel_crtc_restore_mode(struct drm_crtc *crtc);
+void intel_crtc_update_dpms(struct drm_crtc *crtc);
+void intel_encoder_destroy(struct drm_encoder *encoder);
+void intel_connector_dpms(struct drm_connector *, int mode);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+void intel_modeset_check_state(struct drm_device *dev);
+bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc);
+enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern void intel_wait_for_vblank(struct drm_device *dev);
-extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
-extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
- struct drm_display_mode *mode,
- int *dpms_mode);
-extern void intel_release_load_detect_pipe(struct intel_output *intel_output,
- int dpms_mode);
-
-extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
-extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
-extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
-extern int intelfb_probe(struct drm_device *dev);
-extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
-extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
-extern void intelfb_restore(void);
-extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno);
-extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno);
-extern void intel_init_clock_gating(struct drm_device *dev);
-
-extern int intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_framebuffer **fb,
- struct drm_gem_object *obj);
-
-extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
-extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
-
-extern void intel_setup_overlay(struct drm_device *dev);
-extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay);
-extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
- int interruptible);
-extern int intel_overlay_put_image(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int intel_overlay_attrs(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dport);
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
+int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *pipelined);
+void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
+struct drm_framebuffer *
+__intel_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj);
+void intel_prepare_page_flip(struct drm_device *dev, int plane);
+void intel_finish_page_flip(struct drm_device *dev, int pipe);
+void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ bool state);
+#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
+#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state);
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state);
+#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
+#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
+void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+void intel_write_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+ unsigned int tiling_mode,
+ unsigned int bpp,
+ unsigned int pitch);
+void intel_display_handle_reset(struct drm_device *dev);
+void hsw_enable_pc8(struct drm_i915_private *dev_priv);
+void hsw_disable_pc8(struct drm_i915_private *dev_priv);
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config);
+int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
+void
+ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+ int dotclock);
+bool intel_crtc_active(struct drm_crtc *crtc);
+void hsw_enable_ips(struct intel_crtc *crtc);
+void hsw_disable_ips(struct intel_crtc *crtc);
+void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
+enum intel_display_power_domain
+intel_display_port_power_domain(struct intel_encoder *intel_encoder);
+int valleyview_get_vco(struct drm_i915_private *dev_priv);
+void intel_mode_from_pipe_config(struct drm_display_mode *mode,
+ struct intel_crtc_config *pipe_config);
+int intel_format_to_fourcc(int format);
+void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
+
+
+/* intel_dp.c */
+void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
+bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+void intel_dp_start_link_train(struct intel_dp *intel_dp);
+void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+void intel_dp_check_link_status(struct intel_dp *intel_dp);
+int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
+bool intel_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
+bool intel_dp_is_edp(struct drm_device *dev, enum port port);
+void intel_edp_backlight_on(struct intel_dp *intel_dp);
+void intel_edp_backlight_off(struct intel_dp *intel_dp);
+void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
+void intel_edp_panel_on(struct intel_dp *intel_dp);
+void intel_edp_panel_off(struct intel_dp *intel_dp);
+void intel_edp_psr_enable(struct intel_dp *intel_dp);
+void intel_edp_psr_disable(struct intel_dp *intel_dp);
+void intel_edp_psr_update(struct drm_device *dev);
+void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
+
+/* intel_dsi.c */
+bool intel_dsi_init(struct drm_device *dev);
+
+
+/* intel_dvo.c */
+void intel_dvo_init(struct drm_device *dev);
+
+
+/* legacy fbdev emulation in intel_fbdev.c */
+#ifdef CONFIG_DRM_I915_FBDEV
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_initial_config(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
+extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
+extern void intel_fbdev_restore_mode(struct drm_device *dev);
+#else
+static inline int intel_fbdev_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void intel_fbdev_initial_config(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+}
+
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+#endif
+
+/* intel_hdmi.c */
+void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
+
+
+/* intel_lvds.c */
+void intel_lvds_init(struct drm_device *dev);
+bool intel_is_dual_link_lvds(struct drm_device *dev);
+
+
+/* intel_modes.c */
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+
+/* intel_overlay.c */
+void intel_setup_overlay(struct drm_device *dev);
+void intel_cleanup_overlay(struct drm_device *dev);
+int intel_overlay_switch_off(struct intel_overlay *overlay);
+int intel_overlay_put_image(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int intel_overlay_attrs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+
+/* intel_panel.c */
+int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *downclock_mode);
+void intel_panel_fini(struct intel_panel *panel);
+void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode);
+void intel_pch_panel_fitting(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config,
+ int fitting_mode);
+void intel_gmch_panel_fitting(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config,
+ int fitting_mode);
+void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
+ u32 max);
+int intel_panel_setup_backlight(struct drm_connector *connector);
+void intel_panel_enable_backlight(struct intel_connector *connector);
+void intel_panel_disable_backlight(struct intel_connector *connector);
+void intel_panel_destroy_backlight(struct drm_connector *connector);
+void intel_panel_init_backlight_funcs(struct drm_device *dev);
+enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+extern struct drm_display_mode *intel_find_panel_downclock(
+ struct drm_device *dev,
+ struct drm_display_mode *fixed_mode,
+ struct drm_connector *connector);
+
+/* intel_pm.c */
+void intel_init_clock_gating(struct drm_device *dev);
+void intel_suspend_hw(struct drm_device *dev);
+int ilk_wm_max_level(const struct drm_device *dev);
+void intel_update_watermarks(struct drm_crtc *crtc);
+void intel_update_sprite_watermarks(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ uint32_t sprite_width, int pixel_size,
+ bool enabled, bool scaled);
+void intel_init_pm(struct drm_device *dev);
+void intel_pm_setup(struct drm_device *dev);
+bool intel_fbc_enabled(struct drm_device *dev);
+void intel_update_fbc(struct drm_device *dev);
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+void intel_gpu_ips_teardown(void);
+int intel_power_domains_init(struct drm_i915_private *);
+void intel_power_domains_remove(struct drm_i915_private *);
+bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
+void intel_init_gt_powersave(struct drm_device *dev);
+void intel_cleanup_gt_powersave(struct drm_device *dev);
+void intel_enable_gt_powersave(struct drm_device *dev);
+void intel_disable_gt_powersave(struct drm_device *dev);
+void intel_reset_gt_powersave(struct drm_device *dev);
+void ironlake_teardown_rc6(struct drm_device *dev);
+void gen6_update_ring_freq(struct drm_device *dev);
+void gen6_rps_idle(struct drm_i915_private *dev_priv);
+void gen6_rps_boost(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
+void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
+void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
+void ilk_wm_get_hw_state(struct drm_device *dev);
+void __vlv_set_power_well(struct drm_i915_private *dev_priv,
+ enum punit_power_well power_well_id, bool enable);
+
+/* intel_sdvo.c */
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
+
+
+/* intel_sprite.c */
+int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
+void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
+ enum plane plane);
+void intel_plane_restore(struct drm_plane *plane);
+void intel_plane_disable(struct drm_plane *plane);
+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+
+/* intel_tv.c */
+void intel_tv_init(struct drm_device *dev);
+
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644
index 00000000000..3fd082933c8
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -0,0 +1,765 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/i915_drm.h>
+#include <linux/slab.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_dsi_cmd.h"
+
+/* the sub-encoders aka panel drivers */
+static const struct intel_dsi_device intel_dsi_devices[] = {
+ {
+ .panel_id = MIPI_DSI_GENERIC_PANEL_ID,
+ .name = "vbt-generic-dsi-vid-mode-display",
+ .dev_ops = &vbt_generic_dsi_display_ops,
+ },
+};
+
+static void band_gap_reset(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->dpio_lock);
+
+ vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
+ vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
+ vlv_flisdsi_write(dev_priv, 0x0F, 0x0025);
+ udelay(150);
+ vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
+ vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_dsi, base);
+}
+
+static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
+{
+ return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
+}
+
+static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
+{
+ return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
+}
+
+static void intel_dsi_hot_plug(struct intel_encoder *encoder)
+{
+ DRM_DEBUG_KMS("\n");
+}
+
+static bool intel_dsi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *config)
+{
+ struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+ base);
+ struct intel_connector *intel_connector = intel_dsi->attached_connector;
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
+ struct drm_display_mode *mode = &config->requested_mode;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (fixed_mode)
+ intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+
+ if (intel_dsi->dev.dev_ops->mode_fixup)
+ return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
+ mode, adjusted_mode);
+
+ return true;
+}
+
+static void intel_dsi_device_ready(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ int pipe = intel_crtc->pipe;
+ u32 val;
+
+ DRM_DEBUG_KMS("\n");
+
+ mutex_lock(&dev_priv->dpio_lock);
+ /* program rcomp for compliance: reduce from 50 ohms to 45 ohms,
+ * needed every time after power gating */
+ vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ /* a bandgap reset is needed every time we power gate */
+ band_gap_reset(dev_priv);
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ usleep_range(2500, 3000);
+
+ val = I915_READ(MIPI_PORT_CTRL(pipe));
+ I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+ usleep_range(2500, 3000);
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
+ usleep_range(2500, 3000);
+}
+
+static void intel_dsi_enable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ int pipe = intel_crtc->pipe;
+ u32 temp;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (is_cmd_mode(intel_dsi)) {
+ I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
+ } else {
+ msleep(20); /* XXX */
+ dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN);
+ msleep(100);
+
+ if (intel_dsi->dev.dev_ops->enable)
+ intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+
+ /* assert ip_tg_enable signal */
+ temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
+ temp = temp | intel_dsi->port_bits;
+ I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
+ POSTING_READ(MIPI_PORT_CTRL(pipe));
+ }
+}
+
+static void intel_dsi_pre_enable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 tmp;
+
+ DRM_DEBUG_KMS("\n");
+
+ /* Disable DPOunit clock gating: it can stall the pipe,
+ * and we need DPLL REFA always enabled */
+ tmp = I915_READ(DPLL(pipe));
+ tmp |= DPLL_REFA_CLK_ENABLE_VLV;
+ I915_WRITE(DPLL(pipe), tmp);
+
+ tmp = I915_READ(DSPCLK_GATE_D);
+ tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, tmp);
+
+ /* put device in ready state */
+ intel_dsi_device_ready(encoder);
+
+ msleep(intel_dsi->panel_on_delay);
+
+ if (intel_dsi->dev.dev_ops->panel_reset)
+ intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
+
+ if (intel_dsi->dev.dev_ops->send_otp_cmds)
+ intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
+
+ /* Enable the port in the pre-enable phase itself because, per the hw
+ * team's recommendation, the port should be enabled before plane & pipe */
+ intel_dsi_enable(encoder);
+}
+
+static void intel_dsi_enable_nop(struct intel_encoder *encoder)
+{
+ DRM_DEBUG_KMS("\n");
+
+ /* For DSI, port enable has to be done before pipe
+ * and plane enable, so port enable is done in the
+ * pre_enable phase itself, unlike other encoders
+ */
+}
+
+static void intel_dsi_pre_disable(struct intel_encoder *encoder)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+ DRM_DEBUG_KMS("\n");
+
+ if (is_vid_mode(intel_dsi)) {
+ /* Send Shutdown command to the panel in LP mode */
+ dpi_send_cmd(intel_dsi, SHUTDOWN, DPI_LP_MODE_EN);
+ msleep(10);
+ }
+}
+
+static void intel_dsi_disable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ int pipe = intel_crtc->pipe;
+ u32 temp;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (is_vid_mode(intel_dsi)) {
+ /* de-assert ip_tg_enable signal */
+ temp = I915_READ(MIPI_PORT_CTRL(pipe));
+ I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
+ POSTING_READ(MIPI_PORT_CTRL(pipe));
+
+ msleep(2);
+ }
+
+ /* Panel commands can be sent when clock is in LP11 */
+ I915_WRITE(MIPI_DEVICE_READY(pipe), 0x0);
+
+ temp = I915_READ(MIPI_CTRL(pipe));
+ temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+ I915_WRITE(MIPI_CTRL(pipe), temp |
+ intel_dsi->escape_clk_div <<
+ ESCAPE_CLOCK_DIVIDER_SHIFT);
+
+ I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP);
+
+ temp = I915_READ(MIPI_DSI_FUNC_PRG(pipe));
+ temp &= ~VID_MODE_FORMAT_MASK;
+ I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), temp);
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), 0x1);
+
+ /* if disable packets are sent before the shutdown packet, then on some
+ * subsequent enable sequence a "send turn on packet" error is observed */
+ if (intel_dsi->dev.dev_ops->disable)
+ intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
+}
+
+static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ int pipe = intel_crtc->pipe;
+ u32 val;
+
+ DRM_DEBUG_KMS("\n");
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
+ usleep_range(2000, 2500);
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
+ usleep_range(2000, 2500);
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
+ usleep_range(2000, 2500);
+
+ if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
+ == 0x00000), 30))
+ DRM_ERROR("DSI LP not going Low\n");
+
+ val = I915_READ(MIPI_PORT_CTRL(pipe));
+ I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
+ usleep_range(2000, 2500);
+
+ vlv_disable_dsi_pll(encoder);
+}
+
+static void intel_dsi_post_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 val;
+
+ DRM_DEBUG_KMS("\n");
+
+ intel_dsi_disable(encoder);
+
+ intel_dsi_clear_device_ready(encoder);
+
+ val = I915_READ(DSPCLK_GATE_D);
+ val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
+
+ if (intel_dsi->dev.dev_ops->disable_panel_power)
+ intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
+
+ msleep(intel_dsi->panel_off_delay);
+ msleep(intel_dsi->panel_pwr_cycle_delay);
+}
+
+static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ enum intel_display_power_domain power_domain;
+ u32 port, func;
+ enum pipe p;
+
+ DRM_DEBUG_KMS("\n");
+
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
+ /* XXX: this only works for one DSI output */
+ for (p = PIPE_A; p <= PIPE_B; p++) {
+ port = I915_READ(MIPI_PORT_CTRL(p));
+ func = I915_READ(MIPI_DSI_FUNC_PRG(p));
+
+ if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
+ if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
+ *pipe = p;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static void intel_dsi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ DRM_DEBUG_KMS("\n");
+
+ /* XXX: read flags, set to adjusted_mode */
+}
+
+static enum drm_mode_status
+intel_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+
+ DRM_DEBUG_KMS("\n");
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+ DRM_DEBUG_KMS("MODE_NO_DBLESCAN\n");
+ return MODE_NO_DBLESCAN;
+ }
+
+ if (fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
+}
+
+/* return txclkesc cycles in terms of divider and duration in us */
+static u16 txclkesc(u32 divider, unsigned int us)
+{
+ switch (divider) {
+ case ESCAPE_CLOCK_DIVIDER_1:
+ default:
+ return 20 * us;
+ case ESCAPE_CLOCK_DIVIDER_2:
+ return 10 * us;
+ case ESCAPE_CLOCK_DIVIDER_4:
+ return 5 * us;
+ }
+}
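+
+/*
+ * For example (illustrative arithmetic only): the escape clock is derived
+ * from a 20 MHz source, so with ESCAPE_CLOCK_DIVIDER_1 there are 20 cycles
+ * per microsecond and txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100) == 2000, the
+ * kind of value programmed below into MIPI_INIT_COUNT.
+ */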
+
+/* return pixels in terms of txbyteclkhs */
+static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count)
+{
+ return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
+}
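+
+/*
+ * For example (illustrative arithmetic only): 88 pixels at bpp == 24 on
+ * lane_count == 4 is DIV_ROUND_UP(DIV_ROUND_UP(88 * 24, 8), 4) ==
+ * DIV_ROUND_UP(264, 4) == 66 txbyteclkhs.
+ */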
+
+static void set_dsi_timings(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ int pipe = intel_crtc->pipe;
+ unsigned int bpp = intel_crtc->config.pipe_bpp;
+ unsigned int lane_count = intel_dsi->lane_count;
+
+ u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
+
+ hactive = mode->hdisplay;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsync = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsync = mode->vsync_end - mode->vsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ /* horizontal values are in terms of high speed byte clock */
+ hactive = txbyteclkhs(hactive, bpp, lane_count);
+ hfp = txbyteclkhs(hfp, bpp, lane_count);
+ hsync = txbyteclkhs(hsync, bpp, lane_count);
+ hbp = txbyteclkhs(hbp, bpp, lane_count);
+
+ I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
+ I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
+
+ /* meaningful only in video mode with non-burst sync pulse; can be zero
+ * for non-burst sync events and burst modes */
+ I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
+ I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
+
+ /* vertical values are in terms of lines */
+ I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
+ I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
+ I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
+}
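+
+/*
+ * Worked example with hypothetical CEA 1080p timings (not from this patch):
+ * hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200 gives
+ * hfp = 88, hsync = 44, hbp = 148 pixels; at bpp == 24 and lane_count == 4
+ * these become 66, 33 and 111 txbyteclkhs respectively, while the vertical
+ * counts are programmed directly in lines.
+ */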
+
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+ int pipe = intel_crtc->pipe;
+ unsigned int bpp = intel_crtc->config.pipe_bpp;
+ u32 val, tmp;
+
+ DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
+ /* escape clock divider, 20MHz, shared for A and C. device ready must be
+ * off when doing this! txclkesc? */
+ tmp = I915_READ(MIPI_CTRL(0));
+ tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+ I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
+
+ /* read request priority is per pipe */
+ tmp = I915_READ(MIPI_CTRL(pipe));
+ tmp &= ~READ_REQUEST_PRIORITY_MASK;
+ I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
+
+ /* XXX: why here, why like this? handling in irq handler?! */
+ I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
+ I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
+
+ I915_WRITE(MIPI_DPHY_PARAM(pipe), intel_dsi->dphy_reg);
+
+ I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
+ adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
+ adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+
+ set_dsi_timings(encoder, adjusted_mode);
+
+ val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT;
+ if (is_cmd_mode(intel_dsi)) {
+ val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT;
+ val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
+ } else {
+ val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
+
+ /* XXX: cross-check bpp vs. pixel format? */
+ val |= intel_dsi->pixel_format;
+ }
+ I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
+
+ /* timeouts for recovery. one frame IIUC. if counter expires, EOT and
+ * stop state. */
+
+ /*
+ * In burst mode, use a value greater than one DPI line time in byte
+ * clock (txbyteclkhs); to make this timer time out, programming 1 +
+ * that value is recommended.
+ *
+ * In non-burst mode, use a value greater than one DPI frame time in
+ * byte clock (txbyteclkhs); to make this timer time out, programming
+ * 1 + that value is recommended.
+ *
+ * In DBI-only mode, use a value greater than one DBI frame time in
+ * byte clock (txbyteclkhs); to make this timer time out, programming
+ * 1 + that value is recommended.
+ */
+
+ if (is_vid_mode(intel_dsi) &&
+ intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+ I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
+ txbyteclkhs(adjusted_mode->htotal, bpp,
+ intel_dsi->lane_count) + 1);
+ } else {
+ I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
+ txbyteclkhs(adjusted_mode->vtotal *
+ adjusted_mode->htotal,
+ bpp, intel_dsi->lane_count) + 1);
+ }
+ I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
+ I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
+ I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), intel_dsi->rst_timer_val);
+
+ /* dphy stuff */
+
+ /* in terms of low power clock */
+ I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(intel_dsi->escape_clk_div, 100));
+
+ val = 0;
+ if (intel_dsi->eotp_pkt == 0)
+ val |= EOT_DISABLE;
+
+ if (intel_dsi->clock_stop)
+ val |= CLOCKSTOP;
+
+ /* recovery disables */
+ I915_WRITE(MIPI_EOT_DISABLE(pipe), val);
+
+ /* in terms of low power clock */
+ I915_WRITE(MIPI_INIT_COUNT(pipe), intel_dsi->init_count);
+
+ /* in terms of txbyteclkhs. actual high to low switch +
+ * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
+ *
+ * XXX: write MIPI_STOP_STATE_STALL?
+ */
+ I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe),
+ intel_dsi->hs_to_lp_count);
+
+ /* XXX: low power clock equivalence in terms of byte clock. the number
+ * of byte clocks occupied in one low power clock. based on txbyteclkhs
+ * and txclkesc. txclkesc time / txbyteclk time * (105 +
+ * MIPI_STOP_STATE_STALL) / 105.???
+ */
+ I915_WRITE(MIPI_LP_BYTECLK(pipe), intel_dsi->lp_byte_clk);
+
+ /* The bandwidth needed for transmitting 16 long packets containing
+ * 252 bytes for the DCS write memory command is programmed in this
+ * register in terms of byte clocks. Based on the DSI transfer rate and
+ * the number of lanes configured, the time taken to transmit 16 long
+ * packets in a DSI stream varies. */
+ I915_WRITE(MIPI_DBI_BW_CTRL(pipe), intel_dsi->bw_timer);
+
+ I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
+ intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
+ intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
+
+ if (is_vid_mode(intel_dsi))
+ /* Some panels might have a resolution which is not a multiple of
+ * 64, like 1366 x 768. Enable RANDOM resolution support for such
+ * panels by default. */
+ I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
+ intel_dsi->video_frmt_cfg_bits |
+ intel_dsi->video_mode_format |
+ IP_TG_CONFIG |
+ RANDOM_DPI_DISPLAY_RESOLUTION);
+}
+
+static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
+{
+ DRM_DEBUG_KMS("\n");
+
+ intel_dsi_prepare(encoder);
+
+ vlv_enable_dsi_pll(encoder);
+}
+
+static enum drm_connector_status
+intel_dsi_detect(struct drm_connector *connector, bool force)
+{
+ struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+ struct intel_encoder *intel_encoder = &intel_dsi->base;
+ enum intel_display_power_domain power_domain;
+ enum drm_connector_status connector_status;
+ struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
+
+ DRM_DEBUG_KMS("\n");
+ power_domain = intel_display_port_power_domain(intel_encoder);
+
+ intel_display_power_get(dev_priv, power_domain);
+ connector_status = intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
+ intel_display_power_put(dev_priv, power_domain);
+
+ return connector_status;
+}
+
+static int intel_dsi_get_modes(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *mode;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!intel_connector->panel.fixed_mode) {
+ DRM_DEBUG_KMS("no fixed mode\n");
+ return 0;
+ }
+
+ mode = drm_mode_duplicate(connector->dev,
+ intel_connector->panel.fixed_mode);
+ if (!mode) {
+ DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+ return 0;
+ }
+
+ drm_mode_probed_add(connector, mode);
+ return 1;
+}
+
+static void intel_dsi_destroy(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ DRM_DEBUG_KMS("\n");
+ intel_panel_fini(&intel_connector->panel);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static const struct drm_encoder_funcs intel_dsi_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
+ .get_modes = intel_dsi_get_modes,
+ .mode_valid = intel_dsi_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_connector_funcs intel_dsi_connector_funcs = {
+ .dpms = intel_connector_dpms,
+ .detect = intel_dsi_detect,
+ .destroy = intel_dsi_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+bool intel_dsi_init(struct drm_device *dev)
+{
+ struct intel_dsi *intel_dsi;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ struct drm_display_mode *fixed_mode = NULL;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_dsi_device *dsi;
+ unsigned int i;
+
+ DRM_DEBUG_KMS("\n");
+
+ /* There is no detection method for MIPI so rely on VBT */
+ if (!dev_priv->vbt.has_mipi)
+ return false;
+
+ intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+ if (!intel_dsi)
+ return false;
+
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_dsi);
+ return false;
+ }
+
+ intel_encoder = &intel_dsi->base;
+ encoder = &intel_encoder->base;
+ intel_dsi->attached_connector = intel_connector;
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+ } else {
+ DRM_ERROR("Unsupported MIPI device to reg base\n");
+ kfree(intel_dsi);
+ kfree(intel_connector);
+ return false;
+ }
+
+ connector = &intel_connector->base;
+
+ drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
+
+ /* XXX: very likely not all of these are needed */
+ intel_encoder->hot_plug = intel_dsi_hot_plug;
+ intel_encoder->compute_config = intel_dsi_compute_config;
+ intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
+ intel_encoder->pre_enable = intel_dsi_pre_enable;
+ intel_encoder->enable = intel_dsi_enable_nop;
+ intel_encoder->disable = intel_dsi_pre_disable;
+ intel_encoder->post_disable = intel_dsi_post_disable;
+ intel_encoder->get_hw_state = intel_dsi_get_hw_state;
+ intel_encoder->get_config = intel_dsi_get_config;
+
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->unregister = intel_connector_unregister;
+
+ for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
+ dsi = &intel_dsi_devices[i];
+ intel_dsi->dev = *dsi;
+
+ if (dsi->dev_ops->init(&intel_dsi->dev))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(intel_dsi_devices)) {
+ DRM_DEBUG_KMS("no device found\n");
+ goto err;
+ }
+
+ intel_encoder->type = INTEL_OUTPUT_DSI;
+ intel_encoder->crtc_mask = (1 << 0); /* XXX */
+
+ intel_encoder->cloneable = 0;
+ drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
+
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+
+ drm_sysfs_connector_add(connector);
+
+ fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
+ if (!fixed_mode) {
+ DRM_DEBUG_KMS("no fixed mode\n");
+ goto err;
+ }
+
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+
+ return true;
+
+err:
+ drm_encoder_cleanup(&intel_encoder->base);
+ kfree(intel_dsi);
+ kfree(intel_connector);
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
new file mode 100644
index 00000000000..31db33d3e5c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_DSI_H
+#define _INTEL_DSI_H
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "intel_drv.h"
+
+struct intel_dsi_device {
+ unsigned int panel_id;
+ const char *name;
+ const struct intel_dsi_dev_ops *dev_ops;
+ void *dev_priv;
+};
+
+struct intel_dsi_dev_ops {
+ bool (*init)(struct intel_dsi_device *dsi);
+
+ void (*panel_reset)(struct intel_dsi_device *dsi);
+
+ void (*disable_panel_power)(struct intel_dsi_device *dsi);
+
+ /* one time programmable commands if needed */
+ void (*send_otp_cmds)(struct intel_dsi_device *dsi);
+
+ /* This callback must be able to assume DSI commands can be sent */
+ void (*enable)(struct intel_dsi_device *dsi);
+
+ /* This callback must be able to assume DSI commands can be sent */
+ void (*disable)(struct intel_dsi_device *dsi);
+
+ int (*mode_valid)(struct intel_dsi_device *dsi,
+ struct drm_display_mode *mode);
+
+ bool (*mode_fixup)(struct intel_dsi_device *dsi,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ void (*mode_set)(struct intel_dsi_device *dsi,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
+
+ bool (*get_hw_state)(struct intel_dsi_device *dev);
+
+ struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
+
+ void (*destroy) (struct intel_dsi_device *dsi);
+};
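+
+/*
+ * A minimal sub-driver sketch (hypothetical names, for illustration only;
+ * the in-tree implementation of these ops is vbt_generic_dsi_display_ops
+ * in intel_dsi_panel_vbt.c):
+ *
+ * static bool my_panel_init(struct intel_dsi_device *dsi)
+ * {
+ * struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
+ *
+ * intel_dsi->lane_count = 4;
+ * intel_dsi->operation_mode = INTEL_DSI_VIDEO_MODE;
+ * return true;
+ * }
+ *
+ * static struct intel_dsi_dev_ops my_panel_ops = {
+ * .init = my_panel_init,
+ * ...
+ * };
+ */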
+
+struct intel_dsi {
+ struct intel_encoder base;
+
+ struct intel_dsi_device dev;
+
+ struct intel_connector *attached_connector;
+
+ /* if true, use HS mode, otherwise LP */
+ bool hs;
+
+ /* virtual channel */
+ int channel;
+
+ /* Video mode or command mode */
+ u16 operation_mode;
+
+ /* number of DSI lanes */
+ unsigned int lane_count;
+
+ /* video mode pixel format for MIPI_DSI_FUNC_PRG register */
+ u32 pixel_format;
+
+ /* video mode format for MIPI_VIDEO_MODE_FORMAT register */
+ u32 video_mode_format;
+
+ /* eot for MIPI_EOT_DISABLE register */
+ u8 eotp_pkt;
+ u8 clock_stop;
+
+ u8 escape_clk_div;
+ u32 port_bits;
+ u32 bw_timer;
+ u32 dphy_reg;
+ u32 video_frmt_cfg_bits;
+ u16 lp_byte_clk;
+
+ /* timeouts in byte clocks */
+ u16 lp_rx_timeout;
+ u16 turn_arnd_val;
+ u16 rst_timer_val;
+ u16 hs_to_lp_count;
+ u16 clk_lp_to_hs_count;
+ u16 clk_hs_to_lp_count;
+
+ u16 init_count;
+
+ /* all delays in ms */
+ u16 backlight_off_delay;
+ u16 backlight_on_delay;
+ u16 panel_on_delay;
+ u16 panel_off_delay;
+ u16 panel_pwr_cycle_delay;
+};
+
+static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_dsi, base.base);
+}
+
+extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
+extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
+
+extern struct intel_dsi_dev_ops vbt_generic_dsi_display_ops;
+
+#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
new file mode 100644
index 00000000000..933c8630523
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <video/mipi_display.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_dsi_cmd.h"
+
+/*
+ * XXX: MIPI_DATA_ADDRESS, MIPI_DATA_LENGTH, MIPI_COMMAND_LENGTH, and
+ * MIPI_COMMAND_ADDRESS registers.
+ *
+ * Apparently these registers provide a MIPI adapter level way to send (lots of)
+ * commands and data to the receiver, without having to write the commands and
+ * data to MIPI_{HS,LP}_GEN_{CTRL,DATA} registers word by word.
+ *
+ * Presumably for anything other than MIPI_DCS_WRITE_MEMORY_START and
+ * MIPI_DCS_WRITE_MEMORY_CONTINUE (which are used to update the external
+ * framebuffer in command mode displays) these are just an optimization that can
+ * come later.
+ *
+ * For memory writes, these should probably be used for performance.
+ */
+
+static void print_stat(struct intel_dsi *intel_dsi)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+
+ val = I915_READ(MIPI_INTR_STAT(pipe));
+
+#define STAT_BIT(val, bit) ((val) & (bit) ? " " #bit : "")
+ DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+ "\n", pipe, val,
+ STAT_BIT(val, TEARING_EFFECT),
+ STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
+ STAT_BIT(val, GEN_READ_DATA_AVAIL),
+ STAT_BIT(val, LP_GENERIC_WR_FIFO_FULL),
+ STAT_BIT(val, HS_GENERIC_WR_FIFO_FULL),
+ STAT_BIT(val, RX_PROT_VIOLATION),
+ STAT_BIT(val, RX_INVALID_TX_LENGTH),
+ STAT_BIT(val, ACK_WITH_NO_ERROR),
+ STAT_BIT(val, TURN_AROUND_ACK_TIMEOUT),
+ STAT_BIT(val, LP_RX_TIMEOUT),
+ STAT_BIT(val, HS_TX_TIMEOUT),
+ STAT_BIT(val, DPI_FIFO_UNDERRUN),
+ STAT_BIT(val, LOW_CONTENTION),
+ STAT_BIT(val, HIGH_CONTENTION),
+ STAT_BIT(val, TXDSI_VC_ID_INVALID),
+ STAT_BIT(val, TXDSI_DATA_TYPE_NOT_RECOGNISED),
+ STAT_BIT(val, TXCHECKSUM_ERROR),
+ STAT_BIT(val, TXECC_MULTIBIT_ERROR),
+ STAT_BIT(val, TXECC_SINGLE_BIT_ERROR),
+ STAT_BIT(val, TXFALSE_CONTROL_ERROR),
+ STAT_BIT(val, RXDSI_VC_ID_INVALID),
+ STAT_BIT(val, RXDSI_DATA_TYPE_NOT_REGOGNISED),
+ STAT_BIT(val, RXCHECKSUM_ERROR),
+ STAT_BIT(val, RXECC_MULTIBIT_ERROR),
+ STAT_BIT(val, RXECC_SINGLE_BIT_ERROR),
+ STAT_BIT(val, RXFALSE_CONTROL_ERROR),
+ STAT_BIT(val, RXHS_RECEIVE_TIMEOUT_ERROR),
+ STAT_BIT(val, RX_LP_TX_SYNC_ERROR),
+ STAT_BIT(val, RXEXCAPE_MODE_ENTRY_ERROR),
+ STAT_BIT(val, RXEOT_SYNC_ERROR),
+ STAT_BIT(val, RXSOT_SYNC_ERROR),
+ STAT_BIT(val, RXSOT_ERROR));
+#undef STAT_BIT
+}
+
+enum dsi_type {
+ DSI_DCS,
+ DSI_GENERIC,
+};
+
+/* enable or disable command mode hs transmissions */
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 temp;
+ u32 mask = DBI_FIFO_EMPTY;
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
+ DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
+
+ temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
+ temp &= DBI_HS_LP_MODE_MASK;
+ I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), enable ? DBI_HS_MODE : DBI_LP_MODE);
+
+ intel_dsi->hs = enable;
+}
+
+static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
+ u8 data_type, u16 data)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 ctrl_reg;
+ u32 ctrl;
+ u32 mask;
+
+ DRM_DEBUG_KMS("channel %d, data_type %d, data %04x\n",
+ channel, data_type, data);
+
+ if (intel_dsi->hs) {
+ ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
+ mask = HS_CTRL_FIFO_FULL;
+ } else {
+ ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
+ mask = LP_CTRL_FIFO_FULL;
+ }
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
+ DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
+ print_stat(intel_dsi);
+ }
+
+ /*
+ * Note: This function is also used for long packets, with length passed
+ * as data, since SHORT_PACKET_PARAM_SHIFT ==
+ * LONG_PACKET_WORD_COUNT_SHIFT.
+ */
+ ctrl = data << SHORT_PACKET_PARAM_SHIFT |
+ channel << VIRTUAL_CHANNEL_SHIFT |
+ data_type << DATA_TYPE_SHIFT;
+
+ I915_WRITE(ctrl_reg, ctrl);
+
+ return 0;
+}
+
+static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
+ u8 data_type, const u8 *data, int len)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 data_reg;
+ int i, j, n;
+ u32 mask;
+
+ DRM_DEBUG_KMS("channel %d, data_type %d, len %04x\n",
+ channel, data_type, len);
+
+ if (intel_dsi->hs) {
+ data_reg = MIPI_HS_GEN_DATA(pipe);
+ mask = HS_DATA_FIFO_FULL;
+ } else {
+ data_reg = MIPI_LP_GEN_DATA(pipe);
+ mask = LP_DATA_FIFO_FULL;
+ }
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
+ DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
+
+ for (i = 0; i < len; i += n) {
+ u32 val = 0;
+ n = min_t(int, len - i, 4);
+
+ for (j = 0; j < n; j++)
+ val |= *data++ << 8 * j;
+
+ I915_WRITE(data_reg, val);
+ /* XXX: check for data fifo full, once that is set, write 4
+ * dwords, then wait for not set, then continue. */
+ }
+
+ return dsi_vc_send_short(intel_dsi, channel, data_type, len);
+}
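+
+/*
+ * For example (illustrative only): the payload bytes { 0x12, 0x34, 0x56,
+ * 0x78 } are packed little-endian into one FIFO write as val == 0x78563412,
+ * and the final dsi_vc_send_short() call transmits the long packet header
+ * with the length in place of the short packet parameter.
+ */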
+
+static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
+ int channel, const u8 *data, int len,
+ enum dsi_type type)
+{
+ int ret;
+
+ if (len == 0) {
+ BUG_ON(type == DSI_DCS);
+ ret = dsi_vc_send_short(intel_dsi, channel,
+ MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
+ 0);
+ } else if (len == 1) {
+ ret = dsi_vc_send_short(intel_dsi, channel,
+ type == DSI_GENERIC ?
+ MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
+ MIPI_DSI_DCS_SHORT_WRITE, data[0]);
+ } else if (len == 2) {
+ ret = dsi_vc_send_short(intel_dsi, channel,
+ type == DSI_GENERIC ?
+ MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
+ MIPI_DSI_DCS_SHORT_WRITE_PARAM,
+ (data[1] << 8) | data[0]);
+ } else {
+ ret = dsi_vc_send_long(intel_dsi, channel,
+ type == DSI_GENERIC ?
+ MIPI_DSI_GENERIC_LONG_WRITE :
+ MIPI_DSI_DCS_LONG_WRITE, data, len);
+ }
+
+ return ret;
+}
+
+int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
+ const u8 *data, int len)
+{
+ return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
+}
+
+int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
+ const u8 *data, int len)
+{
+ return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
+}
+
+static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
+ int channel, u8 dcs_cmd)
+{
+ return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
+ dcs_cmd);
+}
+
+static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
+ int channel, u8 *reqdata,
+ int reqlen)
+{
+ u16 data;
+ u8 data_type;
+
+ switch (reqlen) {
+ case 0:
+ data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
+ data = 0;
+ break;
+ case 1:
+ data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
+ data = reqdata[0];
+ break;
+ case 2:
+ data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
+ data = (reqdata[1] << 8) | reqdata[0];
+ break;
+ default:
+ BUG();
+ }
+
+ return dsi_vc_send_short(intel_dsi, channel, data_type, data);
+}
+
+static int dsi_read_data_return(struct intel_dsi *intel_dsi,
+ u8 *buf, int buflen)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ int i, len = 0;
+ u32 data_reg, val;
+
+ if (intel_dsi->hs) {
+ data_reg = MIPI_HS_GEN_DATA(pipe);
+ } else {
+ data_reg = MIPI_LP_GEN_DATA(pipe);
+ }
+
+ while (len < buflen) {
+ val = I915_READ(data_reg);
+ for (i = 0; i < 4 && len < buflen; i++, len++)
+ buf[len] = val >> 8 * i;
+ }
+
+ return len;
+}
+
+int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
+ u8 *buf, int buflen)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 mask;
+ int ret;
+
+ /*
+ * XXX: should issue multiple read requests and reads if request is
+ * longer than MIPI_MAX_RETURN_PKT_SIZE
+ */
+
+ I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
+
+ ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
+ if (ret)
+ return ret;
+
+ mask = GEN_READ_DATA_AVAIL;
+ if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
+ DRM_ERROR("Timeout waiting for read data.\n");
+
+ ret = dsi_read_data_return(intel_dsi, buf, buflen);
+ if (ret < 0)
+ return ret;
+
+ if (ret != buflen)
+ return -EIO;
+
+ return 0;
+}
+
+int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
+ u8 *reqdata, int reqlen, u8 *buf, int buflen)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 mask;
+ int ret;
+
+ /*
+ * XXX: should issue multiple read requests and reads if request is
+ * longer than MIPI_MAX_RETURN_PKT_SIZE
+ */
+
+ I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
+
+ ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
+ reqlen);
+ if (ret)
+ return ret;
+
+ mask = GEN_READ_DATA_AVAIL;
+ if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
+ DRM_ERROR("Timeout waiting for read data.\n");
+
+ ret = dsi_read_data_return(intel_dsi, buf, buflen);
+ if (ret < 0)
+ return ret;
+
+ if (ret != buflen)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * send a video mode command
+ *
+ * XXX: commands with data in MIPI_DPI_DATA?
+ */
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 mask;
+
+ /* XXX: pipe, hs */
+ if (hs)
+ cmd &= ~DPI_LP_MODE;
+ else
+ cmd |= DPI_LP_MODE;
+
+ /* clear bit */
+ I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
+
+ /* XXX: old code skips write if control unchanged */
+ if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
+ DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
+
+ I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);
+
+ mask = SPL_PKT_SENT_INTERRUPT;
+ if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
+ DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
+
+ return 0;
+}
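+
+/*
+ * Example callers (see intel_dsi.c in this patch): intel_dsi_enable() sends
+ * dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN) and
+ * intel_dsi_pre_disable() sends dpi_send_cmd(intel_dsi, SHUTDOWN,
+ * DPI_LP_MODE_EN), both in LP mode.
+ */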
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
new file mode 100644
index 00000000000..9a18cbfa546
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#ifndef _INTEL_DSI_DSI_H
+#define _INTEL_DSI_DSI_H
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <video/mipi_display.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+
+#define DPI_LP_MODE_EN false
+#define DPI_HS_MODE_EN true
+
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
+
+int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
+ const u8 *data, int len);
+
+int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
+ const u8 *data, int len);
+
+int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
+ u8 *buf, int buflen);
+
+int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
+ u8 *reqdata, int reqlen, u8 *buf, int buflen);
+
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
+
+/* XXX: questionable write helpers */
+static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
+ int channel, u8 dcs_cmd)
+{
+ return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
+}
+
+static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
+ int channel, u8 dcs_cmd, u8 param)
+{
+ u8 buf[2] = { dcs_cmd, param };
+ return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
+}
+
+static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
+ int channel)
+{
+ return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
+}
+
+static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
+ int channel, u8 param)
+{
+ return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
+}
+
+static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
+ int channel, u8 param1, u8 param2)
+{
+ u8 buf[2] = { param1, param2 };
+ return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
+}
+
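+/*
+ * Usage sketch (hypothetical, for illustration): a panel driver could bring
+ * a DCS panel out of sleep with
+ *
+ * dsi_vc_dcs_write_0(intel_dsi, 0, MIPI_DCS_EXIT_SLEEP_MODE);
+ * msleep(120);
+ * dsi_vc_dcs_write_0(intel_dsi, 0, MIPI_DCS_SET_DISPLAY_ON);
+ *
+ * using virtual channel 0 and the DCS opcodes from <video/mipi_display.h>.
+ */
+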
+/* XXX: questionable read helpers */
+static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
+ int channel, u8 *buf, int buflen)
+{
+ return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
+}
+
+static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
+ int channel, u8 param, u8 *buf,
+ int buflen)
+{
+ return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
+}
+
+static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
+ int channel, u8 param1, u8 param2,
+ u8 *buf, int buflen)
+{
+ u8 req[2] = { param1, param2 };
+
+ return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
+}
+
+
+#endif /* _INTEL_DSI_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
new file mode 100644
index 00000000000..21a0d348ced
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -0,0 +1,589 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Shobhit Kumar <shobhit.kumar@intel.com>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/i915_drm.h>
+#include <linux/slab.h>
+#include <video/mipi_display.h>
+#include <asm/intel-mid.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_dsi_cmd.h"
+
+#define MIPI_TRANSFER_MODE_SHIFT 0
+#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
+#define MIPI_PORT_SHIFT 3
+
+#define PREPARE_CNT_MAX 0x3F
+#define EXIT_ZERO_CNT_MAX 0x3F
+#define CLK_ZERO_CNT_MAX 0xFF
+#define TRAIL_CNT_MAX 0x1F
+
+#define NS_KHZ_RATIO 1000000
+
+#define GPI0_NC_0_HV_DDI0_HPD 0x4130
+#define GPIO_NC_0_HV_DDI0_PAD 0x4138
+#define GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
+#define GPIO_NC_1_HV_DDI0_DDC_SDA_PAD 0x4128
+#define GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
+#define GPIO_NC_2_HV_DDI0_DDC_SCL_PAD 0x4118
+#define GPIO_NC_3_PANEL0_VDDEN 0x4140
+#define GPIO_NC_3_PANEL0_VDDEN_PAD 0x4148
+#define GPIO_NC_4_PANEL0_BLKEN 0x4150
+#define GPIO_NC_4_PANEL0_BLKEN_PAD 0x4158
+#define GPIO_NC_5_PANEL0_BLKCTL 0x4160
+#define GPIO_NC_5_PANEL0_BLKCTL_PAD 0x4168
+#define GPIO_NC_6_PCONF0 0x4180
+#define GPIO_NC_6_PAD 0x4188
+#define GPIO_NC_7_PCONF0 0x4190
+#define GPIO_NC_7_PAD 0x4198
+#define GPIO_NC_8_PCONF0 0x4170
+#define GPIO_NC_8_PAD 0x4178
+#define GPIO_NC_9_PCONF0 0x4100
+#define GPIO_NC_9_PAD 0x4108
+#define GPIO_NC_10_PCONF0 0x40E0
+#define GPIO_NC_10_PAD 0x40E8
+#define GPIO_NC_11_PCONF0 0x40F0
+#define GPIO_NC_11_PAD 0x40F8
+
+struct gpio_table {
+ u16 function_reg;
+ u16 pad_reg;
+ u8 init;
+};
+
+static struct gpio_table gtable[] = {
+ { GPI0_NC_0_HV_DDI0_HPD, GPIO_NC_0_HV_DDI0_PAD, 0 },
+ { GPIO_NC_1_HV_DDI0_DDC_SDA, GPIO_NC_1_HV_DDI0_DDC_SDA_PAD, 0 },
+ { GPIO_NC_2_HV_DDI0_DDC_SCL, GPIO_NC_2_HV_DDI0_DDC_SCL_PAD, 0 },
+ { GPIO_NC_3_PANEL0_VDDEN, GPIO_NC_3_PANEL0_VDDEN_PAD, 0 },
+ { GPIO_NC_4_PANEL0_BLKEN, GPIO_NC_4_PANEL0_BLKEN_PAD, 0 },
+ { GPIO_NC_5_PANEL0_BLKCTL, GPIO_NC_5_PANEL0_BLKCTL_PAD, 0 },
+ { GPIO_NC_6_PCONF0, GPIO_NC_6_PAD, 0 },
+ { GPIO_NC_7_PCONF0, GPIO_NC_7_PAD, 0 },
+ { GPIO_NC_8_PCONF0, GPIO_NC_8_PAD, 0 },
+ { GPIO_NC_9_PCONF0, GPIO_NC_9_PAD, 0 },
+	{ GPIO_NC_10_PCONF0, GPIO_NC_10_PAD, 0 },
+	{ GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0 }
+};
+
+static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
+{
+ u8 type, byte, mode, vc, port;
+ u16 len;
+
+ byte = *data++;
+ mode = (byte >> MIPI_TRANSFER_MODE_SHIFT) & 0x1;
+ vc = (byte >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 0x3;
+ port = (byte >> MIPI_PORT_SHIFT) & 0x3;
+
+ /* LP or HS mode */
+ intel_dsi->hs = mode;
+
+ /* get packet type and increment the pointer */
+ type = *data++;
+
+ len = *((u16 *) data);
+ data += 2;
+
+ switch (type) {
+ case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+ dsi_vc_generic_write_0(intel_dsi, vc);
+ break;
+ case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+ dsi_vc_generic_write_1(intel_dsi, vc, *data);
+ break;
+ case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+ dsi_vc_generic_write_2(intel_dsi, vc, *data, *(data + 1));
+ break;
+ case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+ DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
+ break;
+ case MIPI_DSI_GENERIC_LONG_WRITE:
+ dsi_vc_generic_write(intel_dsi, vc, data, len);
+ break;
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ dsi_vc_dcs_write_0(intel_dsi, vc, *data);
+ break;
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ dsi_vc_dcs_write_1(intel_dsi, vc, *data, *(data + 1));
+ break;
+ case MIPI_DSI_DCS_READ:
+ DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
+ break;
+ case MIPI_DSI_DCS_LONG_WRITE:
+ dsi_vc_dcs_write(intel_dsi, vc, data, len);
+ break;
+	}
+
+ data += len;
+
+ return data;
+}
+
+static u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, u8 *data)
+{
+ u32 delay = *((u32 *) data);
+
+ usleep_range(delay, delay + 10);
+ data += 4;
+
+ return data;
+}
+
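+/*
+ * A GPIO element carries two payload bytes: an index into gtable[]
+ * above and the pull up/down action to program on the pad register.
+ */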
+static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data)
+{
+ u8 gpio, action;
+ u16 function, pad;
+ u32 val;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gpio = *data++;
+
+ /* pull up/down */
+ action = *data++;
+
+	if (gpio >= ARRAY_SIZE(gtable)) {
+		DRM_ERROR("unknown gpio %u\n", gpio);
+		return data;
+	}
+
+	function = gtable[gpio].function_reg;
+	pad = gtable[gpio].pad_reg;
+
+ mutex_lock(&dev_priv->dpio_lock);
+ if (!gtable[gpio].init) {
+ /* program the function */
+ /* FIXME: remove constant below */
+ vlv_gpio_nc_write(dev_priv, function, 0x2000CC00);
+ gtable[gpio].init = 1;
+ }
+
+ val = 0x4 | action;
+
+ /* pull up/down */
+ vlv_gpio_nc_write(dev_priv, pad, val);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ return data;
+}
+
+typedef u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi, u8 *data);
+static const fn_mipi_elem_exec exec_elem[] = {
+ NULL, /* reserved */
+ mipi_exec_send_packet,
+ mipi_exec_delay,
+ mipi_exec_gpio,
+ NULL, /* status read; later */
+};
+
+/*
+ * MIPI Sequence from VBT #53 parsing logic
+ * We have already separated each sequence during BIOS parsing
+ * Following is a generic execution function for any sequence
+ */
+
+static const char * const seq_name[] = {
+ "UNDEFINED",
+ "MIPI_SEQ_ASSERT_RESET",
+ "MIPI_SEQ_INIT_OTP",
+ "MIPI_SEQ_DISPLAY_ON",
+ "MIPI_SEQ_DISPLAY_OFF",
+ "MIPI_SEQ_DEASSERT_RESET"
+};
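+
+/*
+ * Illustrative only (not taken from any real VBT): a one-element
+ * MIPI_SEQ_DISPLAY_ON sequence carrying a single DCS short write of
+ * 0x29 (set_display_on) would be laid out as:
+ *   0x03,              sequence id (MIPI_SEQ_DISPLAY_ON)
+ *   0x01,              element id: send packet
+ *   0x00,              flags: LP mode, virtual channel 0, port 0
+ *   0x05, 0x01, 0x00,  type MIPI_DSI_DCS_SHORT_WRITE, len = 1
+ *   0x29,              packet payload
+ *   0x00               end-of-sequence marker
+ */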
+
+static void generic_exec_sequence(struct intel_dsi *intel_dsi, char *sequence)
+{
+	u8 *data = (u8 *)sequence;
+ fn_mipi_elem_exec mipi_elem_exec;
+ int index;
+
+ if (!sequence)
+ return;
+
+	DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n",
+			 *data < ARRAY_SIZE(seq_name) ? seq_name[*data] : "?");
+
+ /* go to the first element of the sequence */
+ data++;
+
+ /* parse each byte till we reach end of sequence byte - 0x00 */
+ while (1) {
+		index = *data;
+		if (index >= ARRAY_SIZE(exec_elem) || !exec_elem[index]) {
+			DRM_ERROR("Unsupported MIPI element %d, skipping sequence execution\n",
+				  index);
+			return;
+		}
+		mipi_elem_exec = exec_elem[index];
+
+ /* goto element payload */
+ data++;
+
+		/* execute the element specific routines */
+ data = mipi_elem_exec(intel_dsi, data);
+
+		/*
+		 * After processing the element, data should point to the
+		 * next element or to the end of the sequence; check if we
+		 * have reached the end of the sequence (0x00).
+		 */
+ if (*data == 0x00)
+ break;
+ }
+}
+
+static bool generic_init(struct intel_dsi_device *dsi)
+{
+ struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
+ struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
+ u32 bits_per_pixel = 24;
+ u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
+ u32 ui_num, ui_den;
+ u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+ u32 ths_prepare_ns, tclk_trail_ns;
+ u32 tclk_prepare_clkzero, ths_prepare_hszero;
+ u32 lp_to_hs_switch, hs_to_lp_switch;
+
+ DRM_DEBUG_KMS("\n");
+
+ intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
+ intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
+ intel_dsi->lane_count = mipi_config->lane_cnt + 1;
+ intel_dsi->pixel_format = mipi_config->videomode_color_format << 7;
+
+ if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
+ bits_per_pixel = 18;
+ else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
+ bits_per_pixel = 16;
+
+ bitrate = (mode->clock * bits_per_pixel) / intel_dsi->lane_count;
+
+ intel_dsi->operation_mode = mipi_config->is_cmd_mode;
+ intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
+ intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
+ intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
+ intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
+ intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
+ intel_dsi->init_count = mipi_config->master_init_timer;
+ intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
+ intel_dsi->video_frmt_cfg_bits = mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+
+ switch (intel_dsi->escape_clk_div) {
+ case 0:
+ tlpx_ns = 50;
+ break;
+	case 1:
+		tlpx_ns = 100;
+		break;
+	case 2:
+		tlpx_ns = 200;
+		break;
+ default:
+ tlpx_ns = 50;
+ break;
+ }
+
+ switch (intel_dsi->lane_count) {
+ case 1:
+ case 2:
+ extra_byte_count = 2;
+ break;
+ case 3:
+ extra_byte_count = 4;
+ break;
+ case 4:
+ default:
+ extra_byte_count = 3;
+ break;
+ }
+
+	/*
+	 * ui(s) = 1/f [f in Hz]
+	 * ui(ns) = 10^9 / (f*10^6) [f in MHz] -> 10^3/f(MHz)
+	 */
+
+	/* bitrate is in kbps, so UI in ns = ui_num / ui_den */
+	ui_num = NS_KHZ_RATIO;
+	ui_den = bitrate;
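+
+	/*
+	 * Illustrative arithmetic (assumed mode, not from a VBT): a
+	 * 148500 kHz pixel clock at 24 bpp on 4 lanes gives a bitrate
+	 * of 891000 kbps, i.e. one UI ~= 1000000/891000 ~= 1.12 ns.
+	 */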
+
+ tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
+ ths_prepare_hszero = mipi_config->ths_prepare_hszero;
+
+ /*
+ * B060
+ * LP byte clock = TLPX/ (8UI)
+ */
+ intel_dsi->lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num);
+
+ /* count values in UI = (ns value) * (bitrate / (2 * 10^6))
+ *
+ * Since txddrclkhs_i is 2xUI, all the count values programmed in
+ * DPHY param register are divided by 2
+ *
+ * prepare count
+ */
+ ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare);
+ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
+
+ /* exit zero count */
+	exit_zero_cnt = DIV_ROUND_UP((ths_prepare_hszero - ths_prepare_ns)
+				     * ui_den, ui_num * 2);
+
+	/*
+	 * exit_zero is a unified value covering both ths_zero and ths_exit.
+	 * The minimum value for ths_exit is 110 ns, so
+	 * min (exit_zero_cnt * 2) = 110 / UI
+	 * exit_zero_cnt = 55 / UI
+	 */
+ if (exit_zero_cnt < (55 * ui_den / ui_num))
+ if ((55 * ui_den) % ui_num)
+ exit_zero_cnt += 1;
+
+ /* clk zero count */
+ clk_zero_cnt = DIV_ROUND_UP(
+ (tclk_prepare_clkzero - ths_prepare_ns)
+ * ui_den, 2 * ui_num);
+
+ /* trail count */
+ tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
+ trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, 2 * ui_num);
+
+ if (prepare_cnt > PREPARE_CNT_MAX ||
+ exit_zero_cnt > EXIT_ZERO_CNT_MAX ||
+ clk_zero_cnt > CLK_ZERO_CNT_MAX ||
+ trail_cnt > TRAIL_CNT_MAX)
+ DRM_DEBUG_DRIVER("Values crossing maximum limits, restricting to max values\n");
+
+ if (prepare_cnt > PREPARE_CNT_MAX)
+ prepare_cnt = PREPARE_CNT_MAX;
+
+ if (exit_zero_cnt > EXIT_ZERO_CNT_MAX)
+ exit_zero_cnt = EXIT_ZERO_CNT_MAX;
+
+ if (clk_zero_cnt > CLK_ZERO_CNT_MAX)
+ clk_zero_cnt = CLK_ZERO_CNT_MAX;
+
+ if (trail_cnt > TRAIL_CNT_MAX)
+ trail_cnt = TRAIL_CNT_MAX;
+
+ /* B080 */
+ intel_dsi->dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 |
+ clk_zero_cnt << 8 | prepare_cnt;
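+
+	/*
+	 * For example (counts assumed): exit_zero = 0x0a, trail = 0x05,
+	 * clk_zero = 0x18 and prepare = 0x06 pack to dphy_reg = 0x0a051806.
+	 */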
+
+ /*
+ * LP to HS switch count = 4TLPX + PREP_COUNT * 2 + EXIT_ZERO_COUNT * 2
+ * + 10UI + Extra Byte Count
+ *
+ * HS to LP switch count = THS-TRAIL + 2TLPX + Extra Byte Count
+ * Extra Byte Count is calculated according to number of lanes.
+ * High Low Switch Count is the Max of LP to HS and
+ * HS to LP switch count
+ *
+ */
+ tlpx_ui = DIV_ROUND_UP(tlpx_ns * ui_den, ui_num);
+
+ /* B044 */
+	/* FIXME:
+	 * The comment above does not match the code */
+ lp_to_hs_switch = DIV_ROUND_UP(4 * tlpx_ui + prepare_cnt * 2 +
+ exit_zero_cnt * 2 + 10, 8);
+
+ hs_to_lp_switch = DIV_ROUND_UP(mipi_config->ths_trail + 2 * tlpx_ui, 8);
+
+ intel_dsi->hs_to_lp_count = max(lp_to_hs_switch, hs_to_lp_switch);
+ intel_dsi->hs_to_lp_count += extra_byte_count;
+
+ /* B088 */
+	/* LP -> HS for clock lanes
+	 * LP clk sync + LP11 + LP01 + tclk_prepare + tclk_zero +
+	 * extra byte count
+	 * 2TLPX + 1TLPX + 1TLPX(in ns) + prepare_cnt * 2 + clk_zero_cnt *
+	 * 2(in UI) + extra byte count
+	 * In byteclks = (4TLPX + prepare_cnt * 2 + clk_zero_cnt * 2 (in UI)) /
+	 * 8 + extra byte count
+	 */
+	intel_dsi->clk_lp_to_hs_count = DIV_ROUND_UP(4 * tlpx_ui +
+						     prepare_cnt * 2 +
+						     clk_zero_cnt * 2, 8);
+
+ intel_dsi->clk_lp_to_hs_count += extra_byte_count;
+
+	/* HS->LP for Clock Lanes
+	 * Low Power clock synchronisations + 1Tx byteclk + tclk_trail +
+	 * Extra byte count
+	 * 2TLPX + 8UI + (trail_count*2)(in UI) + Extra byte count
+	 * In byteclks = (2*TLPX(in UI) + trail_count*2 + 8)(in UI)/8 +
+	 * Extra byte count
+	 */
+	intel_dsi->clk_hs_to_lp_count =
+		DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8, 8);
+ intel_dsi->clk_hs_to_lp_count += extra_byte_count;
+
+ DRM_DEBUG_KMS("Eot %s\n", intel_dsi->eotp_pkt ? "enabled" : "disabled");
+	DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ?
+						"enabled" : "disabled");
+ DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
+ DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
+ DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
+ DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
+ DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
+ DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count);
+ DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
+ DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
+ DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
+ DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
+ DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
+ DRM_DEBUG_KMS("BTA %s\n",
+ intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA ?
+ "disabled" : "enabled");
+
+	/* Delays in the VBT are in units of 100 us, so convert to ms here:
+	 * delay * 100 us = delay / 10 ms */
+ intel_dsi->backlight_off_delay = pps->bl_disable_delay / 10;
+ intel_dsi->backlight_on_delay = pps->bl_enable_delay / 10;
+ intel_dsi->panel_on_delay = pps->panel_on_delay / 10;
+ intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
+ intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
+
+ return true;
+}
+
+static int generic_mode_valid(struct intel_dsi_device *dsi,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static bool generic_mode_fixup(struct intel_dsi_device *dsi,
+			       const struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void generic_panel_reset(struct intel_dsi_device *dsi)
+{
+ struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
+
+ generic_exec_sequence(intel_dsi, sequence);
+}
+
+static void generic_disable_panel_power(struct intel_dsi_device *dsi)
+{
+ struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
+
+ generic_exec_sequence(intel_dsi, sequence);
+}
+
+static void generic_send_otp_cmds(struct intel_dsi_device *dsi)
+{
+ struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+
+ generic_exec_sequence(intel_dsi, sequence);
+}
+
+static void generic_enable(struct intel_dsi_device *dsi)
+{
+ struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
+
+ generic_exec_sequence(intel_dsi, sequence);
+}
+
+static void generic_disable(struct intel_dsi_device *dsi)
+{
+ struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
+
+ generic_exec_sequence(intel_dsi, sequence);
+}
+
+static enum drm_connector_status generic_detect(struct intel_dsi_device *dsi)
+{
+ return connector_status_connected;
+}
+
+static bool generic_get_hw_state(struct intel_dsi_device *dev)
+{
+ return true;
+}
+
+static struct drm_display_mode *generic_get_modes(struct intel_dsi_device *dsi)
+{
+ struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->vbt.lfp_lvds_vbt_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ return dev_priv->vbt.lfp_lvds_vbt_mode;
+}
+
+static void generic_destroy(struct intel_dsi_device *dsi) { }
+
+/* Callbacks. We might not need them all. */
+struct intel_dsi_dev_ops vbt_generic_dsi_display_ops = {
+ .init = generic_init,
+ .mode_valid = generic_mode_valid,
+ .mode_fixup = generic_mode_fixup,
+ .panel_reset = generic_panel_reset,
+ .disable_panel_power = generic_disable_panel_power,
+ .send_otp_cmds = generic_send_otp_cmds,
+ .enable = generic_enable,
+ .disable = generic_disable,
+ .detect = generic_detect,
+ .get_hw_state = generic_get_hw_state,
+ .get_modes = generic_get_modes,
+ .destroy = generic_destroy,
+};
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
new file mode 100644
index 00000000000..ba79ec19da3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -0,0 +1,300 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Shobhit Kumar <shobhit.kumar@intel.com>
+ * Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+#include "intel_dsi.h"
+
+#define DSI_HSS_PACKET_SIZE 4
+#define DSI_HSE_PACKET_SIZE 4
+#define DSI_HSA_PACKET_EXTRA_SIZE 6
+#define DSI_HBP_PACKET_EXTRA_SIZE 6
+#define DSI_HACTIVE_PACKET_EXTRA_SIZE 6
+#define DSI_HFP_PACKET_EXTRA_SIZE 6
+#define DSI_EOTP_PACKET_SIZE 4
+
+struct dsi_mnp {
+ u32 dsi_pll_ctrl;
+ u32 dsi_pll_div;
+};
+
+static const u32 lfsr_converts[] = {
+ 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
+ 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
+ 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
+ 71, 35 /* 91 - 92 */
+};
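+
+/*
+ * The PLL M divider is presumably programmed as an LFSR seed rather
+ * than a plain binary value; lfsr_converts[] maps each usable M
+ * (62-92) to its seed and is indexed as lfsr_converts[m - 62] below.
+ */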
+
+#ifdef DSI_CLK_FROM_RR
+
+static u32 dsi_rr_formula(const struct drm_display_mode *mode,
+ int pixel_format, int video_mode_format,
+ int lane_count, bool eotp)
+{
+ u32 bpp;
+ u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
+ u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
+ u32 bytes_per_line, bytes_per_frame;
+ u32 num_frames;
+ u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
+ u32 dsi_bit_clock_hz;
+ u32 dsi_clk;
+
+ switch (pixel_format) {
+ default:
+ case VID_MODE_FORMAT_RGB888:
+ case VID_MODE_FORMAT_RGB666_LOOSE:
+ bpp = 24;
+ break;
+ case VID_MODE_FORMAT_RGB666:
+ bpp = 18;
+ break;
+ case VID_MODE_FORMAT_RGB565:
+ bpp = 16;
+ break;
+ }
+
+ hactive = mode->hdisplay;
+ vactive = mode->vdisplay;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsync = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsync = mode->vsync_end - mode->vsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
+ hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
+ hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
+ hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
+
+ bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
+ DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
+ hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
+ hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
+ hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
+
+ /*
+ * XXX: Need to accurately calculate LP to HS transition timeout and add
+ * it to bytes_per_line/bytes_per_frame.
+ */
+
+ if (eotp && video_mode_format == VIDEO_MODE_BURST)
+ bytes_per_line += DSI_EOTP_PACKET_SIZE;
+
+ bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
+ vactive * bytes_per_line + vfp * bytes_per_line;
+
+ if (eotp &&
+ (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
+ video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
+ bytes_per_frame += DSI_EOTP_PACKET_SIZE;
+
+ num_frames = drm_mode_vrefresh(mode);
+ bytes_per_x_frames = num_frames * bytes_per_frame;
+
+ bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
+
+ /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
+ dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
+ dsi_clk = dsi_bit_clock_hz / 1000;
+
+ if (eotp && video_mode_format == VIDEO_MODE_BURST)
+ dsi_clk *= 2;
+
+ return dsi_clk;
+}
+
+#else
+
+/* Get DSI clock from pixel clock */
+static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode,
+ int pixel_format, int lane_count)
+{
+ u32 dsi_clk_khz;
+ u32 bpp;
+
+ switch (pixel_format) {
+ default:
+ case VID_MODE_FORMAT_RGB888:
+ case VID_MODE_FORMAT_RGB666_LOOSE:
+ bpp = 24;
+ break;
+ case VID_MODE_FORMAT_RGB666:
+ bpp = 18;
+ break;
+ case VID_MODE_FORMAT_RGB565:
+ bpp = 16;
+ break;
+ }
+
+	/* DSI data rate = pixel clock * bits per pixel / lane count.
+	 * mode->clock is already in kHz, so the result is in kbps (the
+	 * per-lane bit clock in kHz); no unit conversion is needed. */
+	dsi_clk_khz = DIV_ROUND_CLOSEST(mode->clock * bpp, lane_count);
+
+ return dsi_clk_khz;
+}
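+
+/*
+ * Illustrative arithmetic (assumed mode): a 148500 kHz pixel clock at
+ * 24 bpp on 4 lanes yields DIV_ROUND_CLOSEST(148500 * 24, 4) =
+ * 891000 kbps.
+ */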
+
+#endif
+
+static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
+{
+ u32 m, n, p;
+ u32 ref_clk;
+ u32 error;
+ u32 tmp_error;
+ int target_dsi_clk;
+ int calc_dsi_clk;
+ u32 calc_m;
+ u32 calc_p;
+ u32 m_seed;
+
+	/* dsi_clk is expected in kHz */
+ if (dsi_clk < 300000 || dsi_clk > 1150000) {
+ DRM_ERROR("DSI CLK Out of Range\n");
+ return -ECHRNG;
+ }
+
+ ref_clk = 25000;
+ target_dsi_clk = dsi_clk;
+ error = 0xFFFFFFFF;
+ tmp_error = 0xFFFFFFFF;
+ calc_m = 0;
+ calc_p = 0;
+
+ for (m = 62; m <= 92; m++) {
+ for (p = 2; p <= 6; p++) {
+			/* Find the optimal m and p divisors with minimal
+			 * error +/- the required clock */
+ calc_dsi_clk = (m * ref_clk) / p;
+ if (calc_dsi_clk == target_dsi_clk) {
+ calc_m = m;
+ calc_p = p;
+ error = 0;
+ break;
+ } else
+ tmp_error = abs(target_dsi_clk - calc_dsi_clk);
+
+ if (tmp_error < error) {
+ error = tmp_error;
+ calc_m = m;
+ calc_p = p;
+ }
+ }
+
+ if (error == 0)
+ break;
+ }
+
+ m_seed = lfsr_converts[calc_m - 62];
+ n = 1;
+ dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
+ dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
+ m_seed << DSI_PLL_M1_DIV_SHIFT;
+
+ return 0;
+}
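+
+/*
+ * Worked example (target value assumed): for dsi_clk = 891000 kHz and
+ * the 25 MHz reference, the search above settles on m = 71, p = 2
+ * (887500 kHz, error 3500 kHz), so m_seed = lfsr_converts[9] = 461.
+ */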
+
+/*
+ * XXX: The muxing and gating is hard coded for now. Need to add support for
+ * sharing PLLs with two DSI outputs.
+ */
+static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ int ret;
+ struct dsi_mnp dsi_mnp;
+ u32 dsi_clk;
+
+ dsi_clk = dsi_clk_from_pclk(mode, intel_dsi->pixel_format,
+ intel_dsi->lane_count);
+
+ ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
+ if (ret) {
+ DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
+ return;
+ }
+
+ dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+
+ DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
+ dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
+
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
+}
+
+void vlv_enable_dsi_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ u32 tmp;
+
+ DRM_DEBUG_KMS("\n");
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ vlv_configure_dsi_pll(encoder);
+
+ /* wait at least 0.5 us after ungating before enabling VCO */
+ usleep_range(1, 10);
+
+ tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+ tmp |= DSI_PLL_VCO_EN;
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
+ DRM_ERROR("DSI PLL lock failed\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("DSI PLL locked\n");
+}
+
+void vlv_disable_dsi_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ u32 tmp;
+
+ DRM_DEBUG_KMS("\n");
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+ tmp &= ~DSI_PLL_VCO_EN;
+ tmp |= DSI_PLL_LDO_GATE;
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a4d2606de77..a3631c0a5c2 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -25,19 +25,20 @@
* Eric Anholt <eric@anholt.net>
*/
#include <linux/i2c.h>
-#include "drmP.h"
-#include "drm.h"
-#include "drm_crtc.h"
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
#include "intel_drv.h"
-#include "i915_drm.h"
+#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "dvo.h"
#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
#define TFP410_ADDR 0x38
+#define NS2501_ADDR 0x38
-static struct intel_dvo_device intel_dvo_devices[] = {
+static const struct intel_dvo_device intel_dvo_devices[] = {
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "sil164",
@@ -53,6 +54,13 @@ static struct intel_dvo_device intel_dvo_devices[] = {
.dev_ops = &ch7xxx_ops,
},
{
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "ch7xxx",
+ .dvo_reg = DVOC,
+ .slave_addr = 0x75, /* For some ch7010 */
+ .dev_ops = &ch7xxx_ops,
+ },
+ {
.type = INTEL_DVO_CHIP_LVDS,
.name = "ivch",
.dvo_reg = DVOA,
@@ -71,94 +79,195 @@ static struct intel_dvo_device intel_dvo_devices[] = {
.name = "ch7017",
.dvo_reg = DVOC,
.slave_addr = 0x75,
- .gpio = GPIOE,
+ .gpio = GMBUS_PORT_DPB,
.dev_ops = &ch7017_ops,
- }
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "ns2501",
+ .dvo_reg = DVOC,
+ .slave_addr = NS2501_ADDR,
+ .dev_ops = &ns2501_ops,
+ }
};
-static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
+struct intel_dvo {
+ struct intel_encoder base;
+
+ struct intel_dvo_device dev;
+
+ struct drm_display_mode *panel_fixed_mode;
+ bool panel_wants_dither;
+};
+
+static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
+{
+ return container_of(encoder, struct intel_dvo, base);
+}
+
+static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
+{
+ return enc_to_dvo(intel_attached_encoder(connector));
+}
+
+static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+
+ return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
+}
+
+static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ u32 tmp;
+
+ tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+ if (!(tmp & DVO_ENABLE))
+ return false;
+
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_dvo_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ u32 tmp, flags = 0;
+
+ tmp = I915_READ(intel_dvo->dev.dvo_reg);
+ if (tmp & DVO_HSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+ if (tmp & DVO_VSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ pipe_config->adjusted_mode.flags |= flags;
+
+ pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
+static void intel_disable_dvo(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
- u32 dvo_reg = dvo->dvo_reg;
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
- if (mode == DRM_MODE_DPMS_ON) {
- I915_WRITE(dvo_reg, temp | DVO_ENABLE);
- I915_READ(dvo_reg);
- dvo->dev_ops->dpms(dvo, mode);
- } else {
- dvo->dev_ops->dpms(dvo, mode);
- I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
- I915_READ(dvo_reg);
- }
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+ I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
+ I915_READ(dvo_reg);
}
-static void intel_dvo_save(struct drm_connector *connector)
+static void intel_enable_dvo(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ u32 dvo_reg = intel_dvo->dev.dvo_reg;
+ u32 temp = I915_READ(dvo_reg);
- /* Each output should probably just save the registers it touches,
- * but for now, use more overkill.
- */
- dev_priv->saveDVOA = I915_READ(DVOA);
- dev_priv->saveDVOB = I915_READ(DVOB);
- dev_priv->saveDVOC = I915_READ(DVOC);
+ I915_WRITE(dvo_reg, temp | DVO_ENABLE);
+ I915_READ(dvo_reg);
+ intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+ &crtc->config.requested_mode,
+ &crtc->config.adjusted_mode);
- dvo->dev_ops->save(dvo);
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
-static void intel_dvo_restore(struct drm_connector *connector)
+/* Special dpms function to support cloning between dvo/sdvo/crt. */
+static void intel_dvo_dpms(struct drm_connector *connector, int mode)
{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct drm_crtc *crtc;
+ struct intel_crtc_config *config;
+
+ /* dvo supports only 2 dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = intel_dvo->base.base.crtc;
+ if (!crtc) {
+ intel_dvo->base.connectors_active = false;
+ return;
+ }
- dvo->dev_ops->restore(dvo);
+ /* We call connector dpms manually below in case pipe dpms doesn't
+ * change due to cloning. */
+ if (mode == DRM_MODE_DPMS_ON) {
+ config = &to_intel_crtc(crtc)->config;
+
+ intel_dvo->base.connectors_active = true;
+
+ intel_crtc_update_dpms(crtc);
+
+ intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+ &config->requested_mode,
+ &config->adjusted_mode);
+
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+ } else {
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+
+ intel_dvo->base.connectors_active = false;
- I915_WRITE(DVOA, dev_priv->saveDVOA);
- I915_WRITE(DVOB, dev_priv->saveDVOB);
- I915_WRITE(DVOC, dev_priv->saveDVOC);
+ intel_crtc_update_dpms(crtc);
+ }
+
+ intel_modeset_check_state(connector->dev);
}
-static int intel_dvo_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_dvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
/* XXX: Validate clock range */
- if (dvo->panel_fixed_mode) {
- if (mode->hdisplay > dvo->panel_fixed_mode->hdisplay)
+ if (intel_dvo->panel_fixed_mode) {
+ if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay)
+ if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
return MODE_PANEL;
}
- return dvo->dev_ops->mode_valid(dvo, mode);
+ return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
}
-static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool intel_dvo_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
/* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- if (dvo->panel_fixed_mode != NULL) {
-#define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x
+ if (intel_dvo->panel_fixed_mode != NULL) {
+#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x
C(hdisplay);
C(hsync_start);
C(hsync_end);
@@ -168,29 +277,24 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
C(vsync_end);
C(vtotal);
C(clock);
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
#undef C
- }
- if (dvo->dev_ops->mode_fixup)
- return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode);
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ }
return true;
}
-static void intel_dvo_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_dvo_pre_enable(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
- int pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ int pipe = crtc->pipe;
u32 dvo_val;
- u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
switch (dvo_reg) {
case DVOA:
@@ -205,8 +309,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
break;
}
- dvo->dev_ops->mode_set(dvo, mode, adjusted_mode);
-
/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &
(DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
@@ -221,8 +323,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
- I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
-
/*I915_WRITE(DVOB_SRCDIM,
(adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
@@ -238,87 +338,50 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
*
* Unimplemented.
*/
-static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_dvo_detect(struct drm_connector *connector, bool force)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
-
- return dvo->dev_ops->detect(dvo);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
static int intel_dvo_get_modes(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
/* We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- intel_ddc_get_modes(intel_output);
+ intel_ddc_get_modes(connector,
+ intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC));
if (!list_empty(&connector->probed_modes))
return 1;
-
- if (dvo->panel_fixed_mode != NULL) {
+ if (intel_dvo->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode);
+ mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode);
if (mode) {
drm_mode_probed_add(connector, mode);
return 1;
}
}
+
return 0;
}
-static void intel_dvo_destroy (struct drm_connector *connector)
+static void intel_dvo_destroy(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
-
- if (dvo) {
- if (dvo->dev_ops->destroy)
- dvo->dev_ops->destroy(dvo);
- if (dvo->panel_fixed_mode)
- kfree(dvo->panel_fixed_mode);
- /* no need, in i830_dvoices[] now */
- //kfree(dvo);
- }
- if (intel_output->i2c_bus)
- intel_i2c_destroy(intel_output->i2c_bus);
- if (intel_output->ddc_bus)
- intel_i2c_destroy(intel_output->ddc_bus);
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_output);
-}
-
-#ifdef RANDR_GET_CRTC_INTERFACE
-static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
- int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
-
- return intel_pipe_to_crtc(pScrn, pipe);
+ kfree(connector);
}
-#endif
-
-static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
- .dpms = intel_dvo_dpms,
- .mode_fixup = intel_dvo_mode_fixup,
- .prepare = intel_encoder_prepare,
- .mode_set = intel_dvo_mode_set,
- .commit = intel_encoder_commit,
-};
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .save = intel_dvo_save,
- .restore = intel_dvo_restore,
+ .dpms = intel_dvo_dpms,
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -332,14 +395,20 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
- drm_encoder_cleanup(encoder);
+ struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder));
+
+ if (intel_dvo->dev.dev_ops->destroy)
+ intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
+
+ kfree(intel_dvo->panel_fixed_mode);
+
+ intel_encoder_destroy(encoder);
}
static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
.destroy = intel_dvo_enc_destroy,
};
-
/**
* Attempts to get a fixed panel timing for LVDS (currently only the i830).
*
@@ -347,14 +416,12 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
* chip being on DVOB/C and having multiple pipes.
*/
static struct drm_display_mode *
-intel_dvo_get_current_mode (struct drm_connector *connector)
+intel_dvo_get_current_mode(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
- uint32_t dvo_reg = dvo->dvo_reg;
- uint32_t dvo_val = I915_READ(dvo_reg);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
struct drm_display_mode *mode = NULL;
/* If the DVO port is active, that'll be the LVDS, so we can pull out
@@ -364,10 +431,9 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
struct drm_crtc *crtc;
int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_from_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc) {
mode = intel_crtc_mode_get(dev, crtc);
-
if (mode) {
mode->type |= DRM_MODE_TYPE_PREFERRED;
if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
@@ -377,78 +443,94 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
}
}
}
+
return mode;
}
void intel_dvo_init(struct drm_device *dev)
{
- struct intel_output *intel_output;
- struct intel_dvo_device *dvo;
- struct i2c_adapter *i2cbus = NULL;
- int ret = 0;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ struct intel_dvo *intel_dvo;
+ struct intel_connector *intel_connector;
int i;
int encoder_type = DRM_MODE_ENCODER_NONE;
- intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL);
- if (!intel_output)
+
+ intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
+ if (!intel_dvo)
return;
- /* Set up the DDC bus */
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
- if (!intel_output->ddc_bus)
- goto free_intel;
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_dvo);
+ return;
+ }
+
+ intel_encoder = &intel_dvo->base;
+ drm_encoder_init(dev, &intel_encoder->base,
+ &intel_dvo_enc_funcs, encoder_type);
+
+ intel_encoder->disable = intel_disable_dvo;
+ intel_encoder->enable = intel_enable_dvo;
+ intel_encoder->get_hw_state = intel_dvo_get_hw_state;
+ intel_encoder->get_config = intel_dvo_get_config;
+ intel_encoder->compute_config = intel_dvo_compute_config;
+ intel_encoder->pre_enable = intel_dvo_pre_enable;
+ intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
+ intel_connector->unregister = intel_connector_unregister;
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
- struct drm_connector *connector = &intel_output->base;
+ struct drm_connector *connector = &intel_connector->base;
+ const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
+ struct i2c_adapter *i2c;
int gpio;
-
- dvo = &intel_dvo_devices[i];
+ bool dvoinit;
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
* in the spec.
*/
- if (dvo->gpio != 0)
+ if (intel_gmbus_is_port_valid(dvo->gpio))
gpio = dvo->gpio;
else if (dvo->type == INTEL_DVO_CHIP_LVDS)
- gpio = GPIOB;
+ gpio = GMBUS_PORT_SSC;
else
- gpio = GPIOE;
+ gpio = GMBUS_PORT_DPB;
/* Set up the I2C bus necessary for the chip we're probing.
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
- if (i2cbus != NULL)
- intel_i2c_destroy(i2cbus);
- if (!(i2cbus = intel_i2c_create(dev, gpio,
- gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
- continue;
- }
+ i2c = intel_gmbus_get_adapter(dev_priv, gpio);
- if (dvo->dev_ops!= NULL)
- ret = dvo->dev_ops->init(dvo, i2cbus);
- else
- ret = false;
+ intel_dvo->dev = *dvo;
- if (!ret)
+ /* GMBUS NAK handling seems to be unstable, hence let the
+ * transmitter detection run in bit banging mode for now.
+ */
+ intel_gmbus_force_bit(i2c, true);
+
+ dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
+
+ intel_gmbus_force_bit(i2c, false);
+
+ if (!dvoinit)
continue;
- intel_output->type = INTEL_OUTPUT_DVO;
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->type = INTEL_OUTPUT_DVO;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
- intel_output->clone_mask =
- (1 << INTEL_DVO_TMDS_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
+ intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
+ (1 << INTEL_OUTPUT_DVO);
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
case INTEL_DVO_CHIP_LVDS:
- intel_output->clone_mask =
- (1 << INTEL_DVO_LVDS_CLONE_BIT);
+ intel_encoder->cloneable = 0;
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -462,16 +544,7 @@ void intel_dvo_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- intel_output->dev_priv = dvo;
- intel_output->i2c_bus = i2cbus;
-
- drm_encoder_init(dev, &intel_output->enc,
- &intel_dvo_enc_funcs, encoder_type);
- drm_encoder_helper_add(&intel_output->enc,
- &intel_dvo_helper_funcs);
-
- drm_mode_connector_attach_encoder(&intel_output->base,
- &intel_output->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able
* to dig the fixed panel mode out of the BIOS data.
@@ -480,19 +553,16 @@ void intel_dvo_init(struct drm_device *dev)
* headers, likely), so for now, just get the current
* mode being output through DVO.
*/
- dvo->panel_fixed_mode =
+ intel_dvo->panel_fixed_mode =
intel_dvo_get_current_mode(connector);
- dvo->panel_wants_dither = true;
+ intel_dvo->panel_wants_dither = true;
}
drm_sysfs_connector_add(connector);
return;
}
- intel_i2c_destroy(intel_output->ddc_bus);
- /* Didn't find a chip, so tear down. */
- if (i2cbus != NULL)
- intel_i2c_destroy(i2cbus);
-free_intel:
- kfree(intel_output);
+ drm_encoder_cleanup(&intel_encoder->base);
+ kfree(intel_dvo);
+ kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
deleted file mode 100644
index 371d753e362..00000000000
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Copyright © 2007 David Airlie
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * David Airlie
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/tty.h>
-#include <linux/slab.h>
-#include <linux/sysrq.h>
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-
-#include "drmP.h"
-#include "drm.h"
-#include "drm_crtc.h"
-#include "drm_fb_helper.h"
-#include "intel_drv.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-struct intelfb_par {
- struct drm_fb_helper helper;
- struct intel_framebuffer *intel_fb;
- struct drm_display_mode *our_mode;
-};
-
-static struct fb_ops intelfb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
-};
-
-static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
- .gamma_set = intel_crtc_fb_gamma_set,
- .gamma_get = intel_crtc_fb_gamma_get,
-};
-
-
-/**
- * Currently it is assumed that the old framebuffer is reused.
- *
- * LOCKING
- * caller should hold the mode config lock.
- *
- */
-int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
-{
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_display_mode *mode = crtc->desired_mode;
-
- fb = crtc->fb;
- if (!fb)
- return 1;
-
- info = fb->fbdev;
- if (!info)
- return 1;
-
- if (!mode)
- return 1;
-
- info->var.xres = mode->hdisplay;
- info->var.right_margin = mode->hsync_start - mode->hdisplay;
- info->var.hsync_len = mode->hsync_end - mode->hsync_start;
- info->var.left_margin = mode->htotal - mode->hsync_end;
- info->var.yres = mode->vdisplay;
- info->var.lower_margin = mode->vsync_start - mode->vdisplay;
- info->var.vsync_len = mode->vsync_end - mode->vsync_start;
- info->var.upper_margin = mode->vtotal - mode->vsync_end;
- info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
- /* avoid overflow */
- info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-
- return 0;
-}
-EXPORT_SYMBOL(intelfb_resize);
-
-static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
- uint32_t fb_height, uint32_t surface_width,
- uint32_t surface_height,
- uint32_t surface_depth, uint32_t surface_bpp,
- struct drm_framebuffer **fb_p)
-{
- struct fb_info *info;
- struct intelfb_par *par;
- struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
- struct drm_mode_fb_cmd mode_cmd;
- struct drm_gem_object *fbo = NULL;
- struct drm_i915_gem_object *obj_priv;
- struct device *device = &dev->pdev->dev;
- int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
-
- /* we don't do packed 24bpp */
- if (surface_bpp == 24)
- surface_bpp = 32;
-
- mode_cmd.width = surface_width;
- mode_cmd.height = surface_height;
-
- mode_cmd.bpp = surface_bpp;
- mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
- mode_cmd.depth = surface_depth;
-
- size = mode_cmd.pitch * mode_cmd.height;
- size = ALIGN(size, PAGE_SIZE);
- fbo = drm_gem_object_alloc(dev, size);
- if (!fbo) {
- DRM_ERROR("failed to allocate framebuffer\n");
- ret = -ENOMEM;
- goto out;
- }
- obj_priv = fbo->driver_private;
-
- mutex_lock(&dev->struct_mutex);
-
- ret = i915_gem_object_pin(fbo, PAGE_SIZE);
- if (ret) {
- DRM_ERROR("failed to pin fb: %d\n", ret);
- goto out_unref;
- }
-
- /* Flush everything out, we'll be doing GTT only from now on */
- i915_gem_object_set_to_gtt_domain(fbo, 1);
-
- ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
- if (ret) {
- DRM_ERROR("failed to allocate fb.\n");
- goto out_unpin;
- }
-
- list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
- intel_fb = to_intel_framebuffer(fb);
- *fb_p = fb;
-
- info = framebuffer_alloc(sizeof(struct intelfb_par), device);
- if (!info) {
- ret = -ENOMEM;
- goto out_unpin;
- }
-
- par = info->par;
-
- par->helper.funcs = &intel_fb_helper_funcs;
- par->helper.dev = dev;
- ret = drm_fb_helper_init_crtc_count(&par->helper, 2,
- INTELFB_CONN_LIMIT);
- if (ret)
- goto out_unref;
-
- strcpy(info->fix.id, "inteldrmfb");
-
- info->flags = FBINFO_DEFAULT;
-
- info->fbops = &intelfb_ops;
-
-
- /* setup aperture base/size for vesafb takeover */
- info->aperture_base = dev->mode_config.fb_base;
- if (IS_I9XX(dev))
- info->aperture_size = pci_resource_len(dev->pdev, 2);
- else
- info->aperture_size = pci_resource_len(dev->pdev, 0);
-
- info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
- info->fix.smem_len = size;
-
- info->flags = FBINFO_DEFAULT;
-
- info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
- size);
- if (!info->screen_base) {
- ret = -ENOSPC;
- goto out_unpin;
- }
- info->screen_size = size;
-
-// memset(info->screen_base, 0, size);
-
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
-
- /* FIXME: we really shouldn't expose mmio space at all */
- info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
- info->fix.mmio_len = pci_resource_len(dev->pdev, mmio_bar);
-
- info->pixmap.size = 64*1024;
- info->pixmap.buf_align = 8;
- info->pixmap.access_align = 32;
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
- info->pixmap.scan_align = 1;
-
- fb->fbdev = info;
-
- par->intel_fb = intel_fb;
-
- /* To allow resizeing without swapping buffers */
- DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
- intel_fb->base.width, intel_fb->base.height,
- obj_priv->gtt_offset, fbo);
-
- mutex_unlock(&dev->struct_mutex);
- return 0;
-
-out_unpin:
- i915_gem_object_unpin(fbo);
-out_unref:
- drm_gem_object_unreference(fbo);
- mutex_unlock(&dev->struct_mutex);
-out:
- return ret;
-}
-
-int intelfb_probe(struct drm_device *dev)
-{
- int ret;
-
- DRM_DEBUG_KMS("\n");
- ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
- return ret;
-}
-EXPORT_SYMBOL(intelfb_probe);
-
-int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
-{
- struct fb_info *info;
-
- if (!fb)
- return -EINVAL;
-
- info = fb->fbdev;
-
- if (info) {
- struct intelfb_par *par = info->par;
- unregister_framebuffer(info);
- iounmap(info->screen_base);
- if (info->par)
- drm_fb_helper_free(&par->helper);
- framebuffer_release(info);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(intelfb_remove);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
new file mode 100644
index 00000000000..088fe9378a4
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * David Airlie
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/vga_switcheroo.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+static struct fb_ops intelfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int intelfb_alloc(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct intel_fbdev *ifbdev =
+ container_of(helper, struct intel_fbdev, helper);
+ struct drm_framebuffer *fb;
+ struct drm_device *dev = helper->dev;
+ struct drm_mode_fb_cmd2 mode_cmd = {};
+ struct drm_i915_gem_object *obj;
+ int size, ret;
+
+ /* we don't do packed 24bpp */
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
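+	/* The display engine wants the stride aligned to 64 bytes,
+	 * hence the ALIGN below. */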
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
+ DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = ALIGN(size, PAGE_SIZE);
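+	/* Prefer stolen memory for the fbdev buffer, since it is
+	 * otherwise unused; fall back to a regular GEM object if the
+	 * stolen allocator cannot satisfy the request. */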
+ obj = i915_gem_object_create_stolen(dev, size);
+ if (obj == NULL)
+ obj = i915_gem_alloc_object(dev, size);
+ if (!obj) {
+ DRM_ERROR("failed to allocate framebuffer\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Flush everything out, we'll be doing GTT only from now on */
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ if (ret) {
+ DRM_ERROR("failed to pin obj: %d\n", ret);
+ goto out_unref;
+ }
+
+ fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
+ goto out_unpin;
+ }
+
+ ifbdev->fb = to_intel_framebuffer(fb);
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_ggtt_unpin(obj);
+out_unref:
+ drm_gem_object_unreference(&obj->base);
+out:
+ return ret;
+}
+
+static int intelfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct intel_fbdev *ifbdev =
+ container_of(helper, struct intel_fbdev, helper);
+ struct intel_framebuffer *intel_fb = ifbdev->fb;
+ struct drm_device *dev = helper->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
+ int size, ret;
+ bool prealloc = false;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (intel_fb &&
+ (sizes->fb_width > intel_fb->base.width ||
+ sizes->fb_height > intel_fb->base.height)) {
+ DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
+ " releasing it\n",
+ intel_fb->base.width, intel_fb->base.height,
+ sizes->fb_width, sizes->fb_height);
+ drm_framebuffer_unreference(&intel_fb->base);
+ intel_fb = ifbdev->fb = NULL;
+ }
+ if (!intel_fb || WARN_ON(!intel_fb->obj)) {
+ DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
+ ret = intelfb_alloc(helper, sizes);
+ if (ret)
+ goto out_unlock;
+ intel_fb = ifbdev->fb;
+ } else {
+ DRM_DEBUG_KMS("re-using BIOS fb\n");
+ prealloc = true;
+ sizes->fb_width = intel_fb->base.width;
+ sizes->fb_height = intel_fb->base.height;
+ }
+
+ obj = intel_fb->obj;
+ size = obj->base.size;
+
+ info = framebuffer_alloc(0, &dev->pdev->dev);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+
+ info->par = helper;
+
+ fb = &ifbdev->fb->base;
+
+ ifbdev->helper.fb = fb;
+ ifbdev->helper.fbdev = info;
+
+ strcpy(info->fix.id, "inteldrmfb");
+
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &intelfb_ops;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ /* setup aperture base/size for vesafb takeover */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ info->apertures->ranges[0].base = dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
+
+ info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
+ info->fix.smem_len = size;
+
+ info->screen_base =
+ ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
+ size);
+ if (!info->screen_base) {
+ ret = -ENOSPC;
+ goto out_unpin;
+ }
+ info->screen_size = size;
+
+ /* This driver doesn't need a VT switch to restore the mode on resume */
+ info->skip_vt_switch = true;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
+
+ /* If the object is shmemfs backed, it will have given us zeroed pages.
+ * If the object is stolen however, it will be full of whatever
+ * garbage was left in there.
+ */
+ if (ifbdev->fb->obj->stolen && !prealloc)
+ memset_io(info->screen_base, 0, info->screen_size);
+
+ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+
+ DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
+ fb->width, fb->height,
+ i915_gem_obj_ggtt_offset(obj), obj);
+
+ mutex_unlock(&dev->struct_mutex);
+ vga_switcheroo_client_fb_set(dev->pdev, info);
+ return 0;
+
+out_unpin:
+ i915_gem_object_ggtt_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/** Sets the color ramps on behalf of RandR */
+static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ intel_crtc->lut_r[regno] = red >> 8;
+ intel_crtc->lut_g[regno] = green >> 8;
+ intel_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ *red = intel_crtc->lut_r[regno] << 8;
+ *green = intel_crtc->lut_g[regno] << 8;
+ *blue = intel_crtc->lut_b[regno] << 8;
+}
+
+static struct drm_fb_helper_crtc *
+intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
+{
+ int i;
+
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ if (fb_helper->crtc_info[i].mode_set.crtc == crtc)
+ return &fb_helper->crtc_info[i];
+
+ return NULL;
+}
+
+/*
+ * Try to read the BIOS display configuration and use it for the initial
+ * fb configuration.
+ *
+ * The BIOS or boot loader will generally create an initial display
+ * configuration for us that includes some set of active pipes and displays.
+ * This routine tries to figure out which pipes and connectors are active
+ * and stuffs them into the crtcs and modes array given to us by the
+ * drm_fb_helper code.
+ *
+ * The overall sequence is:
+ * intel_fbdev_init - from driver load
+ * intel_fbdev_init_bios - initialize the intel_fbdev using BIOS data
+ * drm_fb_helper_init - build fb helper structs
+ * drm_fb_helper_single_add_all_connectors - more fb helper structs
+ * intel_fbdev_initial_config - apply the config
+ * drm_fb_helper_initial_config - call ->probe then register_framebuffer()
+ * drm_setup_crtcs - build crtc config for fbdev
+ * intel_fb_initial_config - find active connectors etc
+ * drm_fb_helper_single_fb_probe - set up fbdev
+ * intelfb_create - re-use or alloc fb, build out fbdev structs
+ *
+ * Note that we don't give special consideration to whether we could actually
+ * switch to the selected modes without a full modeset. E.g. when the display
+ * is in VGA mode we need to recalculate watermarks and set a new high-res
+ * framebuffer anyway.
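+ *
+ * For each enabled connector, the loop below picks a mode in this
+ * order: kernel cmdline mode, then the EDID-preferred mode, then the
+ * first mode in the probed list, and finally the CRTC's current
+ * hwmode as a last resort.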
+ */
+static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **crtcs,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ struct drm_device *dev = fb_helper->dev;
+ int i, j;
+ bool *save_enabled;
+ bool fallback = true;
+ int num_connectors_enabled = 0;
+ int num_connectors_detected = 0;
+
+ /*
+ * If the user specified any force options, just bail here
+ * and use that config.
+ */
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_fb_helper_connector *fb_conn;
+ struct drm_connector *connector;
+
+ fb_conn = fb_helper->connector_info[i];
+ connector = fb_conn->connector;
+
+ if (!enabled[i])
+ continue;
+
+ if (connector->force != DRM_FORCE_UNSPECIFIED)
+ return false;
+ }
+
+ save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
+ GFP_KERNEL);
+ if (!save_enabled)
+ return false;
+
+ memcpy(save_enabled, enabled, dev->mode_config.num_connector);
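+ /* note: the byte count assumes sizeof(bool) == 1 */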
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_fb_helper_connector *fb_conn;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_fb_helper_crtc *new_crtc;
+
+ fb_conn = fb_helper->connector_info[i];
+ connector = fb_conn->connector;
+
+ if (connector->status == connector_status_connected)
+ num_connectors_detected++;
+
+ if (!enabled[i]) {
+ DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
+ connector->name);
+ continue;
+ }
+
+ encoder = connector->encoder;
+ if (!encoder || WARN_ON(!encoder->crtc)) {
+ DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
+ connector->name);
+ enabled[i] = false;
+ continue;
+ }
+
+ num_connectors_enabled++;
+
+ new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc);
+
+ /*
+ * Make sure we're not trying to drive multiple connectors
+ * with a single CRTC, since our cloning support may not
+ * match the BIOS.
+ */
+ for (j = 0; j < fb_helper->connector_count; j++) {
+ if (crtcs[j] == new_crtc) {
+ DRM_DEBUG_KMS("fallback: cloned configuration\n");
+ fallback = true;
+ goto out;
+ }
+ }
+
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
+ connector->name);
+
+ /* go for command line mode first */
+ modes[i] = drm_pick_cmdline_mode(fb_conn, width, height);
+
+ /* try for preferred next */
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %s\n",
+ connector->name);
+ modes[i] = drm_has_preferred_mode(fb_conn, width,
+ height);
+ }
+
+ /* No preferred mode marked by the EDID? Are there any modes? */
+ if (!modes[i] && !list_empty(&connector->modes)) {
+ DRM_DEBUG_KMS("using first mode listed on connector %s\n",
+ connector->name);
+ modes[i] = list_first_entry(&connector->modes,
+ struct drm_display_mode,
+ head);
+ }
+
+ /* last resort: use current mode */
+ if (!modes[i]) {
+ /*
+ * IMPORTANT: We want to use the adjusted mode (i.e.
+ * after the panel fitter upscaling) as the initial
+ * config, not the input mode, which is what crtc->mode
+ * usually contains. But since our current fastboot
+ * code puts a mode derived from the post-pfit timings
+ * into crtc->mode this works out correctly. We don't
+ * use hwmode anywhere right now, so use it for this
+ * since the fb helper layer wants a pointer to
+ * something we own.
+ */
+ DRM_DEBUG_KMS("looking for current mode on connector %s\n",
+ connector->name);
+ intel_mode_from_pipe_config(&encoder->crtc->hwmode,
+ &to_intel_crtc(encoder->crtc)->config);
+ modes[i] = &encoder->crtc->hwmode;
+ }
+ crtcs[i] = new_crtc;
+
+ DRM_DEBUG_KMS("connector %s on pipe %d [CRTC:%d]: %dx%d%s\n",
+ connector->name,
+ pipe_name(to_intel_crtc(encoder->crtc)->pipe),
+ encoder->crtc->base.id,
+ modes[i]->hdisplay, modes[i]->vdisplay,
+ modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
+
+ fallback = false;
+ }
+
+ /*
+ * If the BIOS didn't enable everything it could, fall back to the
+ * fbdev helper library's usual behaviour of lighting up as much as
+ * possible.
+ */
+ if (num_connectors_enabled != num_connectors_detected &&
+ num_connectors_enabled < INTEL_INFO(dev)->num_pipes) {
+ DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
+ DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
+ num_connectors_detected);
+ fallback = true;
+ }
+
+out:
+ if (fallback) {
+ DRM_DEBUG_KMS("Not using firmware configuration\n");
+ memcpy(enabled, save_enabled, dev->mode_config.num_connector);
+ kfree(save_enabled);
+ return false;
+ }
+
+ kfree(save_enabled);
+ return true;
+}
+
+static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ .initial_config = intel_fb_initial_config,
+ .gamma_set = intel_crtc_fb_gamma_set,
+ .gamma_get = intel_crtc_fb_gamma_get,
+ .fb_probe = intelfb_create,
+};
+
+static void intel_fbdev_destroy(struct drm_device *dev,
+ struct intel_fbdev *ifbdev)
+{
+ if (ifbdev->helper.fbdev) {
+ struct fb_info *info = ifbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ iounmap(info->screen_base);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+
+ framebuffer_release(info);
+ }
+
+ drm_fb_helper_fini(&ifbdev->helper);
+
+ drm_framebuffer_unregister_private(&ifbdev->fb->base);
+ drm_framebuffer_remove(&ifbdev->fb->base);
+}
+
+/*
+ * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
+ * The core display code will have read out the current plane configuration,
+ * so we use that to figure out if there's an object for us to use as the
+ * fb, and if so, we re-use it for the fbdev configuration.
+ *
+ * Note we only support a single fb shared across pipes for boot (mostly for
+ * fbcon), so we just find the biggest and use that.
+ */
+static bool intel_fbdev_init_bios(struct drm_device *dev,
+ struct intel_fbdev *ifbdev)
+{
+ struct intel_framebuffer *fb = NULL;
+ struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+ struct intel_plane_config *plane_config = NULL;
+ unsigned int max_size = 0;
+
+ if (!i915.fastboot)
+ return false;
+
+ /* Find the largest fb */
+ for_each_crtc(dev, crtc) {
+ intel_crtc = to_intel_crtc(crtc);
+
+ if (!intel_crtc->active || !crtc->primary->fb) {
+ DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
+ pipe_name(intel_crtc->pipe));
+ continue;
+ }
+
+ if (intel_crtc->plane_config.size > max_size) {
+ DRM_DEBUG_KMS("found possible fb from plane %c\n",
+ pipe_name(intel_crtc->pipe));
+ plane_config = &intel_crtc->plane_config;
+ fb = to_intel_framebuffer(crtc->primary->fb);
+ max_size = plane_config->size;
+ }
+ }
+
+ if (!fb) {
+ DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
+ goto out;
+ }
+
+ /* Now make sure all the pipes will fit into it */
+ for_each_crtc(dev, crtc) {
+ unsigned int cur_size;
+
+ intel_crtc = to_intel_crtc(crtc);
+
+ if (!intel_crtc->active) {
+ DRM_DEBUG_KMS("pipe %c not active, skipping\n",
+ pipe_name(intel_crtc->pipe));
+ continue;
+ }
+
+ DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
+ pipe_name(intel_crtc->pipe));
+
+ /*
+ * See if the plane fb we found above will fit on this
+ * pipe. Note we need to use the selected fb's pitch and bpp
+ * rather than the current pipe's, since they differ.
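+ *
+ * Rough example (ours): a 1920-wide pipe scanning out of a 32bpp
+ * fb needs a pitch of at least 1920 * 4 = 7680 bytes, and
+ * (untiled) 1080 * pitch bytes of the fb in total.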
+ */
+ cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay;
+ cur_size = cur_size * fb->base.bits_per_pixel / 8;
+ if (fb->base.pitches[0] < cur_size) {
+ DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
+ pipe_name(intel_crtc->pipe),
+ cur_size, fb->base.pitches[0]);
+ plane_config = NULL;
+ fb = NULL;
+ break;
+ }
+
+ cur_size = intel_crtc->config.adjusted_mode.crtc_vdisplay;
+ cur_size = ALIGN(cur_size, plane_config->tiled ? (IS_GEN2(dev) ? 16 : 8) : 1);
+ cur_size *= fb->base.pitches[0];
+ DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
+ pipe_name(intel_crtc->pipe),
+ intel_crtc->config.adjusted_mode.crtc_hdisplay,
+ intel_crtc->config.adjusted_mode.crtc_vdisplay,
+ fb->base.bits_per_pixel,
+ cur_size);
+
+ if (cur_size > max_size) {
+ DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
+ pipe_name(intel_crtc->pipe),
+ cur_size, max_size);
+ plane_config = NULL;
+ fb = NULL;
+ break;
+ }
+
+ DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
+ pipe_name(intel_crtc->pipe),
+ max_size, cur_size);
+ }
+
+ if (!fb) {
+ DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
+ goto out;
+ }
+
+ ifbdev->preferred_bpp = fb->base.bits_per_pixel;
+ ifbdev->fb = fb;
+
+ drm_framebuffer_reference(&ifbdev->fb->base);
+
+ /* Final pass to check if any active pipes don't have fbs */
+ for_each_crtc(dev, crtc) {
+ intel_crtc = to_intel_crtc(crtc);
+
+ if (!intel_crtc->active)
+ continue;
+
+ WARN(!crtc->primary->fb,
+ "re-used BIOS config but lost an fb on crtc %d\n",
+ crtc->base.id);
+ }
+
+ DRM_DEBUG_KMS("using BIOS fb for initial console\n");
+ return true;
+
+out:
+
+ return false;
+}
+
+int intel_fbdev_init(struct drm_device *dev)
+{
+ struct intel_fbdev *ifbdev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
+ return -ENODEV;
+
+ ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+ if (ifbdev == NULL)
+ return -ENOMEM;
+
+ ifbdev->helper.funcs = &intel_fb_helper_funcs;
+ if (!intel_fbdev_init_bios(dev, ifbdev))
+ ifbdev->preferred_bpp = 32;
+
+ ret = drm_fb_helper_init(dev, &ifbdev->helper,
+ INTEL_INFO(dev)->num_pipes, 4);
+ if (ret) {
+ kfree(ifbdev);
+ return ret;
+ }
+
+ dev_priv->fbdev = ifbdev;
+ drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
+
+ return 0;
+}
+
+void intel_fbdev_initial_config(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_fbdev *ifbdev = dev_priv->fbdev;
+
+ /* Due to peculiar init order wrt hpd handling, this is separate. */
+ drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp);
+}
+
+void intel_fbdev_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (!dev_priv->fbdev)
+ return;
+
+ intel_fbdev_destroy(dev, dev_priv->fbdev);
+ kfree(dev_priv->fbdev);
+ dev_priv->fbdev = NULL;
+}
+
+void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_fbdev *ifbdev = dev_priv->fbdev;
+ struct fb_info *info;
+
+ if (!ifbdev)
+ return;
+
+ info = ifbdev->helper.fbdev;
+
+ /* On resume from hibernation: If the object is shmemfs backed, it has
+ * been restored from swap. If the object is stolen however, it will be
+ * full of whatever garbage was left in there.
+ */
+ if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
+ memset_io(info->screen_base, 0, info->screen_size);
+
+ fb_set_suspend(info, state);
+}
+
+void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (dev_priv->fbdev)
+ drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+}
+
+void intel_fbdev_restore_mode(struct drm_device *dev)
+{
+ int ret;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->fbdev)
+ return;
+
+ ret = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper);
+ if (ret)
+ DRM_DEBUG("failed to restore crtc mode\n");
+}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 06431941b23..eee2bbec295 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -27,112 +27,845 @@
*/
#include <linux/i2c.h>
+#include <linux/slab.h>
#include <linux/delay.h>
-#include "drmP.h"
-#include "drm.h"
-#include "drm_crtc.h"
-#include "drm_edid.h"
+#include <linux/hdmi.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
#include "intel_drv.h"
-#include "i915_drm.h"
+#include <drm/i915_drm.h>
#include "i915_drv.h"
-struct intel_hdmi_priv {
- u32 sdvox_reg;
- u32 save_SDVOX;
- bool has_hdmi_sink;
-};
+static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
+{
+ return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
+}
+
+static void
+assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
+{
+ struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t enabled_bits;
+
+ enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+
+ WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
+ "HDMI port enabled, expecting disabled\n");
+}
+
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port =
+ container_of(encoder, struct intel_digital_port, base.base);
+ return &intel_dig_port->hdmi;
+}
+
+static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
+{
+ return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
+}
+
+static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
+{
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return VIDEO_DIP_SELECT_AVI;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return VIDEO_DIP_SELECT_SPD;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return VIDEO_DIP_SELECT_VENDOR;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
+ return 0;
+ }
+}
+
+static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
+{
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return VIDEO_DIP_ENABLE_AVI;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return VIDEO_DIP_ENABLE_SPD;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return VIDEO_DIP_ENABLE_VENDOR;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
+ return 0;
+ }
+}
+
+static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
+{
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return VIDEO_DIP_ENABLE_AVI_HSW;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return VIDEO_DIP_ENABLE_SPD_HSW;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return VIDEO_DIP_ENABLE_VS_HSW;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
+ return 0;
+ }
+}
+
+static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
+ enum transcoder cpu_transcoder,
+ struct drm_i915_private *dev_priv)
+{
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder);
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
+ return 0;
+ }
+}
+
+static void g4x_write_infoframe(struct drm_encoder *encoder,
+ enum hdmi_infoframe_type type,
+ const void *frame, ssize_t len)
+{
+ const uint32_t *data = frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val = I915_READ(VIDEO_DIP_CTL);
+ int i;
+
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ val &= ~g4x_infoframe_enable(type);
-static void intel_hdmi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ I915_WRITE(VIDEO_DIP_CTL, val);
+
+ mmiowb();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(VIDEO_DIP_DATA, *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VIDEO_DIP_DATA, 0);
+ mmiowb();
+
+ val |= g4x_infoframe_enable(type);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(VIDEO_DIP_CTL, val);
+ POSTING_READ(VIDEO_DIP_CTL);
+}
+
+static void ibx_write_infoframe(struct drm_encoder *encoder,
+ enum hdmi_infoframe_type type,
+ const void *frame, ssize_t len)
{
+ const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
- u32 sdvox;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
- sdvox = SDVO_ENCODING_HDMI |
- SDVO_BORDER_ENABLE |
- SDVO_VSYNC_ACTIVE_HIGH |
- SDVO_HSYNC_ACTIVE_HIGH;
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
- if (hdmi_priv->has_hdmi_sink)
- sdvox |= SDVO_AUDIO_ENABLE;
+ val &= ~g4x_infoframe_enable(type);
- if (intel_crtc->pipe == 1)
- sdvox |= SDVO_PIPE_B_SELECT;
+ I915_WRITE(reg, val);
- I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
- POSTING_READ(hdmi_priv->sdvox_reg);
+ mmiowb();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ mmiowb();
+
+ val |= g4x_infoframe_enable(type);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
}
-static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
+static void cpt_write_infoframe(struct drm_encoder *encoder,
+ enum hdmi_infoframe_type type,
+ const void *frame, ssize_t len)
{
+ const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ /* The DIP control register spec says that we need to update the AVI
+ * infoframe without clearing its enable bit. */
+ if (type != HDMI_INFOFRAME_TYPE_AVI)
+ val &= ~g4x_infoframe_enable(type);
+
+ I915_WRITE(reg, val);
+
+ mmiowb();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ mmiowb();
+
+ val |= g4x_infoframe_enable(type);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
+static void vlv_write_infoframe(struct drm_encoder *encoder,
+ enum hdmi_infoframe_type type,
+ const void *frame, ssize_t len)
+{
+ const uint32_t *data = frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ val &= ~g4x_infoframe_enable(type);
+
+ I915_WRITE(reg, val);
+
+ mmiowb();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ mmiowb();
+
+ val |= g4x_infoframe_enable(type);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
+static void hsw_write_infoframe(struct drm_encoder *encoder,
+ enum hdmi_infoframe_type type,
+ const void *frame, ssize_t len)
+{
+ const uint32_t *data = frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 data_reg;
+ int i;
+ u32 val = I915_READ(ctl_reg);
+
+ data_reg = hsw_infoframe_data_reg(type,
+ intel_crtc->config.cpu_transcoder,
+ dev_priv);
+ if (data_reg == 0)
+ return;
+
+ val &= ~hsw_infoframe_enable(type);
+ I915_WRITE(ctl_reg, val);
+
+ mmiowb();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(data_reg + i, *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(data_reg + i, 0);
+ mmiowb();
+
+ val |= hsw_infoframe_enable(type);
+ I915_WRITE(ctl_reg, val);
+ POSTING_READ(ctl_reg);
+}
+
+/*
+ * The data we write to the DIP data buffer registers is 1 byte bigger than the
+ * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
+ * at 0). It's also a byte used by DisplayPort so the same DIP registers can be
+ * used for both technologies.
+ *
+ * DW0: Reserved/ECC/DP | HB2 | HB1 | HB0
+ * DW1: DB3 | DB2 | DB1 | DB0
+ * DW2: DB7 | DB6 | DB5 | DB4
+ * DW3: ...
+ *
+ * (HB is Header Byte, DB is Data Byte)
+ *
+ * The hdmi pack() functions don't know about that hardware-specific hole, so we
+ * trick them by giving an offset into the buffer and moving back the header
+ * bytes by one.
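+ *
+ * Traced through the shuffle below (our illustration): pack() puts
+ * HB0 HB1 HB2 DB0 DB1 ... at buffer[1], and the shuffle leaves
+ * HB0 HB1 HB2 0x00 DB0 DB1 ... in the buffer, matching the
+ * DW0/DW1 layout above.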
+ */
+static void intel_write_infoframe(struct drm_encoder *encoder,
+ union hdmi_infoframe *frame)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ uint8_t buffer[VIDEO_DIP_DATA_SIZE];
+ ssize_t len;
+
+ /* see comment above for the reason for this offset */
+ len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1);
+ if (len < 0)
+ return;
+
+ /* Insert the 'hole' (see big comment above) at position 3 */
+ buffer[0] = buffer[1];
+ buffer[1] = buffer[2];
+ buffer[2] = buffer[3];
+ buffer[3] = 0;
+ len++;
+
+ intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len);
+}
+
+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ union hdmi_infoframe frame;
+ int ret;
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ adjusted_mode);
+ if (ret < 0) {
+ DRM_ERROR("couldn't fill AVI infoframe\n");
+ return;
+ }
+
+ if (intel_hdmi->rgb_quant_range_selectable) {
+ if (intel_crtc->config.limited_color_range)
+ frame.avi.quantization_range =
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+ else
+ frame.avi.quantization_range =
+ HDMI_QUANTIZATION_RANGE_FULL;
+ }
+
+ intel_write_infoframe(encoder, &frame);
+}
+
+static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+{
+ union hdmi_infoframe frame;
+ int ret;
+
+ ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx");
+ if (ret < 0) {
+ DRM_ERROR("couldn't fill SPD infoframe\n");
+ return;
+ }
+
+ frame.spd.sdi = HDMI_SPD_SDI_PC;
+
+ intel_write_infoframe(encoder, &frame);
+}
+
+static void
+intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ union hdmi_infoframe frame;
+ int ret;
+
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+ adjusted_mode);
+ if (ret < 0)
+ return;
+
+ intel_write_infoframe(encoder, &frame);
+}
+
+static void g4x_set_infoframes(struct drm_encoder *encoder,
+ bool enable,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+ u32 reg = VIDEO_DIP_CTL;
+ u32 val = I915_READ(reg);
+ u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* If the registers were not initialized yet, they might be zeroes,
+ * which means we're selecting the AVI DIP and setting its frequency
+ * to "once". This seems to really confuse the HW and make things
+ * stop working (the register spec says the AVI always needs to be
+ * sent every VSync). So here we avoid writing to the register more
+ * than we need to, and also explicitly select the AVI DIP and
+ * explicitly set its frequency to every VSync. Avoiding the double
+ * write seems to be enough to solve the problem, but being defensive
+ * shouldn't hurt us either. */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!enable) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ if (val & VIDEO_DIP_ENABLE) {
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ }
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~VIDEO_DIP_ENABLE_VENDOR;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+}
+
+static void ibx_set_infoframes(struct drm_encoder *encoder,
+ bool enable,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+ u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+ u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!enable) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ if (val & VIDEO_DIP_ENABLE) {
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ }
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+}
+
+static void cpt_set_infoframes(struct drm_encoder *encoder,
+ bool enable,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!enable) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ /* Set both together, unset both together: see the spec. */
+ val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+}
+
+static void vlv_set_infoframes(struct drm_encoder *encoder,
+ bool enable,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+ u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!enable) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ if (val & VIDEO_DIP_ENABLE) {
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ }
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR |
+ VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+}
+
+static void hsw_set_infoframes(struct drm_encoder *encoder,
+ bool enable,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ if (!enable) {
+ I915_WRITE(reg, 0);
+ POSTING_READ(reg);
+ return;
+ }
+
+ val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
+ VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+}
+
+static void intel_hdmi_prepare(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ u32 hdmi_val;
+
+ hdmi_val = SDVO_ENCODING_HDMI;
+ if (!HAS_PCH_SPLIT(dev))
+ hdmi_val |= intel_hdmi->color_range;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
+
+ if (crtc->config.pipe_bpp > 24)
+ hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
+ else
+ hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
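+ /* (pipe_bpp counts all three channels, so > 24 means > 8bpc) */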
+
+ if (crtc->config.has_hdmi_sink)
+ hdmi_val |= HDMI_MODE_SELECT_HDMI;
+
+ if (crtc->config.has_audio) {
+ WARN_ON(!crtc->config.has_hdmi_sink);
+ DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+ pipe_name(crtc->pipe));
+ hdmi_val |= SDVO_AUDIO_ENABLE;
+ intel_write_eld(&encoder->base, adjusted_mode);
+ }
+
+ if (HAS_PCH_CPT(dev))
+ hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
+ else if (IS_CHERRYVIEW(dev))
+ hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
+ else
+ hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
+
+ I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+}
+
+static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ enum intel_display_power_domain power_domain;
+ u32 tmp;
+
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
+ tmp = I915_READ(intel_hdmi->hdmi_reg);
+
+ if (!(tmp & SDVO_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else if (IS_CHERRYVIEW(dev))
+ *pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_hdmi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ u32 tmp, flags = 0;
+ int dotclock;
+
+ tmp = I915_READ(intel_hdmi->hdmi_reg);
+
+ if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (tmp & SDVO_VSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ if (tmp & HDMI_MODE_SELECT_HDMI)
+ pipe_config->has_hdmi_sink = true;
+
+ if (tmp & SDVO_AUDIO_ENABLE)
+ pipe_config->has_audio = true;
+
+ pipe_config->adjusted_mode.flags |= flags;
+
+ if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
+ dotclock = pipe_config->port_clock * 2 / 3;
+ else
+ dotclock = pipe_config->port_clock;
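+ /* (12bpc runs the port at 3/2 the dot clock, hence the 2/3 above) */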
+
+ if (HAS_PCH_SPLIT(dev_priv->dev))
+ ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+ pipe_config->adjusted_mode.crtc_clock = dotclock;
+}
+
+static void intel_enable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
+ u32 enable_bits = SDVO_ENABLE;
+
+ if (intel_crtc->config.has_audio)
+ enable_bits |= SDVO_AUDIO_ENABLE;
- temp = I915_READ(hdmi_priv->sdvox_reg);
+ temp = I915_READ(intel_hdmi->hdmi_reg);
+
+ /* HW workaround for IBX, we need to move the port to transcoder A
+ * before disabling it, so restore the transcoder select bit here. */
+ if (HAS_PCH_IBX(dev))
+ enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
 * we do this anyway, as it proved more stable in testing.
*/
- if (IS_IRONLAKE(dev)) {
- I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
- POSTING_READ(hdmi_priv->sdvox_reg);
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(intel_hdmi->hdmi_reg);
}
- if (mode != DRM_MODE_DPMS_ON) {
- temp &= ~SDVO_ENABLE;
- } else {
- temp |= SDVO_ENABLE;
- }
+ temp |= enable_bits;
- I915_WRITE(hdmi_priv->sdvox_reg, temp);
- POSTING_READ(hdmi_priv->sdvox_reg);
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
 /* HW workaround, need to write this twice for an issue that may result
 * in the first write getting masked.
*/
- if (IS_IRONLAKE(dev)) {
- I915_WRITE(hdmi_priv->sdvox_reg, temp);
- POSTING_READ(hdmi_priv->sdvox_reg);
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
}
}
-static void intel_hdmi_save(struct drm_connector *connector)
+static void vlv_enable_hdmi(struct intel_encoder *encoder)
{
- struct drm_device *dev = connector->dev;
+}
+
+static void intel_disable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 temp;
+ u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
+
+ temp = I915_READ(intel_hdmi->hdmi_reg);
+
+ /* HW workaround for IBX, we need to move the port to transcoder A
+ * before disabling it. */
+ if (HAS_PCH_IBX(dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
- hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg);
+ if (temp & SDVO_PIPE_B_SELECT) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+
+ /* Again we need to write this twice. */
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+
+ /* Transcoder selection bits only update
+ * effectively on vblank. */
+ if (crtc)
+ intel_wait_for_vblank(dev, pipe);
+ else
+ msleep(50);
+ }
+ }
+
+ /* HW workaround, need to toggle enable bit off and on for 12bpc, but
+ * we do this anyway, as it proved more stable in testing.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+ }
+
+ temp &= ~enable_bits;
+
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+
+ /* HW workaround, need to write this twice for an issue that may result
+ * in the first write getting masked.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+ }
}
-static void intel_hdmi_restore(struct drm_connector *connector)
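+/*
+ * Max port (TMDS) clock in kHz: 165 MHz is the classic single-link
+ * DVI/TMDS ceiling (also all we drive on g4x); the 225/300 MHz
+ * values below are the per-platform port limits.
+ */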
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+ struct drm_device *dev = intel_hdmi_to_dev(hdmi);
- I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX);
- POSTING_READ(hdmi_priv->sdvox_reg);
+ if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
+ return 165000;
+ else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
+ return 300000;
+ else
+ return 225000;
}
-static int intel_hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
- if (mode->clock > 165000)
+ if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
+ true))
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
- return MODE_CLOCK_HIGH;
+ return MODE_CLOCK_LOW;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -140,73 +873,535 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *encoder;
+ int count = 0, count_hdmi = 0;
+
+ if (!HAS_PCH_SPLIT(dev))
+ return false;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ if (encoder->new_crtc != crtc)
+ continue;
+
+ count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
+ count++;
+ }
+
+ /*
+ * HDMI 12bpc affects the clocks, so it's only possible
+ * when not cloning with other encoder types.
+ */
+ return count_hdmi > 0 && count_hdmi == count;
+}
+
+bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
+ int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
+ int desired_bpp;
+
+ pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
+
+ if (intel_hdmi->color_range_auto) {
+ /* See CEA-861-E - 5.1 Default Encoding Parameters */
+ if (pipe_config->has_hdmi_sink &&
+ drm_match_cea_mode(adjusted_mode) > 1)
+ intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
+ else
+ intel_hdmi->color_range = 0;
+ }
+
+ if (intel_hdmi->color_range)
+ pipe_config->limited_color_range = true;
+
+ if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
+ pipe_config->has_pch_encoder = true;
+
+ if (pipe_config->has_hdmi_sink && intel_hdmi->has_audio)
+ pipe_config->has_audio = true;
+
+ /*
+ * HDMI is either 12 or 8, so if the display lets 10bpc sneak
+ * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
+ * outputs. We also need to check that the higher clock still fits
+ * within limits.
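+ *
+ * Worked example (ours): 1080p at 148.5 MHz needs
+ * 148500 * 3 / 2 = 222750 kHz at 12bpc, which still fits under a
+ * 225 MHz port limit.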
+ */
+ if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
+ clock_12bpc <= portclock_limit &&
+ hdmi_12bpc_possible(encoder->new_crtc)) {
+ DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
+ desired_bpp = 12*3;
+
+ /* Need to adjust the port link by 1.5x for 12bpc. */
+ pipe_config->port_clock = clock_12bpc;
+ } else {
+ DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
+ desired_bpp = 8*3;
+ }
+
+ if (!pipe_config->bw_constrained) {
+ DRM_DEBUG_KMS("forcing pipe bpc to %i for HDMI\n", desired_bpp);
+ pipe_config->pipe_bpp = desired_bpp;
+ }
+
+ if (adjusted_mode->crtc_clock > portclock_limit) {
+ DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
+ return false;
+ }
+
return true;
}
static enum drm_connector_status
-intel_hdmi_detect(struct drm_connector *connector)
+intel_hdmi_detect(struct drm_connector *connector, bool force)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
- struct edid *edid = NULL;
+ struct drm_device *dev = connector->dev;
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct edid *edid;
+ enum intel_display_power_domain power_domain;
enum drm_connector_status status = connector_status_disconnected;
- hdmi_priv->has_hdmi_sink = false;
- edid = drm_get_edid(&intel_output->base,
- intel_output->ddc_bus);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
+ intel_hdmi->has_hdmi_sink = false;
+ intel_hdmi->has_audio = false;
+ intel_hdmi->rgb_quant_range_selectable = false;
+ edid = drm_get_edid(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
- hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
+ intel_hdmi->has_hdmi_sink =
+ drm_detect_hdmi_monitor(edid);
+ intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
+ intel_hdmi->rgb_quant_range_selectable =
+ drm_rgb_quant_range_selectable(edid);
}
- intel_output->base.display_info.raw_edid = NULL;
kfree(edid);
}
+ if (status == connector_status_connected) {
+ if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
+ intel_hdmi->has_audio =
+ (intel_hdmi->force_audio == HDMI_AUDIO_ON);
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
+ }
+
+ intel_display_power_put(dev_priv, power_domain);
+
return status;
}
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ enum intel_display_power_domain power_domain;
+ int ret;
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
- return intel_ddc_get_modes(intel_output);
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
+ ret = intel_ddc_get_modes(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
-static void intel_hdmi_destroy(struct drm_connector *connector)
+static bool
+intel_hdmi_detect_audio(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ enum intel_display_power_domain power_domain;
+ struct edid *edid;
+ bool has_audio = false;
- if (intel_output->i2c_bus)
- intel_i2c_destroy(intel_output->i2c_bus);
- drm_sysfs_connector_remove(connector);
- drm_connector_cleanup(connector);
- kfree(intel_output);
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
+ edid = drm_get_edid(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL)
+ has_audio = drm_detect_monitor_audio(edid);
+ kfree(edid);
+ }
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return has_audio;
}
-static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
- .dpms = intel_hdmi_dpms,
- .mode_fixup = intel_hdmi_mode_fixup,
- .prepare = intel_encoder_prepare,
- .mode_set = intel_hdmi_mode_set,
- .commit = intel_encoder_commit,
-};
+static int
+intel_hdmi_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ int ret;
+
+ ret = drm_object_property_set_value(&connector->base, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ enum hdmi_force_audio i = val;
+ bool has_audio;
+
+ if (i == intel_hdmi->force_audio)
+ return 0;
+
+ intel_hdmi->force_audio = i;
+
+ if (i == HDMI_AUDIO_AUTO)
+ has_audio = intel_hdmi_detect_audio(connector);
+ else
+ has_audio = (i == HDMI_AUDIO_ON);
+
+ if (i == HDMI_AUDIO_OFF_DVI)
+ intel_hdmi->has_hdmi_sink = 0;
+
+ intel_hdmi->has_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ bool old_auto = intel_hdmi->color_range_auto;
+ uint32_t old_range = intel_hdmi->color_range;
+
+ switch (val) {
+ case INTEL_BROADCAST_RGB_AUTO:
+ intel_hdmi->color_range_auto = true;
+ break;
+ case INTEL_BROADCAST_RGB_FULL:
+ intel_hdmi->color_range_auto = false;
+ intel_hdmi->color_range = 0;
+ break;
+ case INTEL_BROADCAST_RGB_LIMITED:
+ intel_hdmi->color_range_auto = false;
+ intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (old_auto == intel_hdmi->color_range_auto &&
+ old_range == intel_hdmi->color_range)
+ return 0;
+
+ goto done;
+ }
+
+ return -EINVAL;
+
+done:
+ if (intel_dig_port->base.base.crtc)
+ intel_crtc_restore_mode(intel_dig_port->base.base.crtc);
+
+ return 0;
+}
+
+static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+
+ intel_hdmi_prepare(encoder);
+
+ intel_hdmi->set_infoframes(&encoder->base,
+ intel_crtc->config.has_hdmi_sink,
+ adjusted_mode);
+}
+
+static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_hdmi *intel_hdmi = &dport->hdmi;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ u32 val;
+
+ /* Enable clock channels for this port */
+ mutex_lock(&dev_priv->dpio_lock);
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
+ val = 0;
+ if (pipe)
+ val |= (1<<21);
+ else
+ val &= ~(1<<21);
+ val |= 0x001000c4;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+
+ /* HDMI 1.0V-2dB */
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+
+ /* Program lane clock */
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ intel_hdmi->set_infoframes(&encoder->base,
+ intel_crtc->config.has_hdmi_sink,
+ adjusted_mode);
+
+ intel_enable_hdmi(encoder);
+
+ vlv_wait_port_ready(dev_priv, dport);
+}
+
+static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+
+ intel_hdmi_prepare(encoder);
+
+ /* Program Tx lane resets to default */
+ mutex_lock(&dev_priv->dpio_lock);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
+ DPIO_PCS_TX_LANE2_RESET |
+ DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
+ DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+ DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+ (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
+ DPIO_PCS_CLK_SOFT_RESET);
+
+ /* Fix up inter-pair skew failure */
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+
+ /* Reset lanes to avoid HDMI flicker (VLV w/a) */
+ mutex_lock(&dev_priv->dpio_lock);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void chv_hdmi_post_disable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Propagate soft reset to data lane reset */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ int data, i;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Deassert soft data lane reset */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+
+ /* Program Tx latency optimal setting */
+ for (i = 0; i < 4; i++) {
+ /* Set the latency optimal bit */
+ data = (i == 1) ? 0x0 : 0x6;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
+ data << DPIO_FRC_LATENCY_SHFIT);
+
+ /* Set the upar bit */
+ data = (i == 1) ? 0x0 : 0x1;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+ data << DPIO_UPAR_SHIFT);
+ }
+
+ /* Data lane stagger programming */
+ /* FIXME: Fix up value only after power analysis */
+
+ /* Clear calc init */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+ /* FIXME: Program the support xxx V-dB */
+ /* Use 800mV-0dB */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+ val &= ~DPIO_SWING_DEEMPH9P5_MASK;
+ val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+ val &= ~DPIO_SWING_MARGIN_MASK;
+ val |= 102 << DPIO_SWING_MARGIN_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+ }
+
+ /* Disable unique transition scale */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+ }
+
+ /* Additional steps for 1200mV-0dB */
+#if 0
+ val = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW3(ch));
+ if (ch)
+ val |= DPIO_TX_UNIQ_TRANS_SCALE_CH1;
+ else
+ val |= DPIO_TX_UNIQ_TRANS_SCALE_CH0;
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(ch), val);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(ch),
+ vlv_dpio_read(dev_priv, pipe, VLV_TX_DW2(ch)) |
+ (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT));
+#endif
+ /* Start swing calculation */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+ /* LRC Bypass */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+ val |= DPIO_LRC_BYPASS;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ intel_enable_hdmi(encoder);
+
+ vlv_wait_port_ready(dev_priv, dport);
+}
+
+static void intel_hdmi_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .save = intel_hdmi_save,
- .restore = intel_hdmi_restore,
+ .dpms = intel_connector_dpms,
.detect = intel_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_hdmi_set_property,
.destroy = intel_hdmi_destroy,
};
@@ -216,126 +1411,85 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs
.best_encoder = intel_best_encoder,
};
-static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
- .destroy = intel_hdmi_enc_destroy,
+ .destroy = intel_encoder_destroy,
};
-/*
- * Enumerate the child dev array parsed from VBT to check whether
- * the given HDMI is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it assumes that the given
- * HDMI is present.
- */
-static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg)
+static void
+intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
- int i, hdmi_port, ret;
-
- if (!dev_priv->child_dev_num)
- return 1;
-
- if (hdmi_reg == SDVOB)
- hdmi_port = DVO_B;
- else if (hdmi_reg == SDVOC)
- hdmi_port = DVO_C;
- else if (hdmi_reg == HDMIB)
- hdmi_port = DVO_B;
- else if (hdmi_reg == HDMIC)
- hdmi_port = DVO_C;
- else if (hdmi_reg == HDMID)
- hdmi_port = DVO_D;
- else
- return 0;
-
- ret = 0;
- for (i = 0; i < dev_priv->child_dev_num; i++) {
- p_child = dev_priv->child_dev + i;
- /*
- * If the device type is not HDMI, continue.
- */
- if (p_child->device_type != DEVICE_TYPE_HDMI)
- continue;
- /* Find the HDMI port */
- if (p_child->dvo_port == hdmi_port) {
- ret = 1;
- break;
- }
- }
- return ret;
+ intel_attach_force_audio_property(connector);
+ intel_attach_broadcast_rgb_property(connector);
+ intel_hdmi->color_range_auto = true;
}
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector)
{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
- struct intel_output *intel_output;
- struct intel_hdmi_priv *hdmi_priv;
+ enum port port = intel_dig_port->port;
- if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) {
- DRM_DEBUG_KMS("HDMI is not present. Ignored it \n");
- return;
- }
- intel_output = kcalloc(sizeof(struct intel_output) +
- sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
- if (!intel_output)
- return;
- hdmi_priv = (struct intel_hdmi_priv *)(intel_output + 1);
-
- connector = &intel_output->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
- intel_output->type = INTEL_OUTPUT_HDMI;
-
- connector->interlace_allowed = 0;
+ connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
-
- /* Set up the DDC bus. */
- if (sdvox_reg == SDVOB) {
- intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == SDVOC) {
- intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == HDMIB) {
- intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
- intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
- "HDMIB");
- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == HDMIC) {
- intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
- intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
- "HDMIC");
- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == HDMID) {
- intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
- intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
- "HDMID");
- dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
- }
- if (!intel_output->ddc_bus)
- goto err_connector;
-
- hdmi_priv->sdvox_reg = sdvox_reg;
- intel_output->dev_priv = hdmi_priv;
-
- drm_encoder_init(dev, &intel_output->enc, &intel_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_output->enc, &intel_hdmi_helper_funcs);
+ connector->stereo_allowed = 1;
+
+ switch (port) {
+ case PORT_B:
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+ intel_encoder->hpd_pin = HPD_PORT_B;
+ break;
+ case PORT_C:
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+ intel_encoder->hpd_pin = HPD_PORT_C;
+ break;
+ case PORT_D:
+ if (IS_CHERRYVIEW(dev))
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPD_CHV;
+ else
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+ intel_encoder->hpd_pin = HPD_PORT_D;
+ break;
+ case PORT_A:
+ intel_encoder->hpd_pin = HPD_PORT_A;
+ /* Internal port only for eDP. */
+ default:
+ BUG();
+ }
+
+ if (IS_VALLEYVIEW(dev)) {
+ intel_hdmi->write_infoframe = vlv_write_infoframe;
+ intel_hdmi->set_infoframes = vlv_set_infoframes;
+ } else if (!HAS_PCH_SPLIT(dev)) {
+ intel_hdmi->write_infoframe = g4x_write_infoframe;
+ intel_hdmi->set_infoframes = g4x_set_infoframes;
+ } else if (HAS_DDI(dev)) {
+ intel_hdmi->write_infoframe = hsw_write_infoframe;
+ intel_hdmi->set_infoframes = hsw_set_infoframes;
+ } else if (HAS_PCH_IBX(dev)) {
+ intel_hdmi->write_infoframe = ibx_write_infoframe;
+ intel_hdmi->set_infoframes = ibx_set_infoframes;
+ } else {
+ intel_hdmi->write_infoframe = cpt_write_infoframe;
+ intel_hdmi->set_infoframes = cpt_set_infoframes;
+ }
+
+ if (HAS_DDI(dev))
+ intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ else
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->unregister = intel_connector_unregister;
+
+ intel_hdmi_add_properties(intel_hdmi, connector);
- drm_mode_connector_attach_encoder(&intel_output->base,
- &intel_output->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -346,12 +1500,68 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
+}
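/*
 * The write_infoframe/set_infoframes hooks chosen above decouple callers
 * from the per-platform register layout. A minimal dispatch sketch
 * (hypothetical caller; the hook signature is an assumption, it is not
 * shown in this hunk):
 */
static void example_update_infoframes(struct intel_hdmi *intel_hdmi,
				      struct drm_encoder *encoder,
				      bool enable,
				      struct drm_display_mode *adjusted_mode)
{
	/* resolves to the g4x/ibx/cpt/vlv/hsw variant picked at init */
	intel_hdmi->set_infoframes(encoder, enable, adjusted_mode);
}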
- return;
+void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
-err_connector:
- drm_connector_cleanup(connector);
- kfree(intel_output);
+ intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
+ if (!intel_dig_port)
+ return;
+
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_dig_port);
+ return;
+ }
+
+ intel_encoder = &intel_dig_port->base;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ intel_encoder->compute_config = intel_hdmi_compute_config;
+ intel_encoder->disable = intel_disable_hdmi;
+ intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+ intel_encoder->get_config = intel_hdmi_get_config;
+ if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->pre_enable = chv_hdmi_pre_enable;
+ intel_encoder->enable = vlv_enable_hdmi;
+ intel_encoder->post_disable = chv_hdmi_post_disable;
+ } else if (IS_VALLEYVIEW(dev)) {
+ intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
+ intel_encoder->pre_enable = vlv_hdmi_pre_enable;
+ intel_encoder->enable = vlv_enable_hdmi;
+ intel_encoder->post_disable = vlv_hdmi_post_disable;
+ } else {
+ intel_encoder->pre_enable = intel_hdmi_pre_enable;
+ intel_encoder->enable = intel_enable_hdmi;
+ }
+
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
+ if (IS_CHERRYVIEW(dev)) {
+ if (port == PORT_D)
+ intel_encoder->crtc_mask = 1 << 2;
+ else
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ } else {
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ }
+ intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
+ /*
+ * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
+ * to work on real hardware. And since g4x can send infoframes to
+ * only one port anyway, nothing is lost by allowing it.
+ */
+ if (IS_G4X(dev))
+ intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
+
+ intel_dig_port->port = port;
+ intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
+ intel_dig_port->dp.output_reg = 0;
- return;
+ intel_hdmi_init_connector(intel_dig_port, intel_connector);
}
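/*
 * A minimal usage sketch (hypothetical caller): platform setup code is
 * expected to invoke the new three-argument intel_hdmi_init() once per
 * discovered HDMI port, pairing the register block with its port enum.
 * HDMIB/HDMIC are the register names used elsewhere in this diff.
 */
static void example_register_hdmi(struct drm_device *dev)
{
	intel_hdmi_init(dev, HDMIB, PORT_B);
	intel_hdmi_init(dev, HDMIC, PORT_C);
}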
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 8673c735b8a..d33b61d0dd3 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
- * Copyright © 2006-2008 Intel Corporation
+ * Copyright © 2006-2008,2010 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -24,89 +24,177 @@
*
* Authors:
* Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
*/
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
-#include "drmP.h"
-#include "drm.h"
+#include <linux/export.h>
+#include <drm/drmP.h>
#include "intel_drv.h"
-#include "i915_drm.h"
+#include <drm/i915_drm.h>
#include "i915_drv.h"
-void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
+enum disp_clk {
+ CDCLK,
+ CZCLK
+};
+
+struct gmbus_port {
+ const char *name;
+ int reg;
+};
+
+static const struct gmbus_port gmbus_ports[] = {
+ { "ssc", GPIOB },
+ { "vga", GPIOA },
+ { "panel", GPIOC },
+ { "dpc", GPIOD },
+ { "dpb", GPIOE },
+ { "dpd", GPIOF },
+};
+
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 10
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+ return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+static int get_disp_clk_div(struct drm_i915_private *dev_priv,
+ enum disp_clk clk)
+{
+ u32 reg_val;
+ int clk_ratio;
+
+ reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
+
+ if (clk == CDCLK)
+ clk_ratio =
+ ((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
+ else
+ clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
+
+ return clk_ratio;
+}
+
+static void gmbus_set_freq(struct drm_i915_private *dev_priv)
+{
+ int vco, gmbus_freq = 0, cdclk_div;
+
+ BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
+
+ vco = valleyview_get_vco(dev_priv);
+
+ /* Get the CDCLK divide ratio */
+ cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
+
+ /*
+ * Program the gmbus_freq based on the cdclk frequency.
+ * BSpec erroneously claims we should aim for 4MHz, but
+ * in fact 1MHz is the correct frequency.
+ */
+ if (cdclk_div)
+ gmbus_freq = (vco << 1) / cdclk_div;
+
+ if (WARN_ON(gmbus_freq == 0))
+ return;
+
+ I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
+}
+
+void
+intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ /*
+ * In BIOS-less system, program the correct gmbus frequency
+ * before reading edid.
+ */
+ if (IS_VALLEYVIEW(dev))
+ gmbus_set_freq(dev_priv);
+
+ I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
+ I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
+}
+
+static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- if (!IS_PINEVIEW(dev))
+ if (!IS_PINEVIEW(dev_priv->dev))
return;
+
+ val = I915_READ(DSPCLK_GATE_D);
if (enable)
- I915_WRITE(DSPCLK_GATE_D,
- I915_READ(DSPCLK_GATE_D) | DPCUNIT_CLOCK_GATE_DISABLE);
+ val |= DPCUNIT_CLOCK_GATE_DISABLE;
else
- I915_WRITE(DSPCLK_GATE_D,
- I915_READ(DSPCLK_GATE_D) & (~DPCUNIT_CLOCK_GATE_DISABLE));
+ val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
}
-/*
- * Intel GPIO access functions
- */
+static u32 get_reserved(struct intel_gmbus *bus)
+{
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = 0;
+
+ /* On most chips, these bits must be preserved in software. */
+ if (!IS_I830(dev) && !IS_845G(dev))
+ reserved = I915_READ_NOTRACE(bus->gpio_reg) &
+ (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
-#define I2C_RISEFALL_TIME 20
+ return reserved;
+}
static int get_clock(void *data)
{
- struct intel_i2c_chan *chan = data;
- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
- u32 val;
-
- val = I915_READ(chan->reg);
- return ((val & GPIO_CLOCK_VAL_IN) != 0);
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
+ return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
{
- struct intel_i2c_chan *chan = data;
- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
- u32 val;
-
- val = I915_READ(chan->reg);
- return ((val & GPIO_DATA_VAL_IN) != 0);
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
+ return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
{
- struct intel_i2c_chan *chan = data;
- struct drm_device *dev = chan->drm_dev;
- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
- u32 reserved = 0, clock_bits;
-
- /* On most chips, these bits must be preserved in software. */
- if (!IS_I830(dev) && !IS_845G(dev))
- reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
- GPIO_CLOCK_PULLUP_DISABLE);
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
+ u32 clock_bits;
if (state_high)
clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
else
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
- I915_WRITE(chan->reg, reserved | clock_bits);
- udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits);
+ POSTING_READ(bus->gpio_reg);
}
static void set_data(void *data, int state_high)
{
- struct intel_i2c_chan *chan = data;
- struct drm_device *dev = chan->drm_dev;
- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
- u32 reserved = 0, data_bits;
-
- /* On most chips, these bits must be preserved in software. */
- if (!IS_I830(dev) && !IS_845G(dev))
- reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
- GPIO_CLOCK_PULLUP_DISABLE);
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
+ u32 data_bits;
if (state_high)
data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
@@ -114,109 +202,464 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- I915_WRITE(chan->reg, reserved | data_bits);
- udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits);
+ POSTING_READ(bus->gpio_reg);
+}
+
+static int
+intel_gpio_pre_xfer(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ intel_i2c_reset(dev_priv->dev);
+ intel_i2c_quirk_set(dev_priv, true);
+ set_data(bus, 1);
+ set_clock(bus, 1);
+ udelay(I2C_RISEFALL_TIME);
+ return 0;
+}
+
+static void
+intel_gpio_post_xfer(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ set_data(bus, 1);
+ set_clock(bus, 1);
+ intel_i2c_quirk_set(dev_priv, false);
+}
+
+static void
+intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
+{
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct i2c_algo_bit_data *algo;
+
+ algo = &bus->bit_algo;
+
+ /* -1 to map pin pair to gmbus index */
+ bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg;
+
+ bus->adapter.algo_data = algo;
+ algo->setsda = set_data;
+ algo->setscl = set_clock;
+ algo->getsda = get_data;
+ algo->getscl = get_clock;
+ algo->pre_xfer = intel_gpio_pre_xfer;
+ algo->post_xfer = intel_gpio_post_xfer;
+ algo->udelay = I2C_RISEFALL_TIME;
+ algo->timeout = usecs_to_jiffies(2200);
+ algo->data = bus;
}
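/*
 * With the callbacks wired up above, transfers on this adapter fall
 * through to the generic i2c-algo-bit engine. A minimal sketch of a
 * hypothetical caller reading one byte over the bit-banged bus:
 */
static int example_bitbang_read_byte(struct intel_gmbus *bus, u8 addr, u8 *out)
{
	struct i2c_msg msg = {
		.addr  = addr,
		.flags = I2C_M_RD,
		.len   = 1,
		.buf   = out,
	};

	/* i2c_transfer() invokes the adapter's master_xfer and returns
	 * the number of messages completed on success */
	return i2c_transfer(&bus->adapter, &msg, 1) == 1 ? 0 : -EIO;
}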
-/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C
- * engine, but if the BIOS leaves it enabled, then that can break our use
- * of the bit-banging I2C interfaces. This is notably the case with the
- * Mac Mini in EFI mode.
+static int
+gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
+ u32 gmbus2_status,
+ u32 gmbus4_irq_en)
+{
+ int i;
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u32 gmbus2 = 0;
+ DEFINE_WAIT(wait);
+
+ if (!HAS_GMBUS_IRQ(dev_priv->dev))
+ gmbus4_irq_en = 0;
+
+ /* Important: The hw handles only the first bit, so set only one! Since
+ * we also need to check for NAKs besides the hw ready/idle signal, we
+ * need to wake up periodically and check that ourselves. */
+ I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);
+
+ for (i = 0; i < msecs_to_jiffies_timeout(50); i++) {
+ prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset);
+ if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
+ break;
+
+ schedule_timeout(1);
+ }
+ finish_wait(&dev_priv->gmbus_wait_queue, &wait);
+
+ I915_WRITE(GMBUS4 + reg_offset, 0);
+
+ if (gmbus2 & GMBUS_SATOER)
+ return -ENXIO;
+ if (gmbus2 & gmbus2_status)
+ return 0;
+ return -ETIMEDOUT;
+}
+
+static int
+gmbus_wait_idle(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ int reg_offset = dev_priv->gpio_mmio_base;
+
+#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0)
+
+ if (!HAS_GMBUS_IRQ(dev_priv->dev))
+ return wait_for(C, 10);
+
+ /* Important: The hw handles only the first bit, so set only one! */
+ I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);
+
+ ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+ msecs_to_jiffies_timeout(10));
+
+ I915_WRITE(GMBUS4 + reg_offset, 0);
+
+ if (ret)
+ return 0;
+ else
+ return -ETIMEDOUT;
+#undef C
+}
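/*
 * The short-lived C macro above lets the same "GMBUS_ACTIVE cleared"
 * condition feed both the polled wait_for() path and the IRQ-driven
 * wait_event_timeout() path. A macro-free equivalent, as a hypothetical
 * sketch:
 */
static bool gmbus_is_idle(struct drm_i915_private *dev_priv)
{
	int reg_offset = dev_priv->gpio_mmio_base;

	return (I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0;
}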
+
+static int
+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ u32 gmbus1_index)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u16 len = msg->len;
+ u8 *buf = msg->buf;
+
+ I915_WRITE(GMBUS1 + reg_offset,
+ gmbus1_index |
+ GMBUS_CYCLE_WAIT |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+ u32 val, loop = 0;
+
+ ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
+ GMBUS_HW_RDY_EN);
+ if (ret)
+ return ret;
+
+ val = I915_READ(GMBUS3 + reg_offset);
+ do {
+ *buf++ = val & 0xff;
+ val >>= 8;
+ } while (--len && ++loop < 4);
+ }
+
+ return 0;
+}
+
+static int
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u16 len = msg->len;
+ u8 *buf = msg->buf;
+ u32 val, loop;
+
+ val = loop = 0;
+ while (len && loop < 4) {
+ val |= *buf++ << (8 * loop++);
+ len -= 1;
+ }
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ I915_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT |
+ (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+
+ ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
+ GMBUS_HW_RDY_EN);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * The gmbus controller can combine a 1 or 2 byte write with a read that
+ * immediately follows it by using an "INDEX" cycle.
*/
-void
-intel_i2c_reset_gmbus(struct drm_device *dev)
+static bool
+gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ return (i + 1 < num &&
+ !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+ (msgs[i + 1].flags & I2C_M_RD));
+}
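/*
 * A hypothetical message pair that gmbus_is_index_read() classifies as an
 * index read: a 1-byte register-offset write immediately followed by a
 * read from the same slave. Sketch only; the 0x50 DDC/EDID address is an
 * assumption, not something this diff defines.
 */
static void example_build_index_read(struct i2c_msg msgs[2],
				     u8 *offset, u8 *buf, u16 len)
{
	msgs[0].addr  = 0x50;		/* write phase: the index */
	msgs[0].flags = 0;
	msgs[0].len   = 1;
	msgs[0].buf   = offset;

	msgs[1].addr  = 0x50;		/* read phase, combined by gmbus */
	msgs[1].flags = I2C_M_RD;
	msgs[1].len   = len;
	msgs[1].buf   = buf;
}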
+
+static int
+gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u32 gmbus1_index = 0;
+ u32 gmbus5 = 0;
+ int ret;
+
+ if (msgs[0].len == 2)
+ gmbus5 = GMBUS_2BYTE_INDEX_EN |
+ msgs[0].buf[1] | (msgs[0].buf[0] << 8);
+ if (msgs[0].len == 1)
+ gmbus1_index = GMBUS_CYCLE_INDEX |
+ (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT);
+
+ /* GMBUS5 holds 16-bit index */
+ if (gmbus5)
+ I915_WRITE(GMBUS5 + reg_offset, gmbus5);
+
+ ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
+
+ /* Clear GMBUS5 after each index transfer */
+ if (gmbus5)
+ I915_WRITE(GMBUS5 + reg_offset, 0);
+
+ return ret;
+}
- if (IS_IRONLAKE(dev)) {
- I915_WRITE(PCH_GMBUS0, 0);
- } else {
- I915_WRITE(GMBUS0, 0);
+static int
+gmbus_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ int i, reg_offset;
+ int ret = 0;
+
+ intel_aux_display_runtime_get(dev_priv);
+ mutex_lock(&dev_priv->gmbus_mutex);
+
+ if (bus->force_bit) {
+ ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+ goto out;
+ }
+
+ reg_offset = dev_priv->gpio_mmio_base;
+
+ I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
+
+ for (i = 0; i < num; i++) {
+ if (gmbus_is_index_read(msgs, i, num)) {
+ ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
+ i += 1; /* set i to the index of the read xfer */
+ } else if (msgs[i].flags & I2C_M_RD) {
+ ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
+ } else {
+ ret = gmbus_xfer_write(dev_priv, &msgs[i]);
+ }
+
+ if (ret == -ETIMEDOUT)
+ goto timeout;
+ if (ret == -ENXIO)
+ goto clear_err;
+
+ ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
+ GMBUS_HW_WAIT_EN);
+ if (ret == -ENXIO)
+ goto clear_err;
+ if (ret)
+ goto timeout;
}
+
+ /* Generate a STOP condition on the bus. Note that gmbus can't generate
+ * a STOP on the very first cycle. To simplify the code we
+ * unconditionally generate the STOP condition with an additional gmbus
+ * cycle. */
+ I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+
+ /* Mark the GMBUS interface as disabled after waiting for idle.
+ * We will re-enable it at the start of the next xfer,
+ * till then let it sleep.
+ */
+ if (gmbus_wait_idle(dev_priv)) {
+ DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
+ adapter->name);
+ ret = -ETIMEDOUT;
+ }
+ I915_WRITE(GMBUS0 + reg_offset, 0);
+ ret = ret ?: i;
+ goto out;
+
+clear_err:
+ /*
+ * Wait for bus to IDLE before clearing NAK.
+ * If we clear the NAK while bus is still active, then it will stay
+ * active and the next transaction may fail.
+ *
+ * If no ACK is received during the address phase of a transaction, the
+ * adapter must report -ENXIO. It is not clear what to return if no ACK
+ * is received at other times. But we have to be careful to not return
+ * spurious -ENXIO because that will prevent i2c and drm edid functions
+ * from retrying. So return -ENXIO only when gmbus properly quiescents -
+ * timing out seems to happen when there _is_ a ddc chip present, but
+ * it's slow responding and only answers on the 2nd retry.
+ */
+ ret = -ENXIO;
+ if (gmbus_wait_idle(dev_priv)) {
+ DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
+ adapter->name);
+ ret = -ETIMEDOUT;
+ }
+
+ /* Toggle the Software Clear Interrupt bit. This has the effect
+ * of resetting the GMBUS controller and so clearing the
+ * BUS_ERROR raised by the slave's NAK.
+ */
+ I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
+ I915_WRITE(GMBUS1 + reg_offset, 0);
+ I915_WRITE(GMBUS0 + reg_offset, 0);
+
+ DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+ adapter->name, msgs[i].addr,
+ (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+
+ goto out;
+
+timeout:
+ DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+ bus->adapter.name, bus->reg0 & 0xff);
+ I915_WRITE(GMBUS0 + reg_offset, 0);
+
+ /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
+ bus->force_bit = 1;
+ ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+
+out:
+ mutex_unlock(&dev_priv->gmbus_mutex);
+ intel_aux_display_runtime_put(dev_priv);
+ return ret;
}
+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+ return i2c_bit_algo.functionality(adapter) &
+ (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ /* I2C_FUNC_10BIT_ADDR | */
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+}
+
+static const struct i2c_algorithm gmbus_algorithm = {
+ .master_xfer = gmbus_xfer,
+ .functionality = gmbus_func
+};
+
/**
- * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * intel_gmbus_setup - instantiate all Intel i2c GMBuses
* @dev: DRM device
- * @output: driver specific output device
- * @reg: GPIO reg to use
- * @name: name for this bus
- * @slave_addr: slave address (if fixed)
- *
- * Creates and registers a new i2c bus with the Linux i2c layer, for use
- * in output probing and control (e.g. DDC or SDVO control functions).
- *
- * Possible values for @reg include:
- * %GPIOA
- * %GPIOB
- * %GPIOC
- * %GPIOD
- * %GPIOE
- * %GPIOF
- * %GPIOG
- * %GPIOH
- * see PRM for details on how these different busses are used.
*/
-struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
- const char *name)
+int intel_setup_gmbus(struct drm_device *dev)
{
- struct intel_i2c_chan *chan;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret, i;
+
+ if (HAS_PCH_NOP(dev))
+ return 0;
+ else if (HAS_PCH_SPLIT(dev))
+ dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
+ else if (IS_VALLEYVIEW(dev))
+ dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
+ else
+ dev_priv->gpio_mmio_base = 0;
- chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
- if (!chan)
- goto out_free;
+ mutex_init(&dev_priv->gmbus_mutex);
+ init_waitqueue_head(&dev_priv->gmbus_wait_queue);
- chan->drm_dev = dev;
- chan->reg = reg;
- snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
- chan->adapter.owner = THIS_MODULE;
- chan->adapter.algo_data = &chan->algo;
- chan->adapter.dev.parent = &dev->pdev->dev;
- chan->algo.setsda = set_data;
- chan->algo.setscl = set_clock;
- chan->algo.getsda = get_data;
- chan->algo.getscl = get_clock;
- chan->algo.udelay = 20;
- chan->algo.timeout = usecs_to_jiffies(2200);
- chan->algo.data = chan;
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ u32 port = i + 1; /* +1 to map gmbus index to pin pair */
- i2c_set_adapdata(&chan->adapter, chan);
+ bus->adapter.owner = THIS_MODULE;
+ bus->adapter.class = I2C_CLASS_DDC;
+ snprintf(bus->adapter.name,
+ sizeof(bus->adapter.name),
+ "i915 gmbus %s",
+ gmbus_ports[i].name);
- if(i2c_bit_add_bus(&chan->adapter))
- goto out_free;
+ bus->adapter.dev.parent = &dev->pdev->dev;
+ bus->dev_priv = dev_priv;
- intel_i2c_reset_gmbus(dev);
+ bus->adapter.algo = &gmbus_algorithm;
- /* JJJ: raise SCL and SDA? */
- intel_i2c_quirk_set(dev, true);
- set_data(chan, 1);
- set_clock(chan, 1);
- intel_i2c_quirk_set(dev, false);
- udelay(20);
+ /* By default use a conservative clock rate */
+ bus->reg0 = port | GMBUS_RATE_100KHZ;
- return &chan->adapter;
+ /* gmbus seems to be broken on i830 */
+ if (IS_I830(dev))
+ bus->force_bit = 1;
-out_free:
- kfree(chan);
- return NULL;
+ intel_gpio_setup(bus, port);
+
+ ret = i2c_add_adapter(&bus->adapter);
+ if (ret)
+ goto err;
+ }
+
+ intel_i2c_reset(dev_priv->dev);
+
+ return 0;
+
+err:
+ while (i--) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ i2c_del_adapter(&bus->adapter);
+ }
+ return ret;
}
-/**
- * intel_i2c_destroy - unregister and free i2c bus resources
- * @output: channel to free
- *
- * Unregister the adapter from the i2c layer, then free the structure.
- */
-void intel_i2c_destroy(struct i2c_adapter *adapter)
+struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
+ unsigned port)
{
- struct intel_i2c_chan *chan;
+ WARN_ON(!intel_gmbus_is_port_valid(port));
+ /* -1 to map pin pair to gmbus index */
+ return (intel_gmbus_is_port_valid(port)) ?
+ &dev_priv->gmbus[port - 1].adapter : NULL;
+}
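/*
 * A hypothetical consumer of the lookup above: map a pin pair to its
 * registered adapter and hand it to the standard DRM EDID helper.
 * drm_get_edid() comes from the DRM core, not from this diff.
 */
static struct edid *example_probe_edid(struct drm_connector *connector,
				       struct drm_i915_private *dev_priv,
				       unsigned port)
{
	struct i2c_adapter *adapter =
		intel_gmbus_get_adapter(dev_priv, port);

	return adapter ? drm_get_edid(connector, adapter) : NULL;
}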
- if (!adapter)
- return;
+void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
- chan = container_of(adapter,
- struct intel_i2c_chan,
- adapter);
- i2c_del_adapter(&chan->adapter);
- kfree(chan);
+ bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed;
+}
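/*
 * The speed value lands in bits 9:8 of reg0, alongside the pin select.
 * Hypothetical usage, assuming the GMBUS_RATE_* encodings from
 * i915_reg.h (only GMBUS_RATE_100KHZ appears in this diff):
 */
static void example_raise_ddc_speed(struct i2c_adapter *adapter)
{
	/* takes effect on the next transfer, when reg0 is written to
	 * GMBUS0 at the top of gmbus_xfer() */
	intel_gmbus_set_speed(adapter, GMBUS_RATE_400KHZ);
}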
+
+void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ bus->force_bit += force_bit ? 1 : -1;
+ DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
+ force_bit ? "en" : "dis", adapter->name,
+ bus->force_bit);
+}
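/*
 * Because force_bit is now a counter, calls must nest: each
 * force_bit=true needs a matching force_bit=false. A minimal sketch of
 * the intended bracketing around a transfer (hypothetical caller):
 */
static int example_xfer_forced_bitbang(struct i2c_adapter *adapter,
				       struct i2c_msg *msgs, int num)
{
	int ret;

	intel_gmbus_force_bit(adapter, true);	/* take the GPIO path */
	ret = i2c_transfer(adapter, msgs, num);
	intel_gmbus_force_bit(adapter, false);	/* give gmbus back */

	return ret;
}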
+
+void intel_teardown_gmbus(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ i2c_del_adapter(&bus->adapter);
+ }
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f4b4aa242df..5e5a72fca5f 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -30,509 +30,294 @@
#include <acpi/button.h>
#include <linux/dmi.h>
#include <linux/i2c.h>
-#include "drmP.h"
-#include "drm.h"
-#include "drm_crtc.h"
-#include "drm_edid.h"
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
#include "intel_drv.h"
-#include "i915_drm.h"
+#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <linux/acpi.h>
/* Private structure for the integrated LVDS support */
-struct intel_lvds_priv {
- int fitting_mode;
- u32 pfit_control;
- u32 pfit_pgm_ratios;
+struct intel_lvds_connector {
+ struct intel_connector base;
+
+ struct notifier_block lid_notifier;
};
-/**
- * Sets the backlight level.
- *
- * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
- */
-static void intel_lvds_set_backlight(struct drm_device *dev, int level)
+struct intel_lvds_encoder {
+ struct intel_encoder base;
+
+ bool is_dual_link;
+ u32 reg;
+
+ struct intel_lvds_connector *attached_connector;
+};
+
+static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_lvds_encoder, base.base);
+}
+
+static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
{
+ return container_of(connector, struct intel_lvds_connector, base.base);
+}
+
+static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blc_pwm_ctl, reg;
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(lvds_encoder->reg);
+
+ if (!(tmp & LVDS_PORT_EN))
+ return false;
- if (IS_IRONLAKE(dev))
- reg = BLC_PWM_CPU_CTL;
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
else
- reg = BLC_PWM_CTL;
+ *pipe = PORT_TO_PIPE(tmp);
- blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(reg, (blc_pwm_ctl |
- (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+ return true;
}
-/**
- * Returns the maximum level of the backlight duty cycle field.
- */
-static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
+static void intel_lvds_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
+ u32 lvds_reg, tmp, flags = 0;
+ int dotclock;
+
+ if (HAS_PCH_SPLIT(dev))
+ lvds_reg = PCH_LVDS;
+ else
+ lvds_reg = LVDS;
- if (IS_IRONLAKE(dev))
- reg = BLC_PWM_PCH_CTL2;
+ tmp = I915_READ(lvds_reg);
+ if (tmp & LVDS_HSYNC_POLARITY)
+ flags |= DRM_MODE_FLAG_NHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ if (tmp & LVDS_VSYNC_POLARITY)
+ flags |= DRM_MODE_FLAG_NVSYNC;
else
- reg = BLC_PWM_CTL;
+ flags |= DRM_MODE_FLAG_PVSYNC;
+
+ pipe_config->adjusted_mode.flags |= flags;
+
+ /* gen2/3 store dither state in pfit control, needs to match */
+ if (INTEL_INFO(dev)->gen < 4) {
+ tmp = I915_READ(PFIT_CONTROL);
+
+ pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+ }
- return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >>
- BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+ dotclock = pipe_config->port_clock;
+
+ if (HAS_PCH_SPLIT(dev_priv->dev))
+ ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+ pipe_config->adjusted_mode.crtc_clock = dotclock;
}
-/**
- * Sets the power state for the panel.
- */
-static void intel_lvds_set_power(struct drm_device *dev, bool on)
+static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_status, ctl_reg, status_reg;
-
- if (IS_IRONLAKE(dev)) {
- ctl_reg = PCH_PP_CONTROL;
- status_reg = PCH_PP_STATUS;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config.adjusted_mode;
+ int pipe = crtc->pipe;
+ u32 temp;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ assert_fdi_rx_pll_disabled(dev_priv, pipe);
+ assert_shared_dpll_disabled(dev_priv,
+ intel_crtc_to_shared_dpll(crtc));
} else {
- ctl_reg = PP_CONTROL;
- status_reg = PP_STATUS;
+ assert_pll_disabled(dev_priv, pipe);
}
- if (on) {
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
- POWER_TARGET_ON);
- do {
- pp_status = I915_READ(status_reg);
- } while ((pp_status & PP_ON) == 0);
+ temp = I915_READ(lvds_encoder->reg);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~PORT_TRANS_SEL_MASK;
+ temp |= PORT_TRANS_SEL_CPT(pipe);
} else {
- intel_lvds_set_backlight(dev, 0);
-
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
- ~POWER_TARGET_ON);
- do {
- pp_status = I915_READ(status_reg);
- } while (pp_status & PP_ON);
+ if (pipe == 1) {
+ temp |= LVDS_PIPEB_SELECT;
+ } else {
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
}
-}
-static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
-{
- struct drm_device *dev = encoder->dev;
-
- if (mode == DRM_MODE_DPMS_ON)
- intel_lvds_set_power(dev, true);
+ /* set the corresponding LVDS_BORDER bit */
+ temp &= ~LVDS_BORDER_ENABLE;
+ temp |= crtc->config.gmch_pfit.lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (lvds_encoder->is_dual_link)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
else
- intel_lvds_set_power(dev, false);
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more thoroughly into how
+ * panels behave in the two modes.
+ */
- /* XXX: We never power down the LVDS pairs. */
+ /* Set the dithering flag on LVDS as needed. Note that there is no
+ * special lvds dither control bit on pch-split platforms; dithering is
+ * only controlled through the PIPECONF reg. */
+ if (INTEL_INFO(dev)->gen == 4) {
+ /* Bspec wording suggests that LVDS port dithering only exists
+ * for 18bpp panels. */
+ if (crtc->config.dither && crtc->config.pipe_bpp == 18)
+ temp |= LVDS_ENABLE_DITHER;
+ else
+ temp &= ~LVDS_ENABLE_DITHER;
+ }
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+
+ I915_WRITE(lvds_encoder->reg, temp);
}
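/*
 * The dual-link decision above reduces to which data-pair power bits get
 * set in the port register. A hypothetical restatement of just that
 * mapping, using the bit names from this hunk:
 */
static u32 lvds_power_bits(bool is_dual_link)
{
	/* the B0-B3 pairs and second clock only power up for dual link */
	if (is_dual_link)
		return LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
	return 0;
}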
-static void intel_lvds_save(struct drm_connector *connector)
+/**
+ * Sets the power state for the panel.
+ */
+static void intel_enable_lvds(struct intel_encoder *encoder)
{
- struct drm_device *dev = connector->dev;
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_connector *intel_connector =
+ &lvds_encoder->attached_connector->base;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
- u32 pwm_ctl_reg;
-
- if (IS_IRONLAKE(dev)) {
- pp_on_reg = PCH_PP_ON_DELAYS;
- pp_off_reg = PCH_PP_OFF_DELAYS;
- pp_ctl_reg = PCH_PP_CONTROL;
- pp_div_reg = PCH_PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CPU_CTL;
+ u32 ctl_reg, stat_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ ctl_reg = PCH_PP_CONTROL;
+ stat_reg = PCH_PP_STATUS;
} else {
- pp_on_reg = PP_ON_DELAYS;
- pp_off_reg = PP_OFF_DELAYS;
- pp_ctl_reg = PP_CONTROL;
- pp_div_reg = PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CTL;
+ ctl_reg = PP_CONTROL;
+ stat_reg = PP_STATUS;
}
- dev_priv->savePP_ON = I915_READ(pp_on_reg);
- dev_priv->savePP_OFF = I915_READ(pp_off_reg);
- dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg);
- dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg);
- dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg);
- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
- BACKLIGHT_DUTY_CYCLE_MASK);
+ I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
- /*
- * If the light is off at server startup, just make it full brightness
- */
- if (dev_priv->backlight_duty_cycle == 0)
- dev_priv->backlight_duty_cycle =
- intel_lvds_get_max_backlight(dev);
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+ POSTING_READ(lvds_encoder->reg);
+ if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power on\n");
+
+ intel_panel_enable_backlight(intel_connector);
}
-static void intel_lvds_restore(struct drm_connector *connector)
+static void intel_disable_lvds(struct intel_encoder *encoder)
{
- struct drm_device *dev = connector->dev;
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_connector *intel_connector =
+ &lvds_encoder->attached_connector->base;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
- u32 pwm_ctl_reg;
-
- if (IS_IRONLAKE(dev)) {
- pp_on_reg = PCH_PP_ON_DELAYS;
- pp_off_reg = PCH_PP_OFF_DELAYS;
- pp_ctl_reg = PCH_PP_CONTROL;
- pp_div_reg = PCH_PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CPU_CTL;
+ u32 ctl_reg, stat_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ ctl_reg = PCH_PP_CONTROL;
+ stat_reg = PCH_PP_STATUS;
} else {
- pp_on_reg = PP_ON_DELAYS;
- pp_off_reg = PP_OFF_DELAYS;
- pp_ctl_reg = PP_CONTROL;
- pp_div_reg = PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CTL;
+ ctl_reg = PP_CONTROL;
+ stat_reg = PP_STATUS;
}
- I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(pp_on_reg, dev_priv->savePP_ON);
- I915_WRITE(pp_off_reg, dev_priv->savePP_OFF);
- I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR);
- I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL);
- if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
- intel_lvds_set_power(dev, true);
- else
- intel_lvds_set_power(dev, false);
+ intel_panel_disable_backlight(intel_connector);
+
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+ if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power off\n");
+
+ I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
+ POSTING_READ(lvds_encoder->reg);
}
-static int intel_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- if (fixed_mode) {
- if (mode->hdisplay > fixed_mode->hdisplay)
- return MODE_PANEL;
- if (mode->vdisplay > fixed_mode->vdisplay)
- return MODE_PANEL;
- }
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
return MODE_OK;
}
-static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
+ struct intel_crtc_config *pipe_config)
{
- /*
- * float point operation is not supported . So the PANEL_RATIO_FACTOR
- * is defined, which can avoid the float point computation when
- * calculating the panel ratio.
- */
-#define PANEL_RATIO_FACTOR 8192
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct drm_encoder *tmp_encoder;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
- u32 pfit_control = 0, pfit_pgm_ratios = 0;
- int left_border = 0, right_border = 0, top_border = 0;
- int bottom_border = 0;
- bool border = 0;
- int panel_ratio, desired_ratio, vert_scale, horiz_scale;
- int horiz_ratio, vert_ratio;
- u32 hsync_width, vsync_width;
- u32 hblank_width, vblank_width;
- u32 hsync_pos, vsync_pos;
+ struct intel_lvds_encoder *lvds_encoder =
+ to_lvds_encoder(&intel_encoder->base);
+ struct intel_connector *intel_connector =
+ &lvds_encoder->attached_connector->base;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
+ unsigned int lvds_bpp;
/* Should never happen!! */
- if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
+ if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
DRM_ERROR("Can't support LVDS on pipe A\n");
return false;
}
- /* Should never happen!! */
- list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) {
- if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) {
- DRM_ERROR("Can't enable LVDS and another "
- "encoder on the same pipe\n");
- return false;
- }
+ if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
+ LVDS_A3_POWER_UP)
+ lvds_bpp = 8*3;
+ else
+ lvds_bpp = 6*3;
+
+ if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
+ DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
+ pipe_config->pipe_bpp, lvds_bpp);
+ pipe_config->pipe_bpp = lvds_bpp;
}
- /* If we don't have a panel mode, there is nothing we can do */
- if (dev_priv->panel_fixed_mode == NULL)
- return true;
+
/*
- * If we have timings from the BIOS for the panel, put them in
+ * We have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- if (dev_priv->panel_fixed_mode != NULL) {
- adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
- adjusted_mode->hsync_start =
- dev_priv->panel_fixed_mode->hsync_start;
- adjusted_mode->hsync_end =
- dev_priv->panel_fixed_mode->hsync_end;
- adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
- adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
- adjusted_mode->vsync_start =
- dev_priv->panel_fixed_mode->vsync_start;
- adjusted_mode->vsync_end =
- dev_priv->panel_fixed_mode->vsync_end;
- adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
- adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- }
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+ adjusted_mode);
- /* Make sure pre-965s set dither correctly */
- if (!IS_I965G(dev)) {
- if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
- pfit_control |= PANEL_8TO6_DITHER_ENABLE;
- }
-
- /* Native modes don't need fitting */
- if (adjusted_mode->hdisplay == mode->hdisplay &&
- adjusted_mode->vdisplay == mode->vdisplay) {
- pfit_pgm_ratios = 0;
- border = 0;
- goto out;
- }
+ if (HAS_PCH_SPLIT(dev)) {
+ pipe_config->has_pch_encoder = true;
- /* full screen scale for now */
- if (IS_IRONLAKE(dev))
- goto out;
-
- /* 965+ wants fuzzy fitting */
- if (IS_I965G(dev))
- pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) |
- PFIT_FILTER_FUZZY;
-
- hsync_width = adjusted_mode->crtc_hsync_end -
- adjusted_mode->crtc_hsync_start;
- vsync_width = adjusted_mode->crtc_vsync_end -
- adjusted_mode->crtc_vsync_start;
- hblank_width = adjusted_mode->crtc_hblank_end -
- adjusted_mode->crtc_hblank_start;
- vblank_width = adjusted_mode->crtc_vblank_end -
- adjusted_mode->crtc_vblank_start;
- /*
- * Deal with panel fitting options. Figure out how to stretch the
- * image based on its aspect ratio & the current panel fitting mode.
- */
- panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR /
- adjusted_mode->vdisplay;
- desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR /
- mode->vdisplay;
- /*
- * Enable automatic panel scaling for non-native modes so that they fill
- * the screen. Should be enabled before the pipe is enabled, according
- * to register description and PRM.
- * Change the value here to see the borders for debugging
- */
- if (!IS_IRONLAKE(dev)) {
- I915_WRITE(BCLRPAT_A, 0);
- I915_WRITE(BCLRPAT_B, 0);
- }
-
- switch (lvds_priv->fitting_mode) {
- case DRM_MODE_SCALE_CENTER:
- /*
- * For centered modes, we have to calculate border widths &
- * heights and modify the values programmed into the CRTC.
- */
- left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2;
- right_border = left_border;
- if (mode->hdisplay & 1)
- right_border++;
- top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2;
- bottom_border = top_border;
- if (mode->vdisplay & 1)
- bottom_border++;
- /* Set active & border values */
- adjusted_mode->crtc_hdisplay = mode->hdisplay;
- /* Keep the boder be even */
- if (right_border & 1)
- right_border++;
- /* use the border directly instead of border minuse one */
- adjusted_mode->crtc_hblank_start = mode->hdisplay +
- right_border;
- /* keep the blank width constant */
- adjusted_mode->crtc_hblank_end =
- adjusted_mode->crtc_hblank_start + hblank_width;
- /* get the hsync pos relative to hblank start */
- hsync_pos = (hblank_width - hsync_width) / 2;
- /* keep the hsync pos be even */
- if (hsync_pos & 1)
- hsync_pos++;
- adjusted_mode->crtc_hsync_start =
- adjusted_mode->crtc_hblank_start + hsync_pos;
- /* keep the hsync width constant */
- adjusted_mode->crtc_hsync_end =
- adjusted_mode->crtc_hsync_start + hsync_width;
- adjusted_mode->crtc_vdisplay = mode->vdisplay;
- /* use the border instead of border minus one */
- adjusted_mode->crtc_vblank_start = mode->vdisplay +
- bottom_border;
- /* keep the vblank width constant */
- adjusted_mode->crtc_vblank_end =
- adjusted_mode->crtc_vblank_start + vblank_width;
- /* get the vsync start postion relative to vblank start */
- vsync_pos = (vblank_width - vsync_width) / 2;
- adjusted_mode->crtc_vsync_start =
- adjusted_mode->crtc_vblank_start + vsync_pos;
- /* keep the vsync width constant */
- adjusted_mode->crtc_vsync_end =
- adjusted_mode->crtc_vsync_start + vsync_width;
- border = 1;
- break;
- case DRM_MODE_SCALE_ASPECT:
- /* Scale but preserve the spect ratio */
- pfit_control |= PFIT_ENABLE;
- if (IS_I965G(dev)) {
- /* 965+ is easy, it does everything in hw */
- if (panel_ratio > desired_ratio)
- pfit_control |= PFIT_SCALING_PILLAR;
- else if (panel_ratio < desired_ratio)
- pfit_control |= PFIT_SCALING_LETTER;
- else
- pfit_control |= PFIT_SCALING_AUTO;
- } else {
- /*
- * For earlier chips we have to calculate the scaling
- * ratio by hand and program it into the
- * PFIT_PGM_RATIO register
- */
- u32 horiz_bits, vert_bits, bits = 12;
- horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/
- adjusted_mode->hdisplay;
- vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/
- adjusted_mode->vdisplay;
- horiz_scale = adjusted_mode->hdisplay *
- PANEL_RATIO_FACTOR / mode->hdisplay;
- vert_scale = adjusted_mode->vdisplay *
- PANEL_RATIO_FACTOR / mode->vdisplay;
-
- /* retain aspect ratio */
- if (panel_ratio > desired_ratio) { /* Pillar */
- u32 scaled_width;
- scaled_width = mode->hdisplay * vert_scale /
- PANEL_RATIO_FACTOR;
- horiz_ratio = vert_ratio;
- pfit_control |= (VERT_AUTO_SCALE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- /* Pillar will have left/right borders */
- left_border = (adjusted_mode->hdisplay -
- scaled_width) / 2;
- right_border = left_border;
- if (mode->hdisplay & 1) /* odd resolutions */
- right_border++;
- /* keep the border be even */
- if (right_border & 1)
- right_border++;
- adjusted_mode->crtc_hdisplay = scaled_width;
- /* use border instead of border minus one */
- adjusted_mode->crtc_hblank_start =
- scaled_width + right_border;
- /* keep the hblank width constant */
- adjusted_mode->crtc_hblank_end =
- adjusted_mode->crtc_hblank_start +
- hblank_width;
- /*
- * get the hsync start pos relative to
- * hblank start
- */
- hsync_pos = (hblank_width - hsync_width) / 2;
- /* keep the hsync_pos be even */
- if (hsync_pos & 1)
- hsync_pos++;
- adjusted_mode->crtc_hsync_start =
- adjusted_mode->crtc_hblank_start +
- hsync_pos;
- /* keept hsync width constant */
- adjusted_mode->crtc_hsync_end =
- adjusted_mode->crtc_hsync_start +
- hsync_width;
- border = 1;
- } else if (panel_ratio < desired_ratio) { /* letter */
- u32 scaled_height = mode->vdisplay *
- horiz_scale / PANEL_RATIO_FACTOR;
- vert_ratio = horiz_ratio;
- pfit_control |= (HORIZ_AUTO_SCALE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- /* Letterbox will have top/bottom border */
- top_border = (adjusted_mode->vdisplay -
- scaled_height) / 2;
- bottom_border = top_border;
- if (mode->vdisplay & 1)
- bottom_border++;
- adjusted_mode->crtc_vdisplay = scaled_height;
- /* use border instead of border minus one */
- adjusted_mode->crtc_vblank_start =
- scaled_height + bottom_border;
- /* keep the vblank width constant */
- adjusted_mode->crtc_vblank_end =
- adjusted_mode->crtc_vblank_start +
- vblank_width;
- /*
- * get the vsync start pos relative to
- * vblank start
- */
- vsync_pos = (vblank_width - vsync_width) / 2;
- adjusted_mode->crtc_vsync_start =
- adjusted_mode->crtc_vblank_start +
- vsync_pos;
- /* keep the vsync width constant */
- adjusted_mode->crtc_vsync_end =
- adjusted_mode->crtc_vsync_start +
- vsync_width;
- border = 1;
- } else {
- /* Aspects match, Let hw scale both directions */
- pfit_control |= (VERT_AUTO_SCALE |
- HORIZ_AUTO_SCALE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- }
- horiz_bits = (1 << bits) * horiz_ratio /
- PANEL_RATIO_FACTOR;
- vert_bits = (1 << bits) * vert_ratio /
- PANEL_RATIO_FACTOR;
- pfit_pgm_ratios =
- ((vert_bits << PFIT_VERT_SCALE_SHIFT) &
- PFIT_VERT_SCALE_MASK) |
- ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) &
- PFIT_HORIZ_SCALE_MASK);
- }
- break;
+ intel_pch_panel_fitting(intel_crtc, pipe_config,
+ intel_connector->panel.fitting_mode);
+ } else {
+ intel_gmch_panel_fitting(intel_crtc, pipe_config,
+ intel_connector->panel.fitting_mode);
- case DRM_MODE_SCALE_FULLSCREEN:
- /*
- * Full scaling, even if it changes the aspect ratio.
- * Fortunately this is all done for us in hw.
- */
- pfit_control |= PFIT_ENABLE;
- if (IS_I965G(dev))
- pfit_control |= PFIT_SCALING_AUTO;
- else
- pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- break;
- default:
- break;
}
-out:
- lvds_priv->pfit_control = pfit_control;
- lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
- /*
- * When there exists the border, it means that the LVDS_BORDR
- * should be enabled.
- */
- if (border)
- dev_priv->lvds_border_bits |= LVDS_BORDER_ENABLE;
- else
- dev_priv->lvds_border_bits &= ~(LVDS_BORDER_ENABLE);
/*
* XXX: It would be nice to support lower refresh rates on the
* panels to reduce power consumption, and perhaps match the
@@ -542,82 +327,6 @@ out:
return true;
}
-static void intel_lvds_prepare(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
-
- if (IS_IRONLAKE(dev))
- reg = BLC_PWM_CPU_CTL;
- else
- reg = BLC_PWM_CTL;
-
- dev_priv->saveBLC_PWM_CTL = I915_READ(reg);
- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
- BACKLIGHT_DUTY_CYCLE_MASK);
-
- intel_lvds_set_power(dev, false);
-}
-
-static void intel_lvds_commit( struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->backlight_duty_cycle == 0)
- dev_priv->backlight_duty_cycle =
- intel_lvds_get_max_backlight(dev);
-
- intel_lvds_set_power(dev, true);
-}
-
-static void intel_lvds_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
-
- /*
- * The LVDS pin pair will already have been turned on in the
- * intel_crtc_mode_set since it has a large impact on the DPLL
- * settings.
- */
-
- if (IS_IRONLAKE(dev))
- return;
-
- /*
- * Enable automatic panel scaling so that non-native modes fill the
- * screen. Should be enabled before the pipe is enabled, according to
- * register description and PRM.
- */
- I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
-}
-
-/* Some lid devices report incorrect lid status, assume they're connected */
-static const struct dmi_system_id bad_lid_status[] = {
- {
- .ident = "Aspire One",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
- },
- },
- {
- .ident = "PC-81005",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
- },
- },
- { }
-};
-
/**
* Detect the LVDS connection.
*
@@ -625,14 +334,20 @@ static const struct dmi_system_id bad_lid_status[] = {
* connected and closed means disconnected. We also send hotplug events as
* needed, using lid status notification from the input layer.
*/
-static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_lvds_detect(struct drm_connector *connector, bool force)
{
- enum drm_connector_status status = connector_status_connected;
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status status;
- if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
- status = connector_status_disconnected;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
- return status;
+ status = intel_panel_detect(dev);
+ if (status != connector_status_unknown)
+ return status;
+
+ return connector_status_connected;
}
/**
@@ -640,74 +355,103 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
{
+ struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
struct drm_device *dev = connector->dev;
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = 0;
-
- ret = intel_ddc_get_modes(intel_output);
+ struct drm_display_mode *mode;
- if (ret)
- return ret;
+ /* use cached edid if we have one */
+ if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+ return drm_add_edid_modes(connector, lvds_connector->base.edid);
- /* Didn't get an EDID, so
- * Set wide sync ranges so we get all modes
- * handed to valid_mode for checking
- */
- connector->display_info.min_vfreq = 0;
- connector->display_info.max_vfreq = 200;
- connector->display_info.min_hfreq = 0;
- connector->display_info.max_hfreq = 200;
+ mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
+ if (mode == NULL)
+ return 0;
- if (dev_priv->panel_fixed_mode != NULL) {
- struct drm_display_mode *mode;
+ drm_mode_probed_add(connector, mode);
+ return 1;
+}
- mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
- drm_mode_probed_add(connector, mode);
+static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
+{
+ DRM_INFO("Skipping forced modeset for %s\n", id->ident);
+ return 1;
+}
- return 1;
- }
+/* The GPU hangs on these systems if a modeset is performed on lid open */
+static const struct dmi_system_id intel_no_modeset_on_lid[] = {
+ {
+ .callback = intel_no_modeset_on_lid_dmi_callback,
+ .ident = "Toshiba Tecra A11",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
+ },
+ },
- return 0;
-}
+ { } /* terminating entry */
+};
/*
- * Lid events. Note the use of 'modeset_on_lid':
- * - we set it on lid close, and reset it on open
+ * Lid events. Note the use of 'modeset_restore':
+ * - we set it to MODESET_ON_LID_OPEN on lid close,
+ * and set it to MODESET_DONE on open
 * - we use it as an "only once" bit (i.e. we ignore
- * duplicate events where it was already properly
- * set/reset)
- * - the suspend/resume paths will also set it to
- * zero, since they restore the mode ("lid open").
+ * duplicate events where it was already properly set)
+ * - the suspend/resume paths will set it to
+ * MODESET_SUSPENDED and ignore the lid open event,
+ * because they restore the mode ("lid open").
*/
static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
void *unused)
{
- struct drm_i915_private *dev_priv =
- container_of(nb, struct drm_i915_private, lid_notifier);
- struct drm_device *dev = dev_priv->dev;
- struct drm_connector *connector = dev_priv->int_lvds_connector;
+ struct intel_lvds_connector *lvds_connector =
+ container_of(nb, struct intel_lvds_connector, lid_notifier);
+ struct drm_connector *connector = &lvds_connector->base.base;
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+ return NOTIFY_OK;
+
+ mutex_lock(&dev_priv->modeset_restore_lock);
+ if (dev_priv->modeset_restore == MODESET_SUSPENDED)
+ goto exit;
/*
* check and update the status of LVDS connector after receiving
 * the LID notification event.
*/
- if (connector)
- connector->status = connector->funcs->detect(connector);
+ connector->status = connector->funcs->detect(connector, false);
+
+ /* Don't force modeset on machines where it causes a GPU lockup */
+ if (dmi_check_system(intel_no_modeset_on_lid))
+ goto exit;
if (!acpi_lid_open()) {
- dev_priv->modeset_on_lid = 1;
- return NOTIFY_OK;
+ /* do modeset on next lid open event */
+ dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
+ goto exit;
}
- if (!dev_priv->modeset_on_lid)
- return NOTIFY_OK;
+ if (dev_priv->modeset_restore == MODESET_DONE)
+ goto exit;
- dev_priv->modeset_on_lid = 0;
+ /*
+	 * Some old platforms' BIOSes love to wreak havoc while the lid is
+	 * closed. We try to detect this here and undo any damage. The split
+	 * for PCH platforms is rather conservative and a bit arbitrary,
+	 * except that on those platforms VGA disabling requires actual legacy
+	 * VGA I/O access, and as part of the cleanup in the hw state restore
+	 * we also redisable the vga plane.
+ */
+ if (!HAS_PCH_SPLIT(dev)) {
+ drm_modeset_lock_all(dev);
+ intel_modeset_setup_hw_state(dev, true);
+ drm_modeset_unlock_all(dev);
+ }
- mutex_lock(&dev->mode_config.mutex);
- drm_helper_resume_force_mode(dev);
- mutex_unlock(&dev->mode_config.mutex);
+ dev_priv->modeset_restore = MODESET_DONE;
+exit:
+ mutex_unlock(&dev_priv->modeset_restore_lock);
return NOTIFY_OK;
}
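
For readers tracking the lid handling above, a minimal sketch of the three
restore states the notifier cycles through (the enum is assumed to mirror the
intel_modeset_restore definition in i915_drv.h at this point in the series;
the comments are explanatory, not quoted from the header):

	enum intel_modeset_restore {
		MODESET_ON_LID_OPEN,	/* lid closed: defer modeset until open */
		MODESET_DONE,		/* mode restored: ignore duplicate events */
		MODESET_SUSPENDED,	/* suspend/resume owns the restore path */
	};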
@@ -720,15 +464,17 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
*/
static void intel_lvds_destroy(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_lvds_connector *lvds_connector =
+ to_lvds_connector(connector);
+
+ if (lvds_connector->lid_notifier.notifier_call)
+ acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
+
+ if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+ kfree(lvds_connector->base.edid);
+
+ intel_panel_fini(&lvds_connector->base.panel);
- if (intel_output->ddc_bus)
- intel_i2c_destroy(intel_output->ddc_bus);
- if (dev_priv->lid_notifier.notifier_call)
- acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -737,44 +483,36 @@ static int intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
- struct intel_output *intel_output =
- to_intel_output(connector);
- if (property == dev->mode_config.scaling_mode_property &&
- connector->encoder) {
- struct drm_crtc *crtc = connector->encoder->crtc;
- struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+ if (property == dev->mode_config.scaling_mode_property) {
+ struct drm_crtc *crtc;
+
if (value == DRM_MODE_SCALE_NONE) {
 			DRM_DEBUG_KMS("DRM_MODE_SCALE_NONE not supported\n");
- return 0;
+ return -EINVAL;
}
- if (lvds_priv->fitting_mode == value) {
+
+ if (intel_connector->panel.fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
}
- lvds_priv->fitting_mode = value;
+ intel_connector->panel.fitting_mode = value;
+
+ crtc = intel_attached_encoder(connector)->base.crtc;
if (crtc && crtc->enabled) {
/*
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
*/
- drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
+ intel_crtc_restore_mode(crtc);
}
}
return 0;
}
-static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
- .dpms = intel_lvds_dpms,
- .mode_fixup = intel_lvds_mode_fixup,
- .prepare = intel_lvds_prepare,
- .mode_set = intel_lvds_mode_set,
- .commit = intel_lvds_commit,
-};
-
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
@@ -782,28 +520,20 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .save = intel_lvds_save,
- .restore = intel_lvds_restore,
+ .dpms = intel_connector_dpms,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
.destroy = intel_lvds_destroy,
};
-
-static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
- .destroy = intel_lvds_enc_destroy,
+ .destroy = intel_encoder_destroy,
};
static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
{
- DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
+ DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
return 1;
}
@@ -843,6 +573,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
+ .ident = "Dell OptiPlex FX170",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
.ident = "AOpen Mini PC",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
@@ -859,108 +597,275 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen i915GMm-HFS",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen i45GMx-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
.ident = "Aopen i945GTt-VFA",
.matches = {
DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron U800",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron E830",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus EeeBox PC EB1007",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus AT5NM10T-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard HP t5740",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, " t5740"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard t5745",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard st5747",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "MSI Wind Box DC500",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Gigabyte GA-D525TUD",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Supermicro X7SPA-H",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Fujitsu Esprimo Q900",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Intel D410PT",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+ DMI_MATCH(DMI_BOARD_NAME, "D410PT"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Intel D425KT",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Intel D510MO",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "D510MO"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Intel D525MW",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
+ },
+ },
{ } /* terminating entry */
};
-/**
- * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
- * @dev: drm device
- * @connector: LVDS connector
- *
- * Find the reduced downclock for LVDS in EDID.
- */
-static void intel_find_lvds_downclock(struct drm_device *dev,
- struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *scan, *panel_fixed_mode;
- int temp_downclock;
-
- panel_fixed_mode = dev_priv->panel_fixed_mode;
- temp_downclock = panel_fixed_mode->clock;
-
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(scan, &connector->probed_modes, head) {
- /*
- * If one mode has the same resolution with the fixed_panel
- * mode while they have the different refresh rate, it means
- * that the reduced downclock is found for the LVDS. In such
- * case we can set the different FPx0/1 to dynamically select
- * between low and high frequency.
- */
- if (scan->hdisplay == panel_fixed_mode->hdisplay &&
- scan->hsync_start == panel_fixed_mode->hsync_start &&
- scan->hsync_end == panel_fixed_mode->hsync_end &&
- scan->htotal == panel_fixed_mode->htotal &&
- scan->vdisplay == panel_fixed_mode->vdisplay &&
- scan->vsync_start == panel_fixed_mode->vsync_start &&
- scan->vsync_end == panel_fixed_mode->vsync_end &&
- scan->vtotal == panel_fixed_mode->vtotal) {
- if (scan->clock < temp_downclock) {
- /*
- * The downclock is already found. But we
- * expect to find the lower downclock.
- */
- temp_downclock = scan->clock;
- }
- }
- }
- mutex_unlock(&dev->mode_config.mutex);
- if (temp_downclock < panel_fixed_mode->clock) {
- /* We found the downclock for LVDS. */
- dev_priv->lvds_downclock_avail = 1;
- dev_priv->lvds_downclock = temp_downclock;
- DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
- "Normal clock %dKhz, downclock %dKhz\n",
- panel_fixed_mode->clock, temp_downclock);
- }
- return;
-}
-
/*
* Enumerate the child dev array parsed from VBT to check whether
* the LVDS is present.
 * If it is present, return true.
* If it is not present, return false.
* If no child dev is parsed from VBT, it assumes that the LVDS is present.
- * Note: The addin_offset should also be checked for LVDS panel.
- * Only when it is non-zero, it is assumed that it is present.
*/
-static int lvds_is_present_in_vbt(struct drm_device *dev)
+static bool lvds_is_present_in_vbt(struct drm_device *dev,
+ u8 *i2c_pin)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
- int i, ret;
-
- if (!dev_priv->child_dev_num)
- return 1;
-
- ret = 0;
- for (i = 0; i < dev_priv->child_dev_num; i++) {
- p_child = dev_priv->child_dev + i;
- /*
- * If the device type is not LFP, continue.
- * If the device type is 0x22, it is also regarded as LFP.
+ int i;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return true;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ union child_device_config *uchild = dev_priv->vbt.child_dev + i;
+ struct old_child_dev_config *child = &uchild->old;
+
+ /* If the device type is not LFP, continue.
+ * We have to check both the new identifiers as well as the
+ * old for compatibility with some BIOSes.
*/
- if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
- p_child->device_type != DEVICE_TYPE_LFP)
+ if (child->device_type != DEVICE_TYPE_INT_LFP &&
+ child->device_type != DEVICE_TYPE_LFP)
continue;
- /* The addin_offset should be checked. Only when it is
- * non-zero, it is regarded as present.
+ if (intel_gmbus_is_port_valid(child->i2c_pin))
+ *i2c_pin = child->i2c_pin;
+
+ /* However, we cannot trust the BIOS writers to populate
+ * the VBT correctly. Since LVDS requires additional
+ * information from AIM blocks, a non-zero addin offset is
+ * a good indicator that the LVDS is actually present.
+ */
+ if (child->addin_offset)
+ return true;
+
+ /* But even then some BIOS writers perform some black magic
+ * and instantiate the device without reference to any
+ * additional data. Trust that if the VBT was written into
+ * the OpRegion then they have validated the LVDS's existence.
*/
- if (p_child->addin_offset) {
- ret = 1;
- break;
+ if (dev_priv->opregion.vbt)
+ return true;
+ }
+
+ return false;
+}
+
+static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+{
+ DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_dual_link_lvds[] = {
+ {
+ .callback = intel_dual_link_lvds_callback,
+ .ident = "Apple MacBook Pro (Core i5/i7 Series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+ },
+ },
+ { } /* terminating entry */
+};
+
+bool intel_is_dual_link_lvds(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_lvds_encoder *lvds_encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->type == INTEL_OUTPUT_LVDS) {
+ lvds_encoder = to_lvds_encoder(&encoder->base);
+
+ return lvds_encoder->is_dual_link;
}
}
- return ret;
+
+ return false;
+}
+
+static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
+{
+ struct drm_device *dev = lvds_encoder->base.base.dev;
+ unsigned int val;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* use the module option value if specified */
+ if (i915.lvds_channel_mode > 0)
+ return i915.lvds_channel_mode == 2;
+
+ if (dmi_check_system(intel_dual_link_lvds))
+ return true;
+
+ /* BIOS should set the proper LVDS register value at boot, but
+ * in reality, it doesn't set the value when the lid is closed;
+ * we need to check "the value to be set" in VBT when LVDS
+ * register is uninitialized.
+ */
+ val = I915_READ(lvds_encoder->reg);
+ if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
+ val = dev_priv->vbt.bios_lvds_val;
+
+ return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
+}
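
The first check above corresponds to the i915.lvds_channel_mode module
option; a hedged summary of its assumed semantics, matching the parameter
description (0 probes the BIOS, 1 forces single link, 2 forces dual link):

	/*
	 * Kernel command line override, e.g.:
	 *   i915.lvds_channel_mode=2   force dual-link LVDS
	 *   i915.lvds_channel_mode=1   force single-link LVDS
	 *   i915.lvds_channel_mode=0   probe the BIOS (default)
	 */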
+
+static bool intel_lvds_supported(struct drm_device *dev)
+{
+ /* With the introduction of the PCH we gained a dedicated
+ * LVDS presence pin, use it. */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ return true;
+
+ /* Otherwise LVDS was only attached to mobile products,
+ * except for the inglorious 830gm */
+ if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+ return true;
+
+ return false;
}
/**
@@ -973,71 +878,102 @@ static int lvds_is_present_in_vbt(struct drm_device *dev)
void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output;
+ struct intel_lvds_encoder *lvds_encoder;
+ struct intel_encoder *intel_encoder;
+ struct intel_lvds_connector *lvds_connector;
+ struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
+ struct drm_display_mode *fixed_mode = NULL;
+ struct drm_display_mode *downclock_mode = NULL;
+ struct edid *edid;
struct drm_crtc *crtc;
- struct intel_lvds_priv *lvds_priv;
u32 lvds;
- int pipe, gpio = GPIOC;
+ int pipe;
+ u8 pin;
+
+ if (!intel_lvds_supported(dev))
+ return;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
return;
- if (!lvds_is_present_in_vbt(dev)) {
+ pin = GMBUS_PORT_PANEL;
+ if (!lvds_is_present_in_vbt(dev, &pin)) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return;
}
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
return;
- if (dev_priv->edp_support) {
+ if (dev_priv->vbt.edp_support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
return;
}
- gpio = PCH_GPIOC;
}
- intel_output = kzalloc(sizeof(struct intel_output) +
- sizeof(struct intel_lvds_priv), GFP_KERNEL);
- if (!intel_output) {
+ lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
+ if (!lvds_encoder)
+ return;
+
+ lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
+ if (!lvds_connector) {
+ kfree(lvds_encoder);
return;
}
- connector = &intel_output->base;
- encoder = &intel_output->enc;
- drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs,
+ lvds_encoder->attached_connector = lvds_connector;
+
+ intel_encoder = &lvds_encoder->base;
+ encoder = &intel_encoder->base;
+ intel_connector = &lvds_connector->base;
+ connector = &intel_connector->base;
+ drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
- intel_output->type = INTEL_OUTPUT_LVDS;
+ intel_encoder->enable = intel_enable_lvds;
+ intel_encoder->pre_enable = intel_pre_enable_lvds;
+ intel_encoder->compute_config = intel_lvds_compute_config;
+ intel_encoder->disable = intel_disable_lvds;
+ intel_encoder->get_hw_state = intel_lvds_get_hw_state;
+ intel_encoder->get_config = intel_lvds_get_config;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->unregister = intel_connector_unregister;
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+ intel_encoder->cloneable = 0;
+ if (HAS_PCH_SPLIT(dev))
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ else if (IS_GEN4(dev))
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ else
+ intel_encoder->crtc_mask = (1 << 1);
- intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
- intel_output->crtc_mask = (1 << 1);
- drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- lvds_priv = (struct intel_lvds_priv *)(intel_output + 1);
- intel_output->dev_priv = lvds_priv;
+ if (HAS_PCH_SPLIT(dev)) {
+ lvds_encoder->reg = PCH_LVDS;
+ } else {
+ lvds_encoder->reg = LVDS;
+ }
+
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
- /*
- * the initial panel fitting mode will be FULL_SCREEN.
- */
-
- drm_connector_attach_property(&intel_output->base,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
- DRM_MODE_SCALE_FULLSCREEN);
- lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
+ DRM_MODE_SCALE_ASPECT);
+ intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
* LVDS discovery:
* 1) check for EDID on DDC
@@ -1048,41 +984,71 @@ void intel_lvds_init(struct drm_device *dev)
* if closed, act like it's not there for now
*/
- /* Set up the DDC bus. */
- intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
- if (!intel_output->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
- "failed.\n");
- goto failed;
- }
-
/*
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- intel_ddc_get_modes(intel_output);
+ mutex_lock(&dev->mode_config.mutex);
+ edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
+ if (edid) {
+ if (drm_add_edid_modes(connector, edid)) {
+ drm_mode_connector_update_edid_property(connector,
+ edid);
+ } else {
+ kfree(edid);
+ edid = ERR_PTR(-EINVAL);
+ }
+ } else {
+ edid = ERR_PTR(-ENOENT);
+ }
+ lvds_connector->base.edid = edid;
+
+ if (IS_ERR_OR_NULL(edid)) {
+		/* Didn't get an EDID, so set wide sync ranges to get
+		 * all modes handed to valid_mode for checking.
+		 */
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+ }
list_for_each_entry(scan, &connector->probed_modes, head) {
- mutex_lock(&dev->mode_config.mutex);
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
- dev_priv->panel_fixed_mode =
- drm_mode_duplicate(dev, scan);
- mutex_unlock(&dev->mode_config.mutex);
- intel_find_lvds_downclock(dev, connector);
- goto out;
+ DRM_DEBUG_KMS("using preferred mode from EDID: ");
+ drm_mode_debug_printmodeline(scan);
+
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ if (fixed_mode) {
+ downclock_mode =
+ intel_find_panel_downclock(dev,
+ fixed_mode, connector);
+ if (downclock_mode != NULL &&
+ i915.lvds_downclock) {
+ /* We found the downclock for LVDS. */
+ dev_priv->lvds_downclock_avail = true;
+ dev_priv->lvds_downclock =
+ downclock_mode->clock;
+ DRM_DEBUG_KMS("LVDS downclock is found"
+ " in EDID. Normal clock %dKhz, "
+ "downclock %dKhz\n",
+ fixed_mode->clock,
+ dev_priv->lvds_downclock);
+ }
+ goto out;
+ }
}
- mutex_unlock(&dev->mode_config.mutex);
}
/* Failed to get EDID, what about VBT? */
- if (dev_priv->lfp_lvds_vbt_mode) {
- mutex_lock(&dev->mode_config.mutex);
- dev_priv->panel_fixed_mode =
- drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- mutex_unlock(&dev->mode_config.mutex);
- if (dev_priv->panel_fixed_mode) {
- dev_priv->panel_fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
+ if (dev_priv->vbt.lfp_lvds_vbt_mode) {
+ DRM_DEBUG_KMS("using mode from VBT: ");
+ drm_mode_debug_printmodeline(dev_priv->vbt.lfp_lvds_vbt_mode);
+
+ fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
+ if (fixed_mode) {
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
@@ -1094,53 +1060,64 @@ void intel_lvds_init(struct drm_device *dev)
*/
 	/* Ironlake: FIXME: if this still fails, don't try the pipe mode for now */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
goto failed;
lvds = I915_READ(LVDS);
pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_from_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc && (lvds & LVDS_PORT_EN)) {
- dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
- if (dev_priv->panel_fixed_mode) {
- dev_priv->panel_fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
+ fixed_mode = intel_crtc_mode_get(dev, crtc);
+ if (fixed_mode) {
+ DRM_DEBUG_KMS("using current (BIOS) mode: ");
+ drm_mode_debug_printmodeline(fixed_mode);
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
/* If we still don't have a mode after all that, give up. */
- if (!dev_priv->panel_fixed_mode)
+ if (!fixed_mode)
goto failed;
out:
- if (IS_IRONLAKE(dev)) {
- u32 pwm;
- /* make sure PWM is enabled */
- pwm = I915_READ(BLC_PWM_CPU_CTL2);
- pwm |= (PWM_ENABLE | PWM_PIPE_B);
- I915_WRITE(BLC_PWM_CPU_CTL2, pwm);
-
- pwm = I915_READ(BLC_PWM_PCH_CTL1);
- pwm |= PWM_PCH_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
+ DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
+ lvds_encoder->is_dual_link ? "dual" : "single");
+
+ /*
+ * Unlock registers and just
+ * leave them unlocked
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_PP_CONTROL,
+ I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+ } else {
+ I915_WRITE(PP_CONTROL,
+ I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
- dev_priv->lid_notifier.notifier_call = intel_lid_notify;
- if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
+ lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
+ if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
DRM_DEBUG_KMS("lid notifier registration failed\n");
- dev_priv->lid_notifier.notifier_call = NULL;
+ lvds_connector->lid_notifier.notifier_call = NULL;
}
- /* keep the LVDS connector */
- dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
+
+ intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+ intel_panel_setup_backlight(connector);
+
return;
failed:
+ mutex_unlock(&dev->mode_config.mutex);
+
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
- if (intel_output->ddc_bus)
- intel_i2c_destroy(intel_output->ddc_bus);
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
- kfree(intel_output);
+ kfree(lvds_encoder);
+ kfree(lvds_connector);
+ return;
}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 67e2f4632a2..0e860f39933 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
- * Copyright (c) 2007 Intel Corporation
+ * Copyright (c) 2007, 2010 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -23,66 +23,106 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/fb.h>
-#include "drmP.h"
+#include <drm/drm_edid.h>
+#include <drm/drmP.h>
#include "intel_drv.h"
#include "i915_drv.h"
/**
- * intel_ddc_probe
- *
+ * intel_connector_update_modes - update connector from edid
+ * @connector: DRM connector device to use
+ * @edid: previously read EDID information
*/
-bool intel_ddc_probe(struct intel_output *intel_output)
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid)
{
- u8 out_buf[] = { 0x0, 0x0};
- u8 buf[2];
int ret;
- struct i2c_msg msgs[] = {
- {
- .addr = 0x50,
- .flags = 0,
- .len = 1,
- .buf = out_buf,
- },
- {
- .addr = 0x50,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
-
- intel_i2c_quirk_set(intel_output->base.dev, true);
- ret = i2c_transfer(intel_output->ddc_bus, msgs, 2);
- intel_i2c_quirk_set(intel_output->base.dev, false);
- if (ret == 2)
- return true;
-
- return false;
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+
+ return ret;
}
/**
* intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
+ * @adapter: i2c adapter
*
* Fetch the EDID information from @connector using the DDC bus.
*/
-int intel_ddc_get_modes(struct intel_output *intel_output)
+int intel_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
{
struct edid *edid;
- int ret = 0;
-
- intel_i2c_quirk_set(intel_output->base.dev, true);
- edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus);
- intel_i2c_quirk_set(intel_output->base.dev, false);
- if (edid) {
- drm_mode_connector_update_edid_property(&intel_output->base,
- edid);
- ret = drm_add_edid_modes(&intel_output->base, edid);
- intel_output->base.display_info.raw_edid = NULL;
- kfree(edid);
- }
+ int ret;
+
+ edid = drm_get_edid(connector, adapter);
+ if (!edid)
+ return 0;
+
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid);
return ret;
}
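
A hedged usage sketch of the new two-argument form: a connector's
->get_modes() hook passes its own i2c adapter, much as the LVDS code above
selects GMBUS_PORT_PANEL. The function name example_get_modes is hypothetical:

	static int example_get_modes(struct drm_connector *connector)
	{
		struct drm_i915_private *dev_priv = connector->dev->dev_private;

		/* Fetch the EDID over this connector's DDC bus and add its modes. */
		return intel_ddc_get_modes(connector,
				intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_PANEL));
	}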
+
+static const struct drm_prop_enum_list force_audio_names[] = {
+ { HDMI_AUDIO_OFF_DVI, "force-dvi" },
+ { HDMI_AUDIO_OFF, "off" },
+ { HDMI_AUDIO_AUTO, "auto" },
+ { HDMI_AUDIO_ON, "on" },
+};
+
+void
+intel_attach_force_audio_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+
+ prop = dev_priv->force_audio_property;
+ if (prop == NULL) {
+ prop = drm_property_create_enum(dev, 0,
+ "audio",
+ force_audio_names,
+ ARRAY_SIZE(force_audio_names));
+ if (prop == NULL)
+ return;
+
+ dev_priv->force_audio_property = prop;
+ }
+ drm_object_attach_property(&connector->base, prop, 0);
+}
+
+static const struct drm_prop_enum_list broadcast_rgb_names[] = {
+ { INTEL_BROADCAST_RGB_AUTO, "Automatic" },
+ { INTEL_BROADCAST_RGB_FULL, "Full" },
+ { INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
+};
+
+void
+intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+
+ prop = dev_priv->broadcast_rgb_property;
+ if (prop == NULL) {
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ broadcast_rgb_names,
+ ARRAY_SIZE(broadcast_rgb_names));
+ if (prop == NULL)
+ return;
+
+ dev_priv->broadcast_rgb_property = prop;
+ }
+
+ drm_object_attach_property(&connector->base, prop, 0);
+}
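
Both helpers are intended to be called once per connector during output
setup; a minimal sketch (the HDMI/DP init paths are the expected callers,
and "connector" is assumed to be an already initialized drm_connector):

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);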
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
new file mode 100644
index 00000000000..4f6b53998d7
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -0,0 +1,910 @@
+/*
+ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
+ * Copyright 2008 Red Hat <mjg@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <acpi/video.h>
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+#define PCI_ASLE 0xe4
+#define PCI_ASLS 0xfc
+#define PCI_SWSCI 0xe8
+#define PCI_SWSCI_SCISEL (1 << 15)
+#define PCI_SWSCI_GSSCIE (1 << 0)
+
+#define OPREGION_HEADER_OFFSET 0
+#define OPREGION_ACPI_OFFSET 0x100
+#define ACPI_CLID 0x01ac /* current lid state indicator */
+#define ACPI_CDCK 0x01b0 /* current docking state indicator */
+#define OPREGION_SWSCI_OFFSET 0x200
+#define OPREGION_ASLE_OFFSET 0x300
+#define OPREGION_VBT_OFFSET 0x400
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_ACPI (1<<0)
+#define MBOX_SWSCI (1<<1)
+#define MBOX_ASLE (1<<2)
+
+struct opregion_header {
+ u8 signature[16];
+ u32 size;
+ u32 opregion_ver;
+ u8 bios_ver[32];
+ u8 vbios_ver[16];
+ u8 driver_ver[16];
+ u32 mboxes;
+ u8 reserved[164];
+} __packed;
+
+/* OpRegion mailbox #1: public ACPI methods */
+struct opregion_acpi {
+ u32 drdy; /* driver readiness */
+ u32 csts; /* notification status */
+ u32 cevt; /* current event */
+ u8 rsvd1[20];
+ u32 didl[8]; /* supported display devices ID list */
+ u32 cpdl[8]; /* currently presented display list */
+ u32 cadl[8]; /* currently active display list */
+ u32 nadl[8]; /* next active devices list */
+ u32 aslp; /* ASL sleep time-out */
+ u32 tidx; /* toggle table index */
+ u32 chpd; /* current hotplug enable indicator */
+ u32 clid; /* current lid state*/
+ u32 cdck; /* current docking state */
+ u32 sxsw; /* Sx state resume */
+ u32 evts; /* ASL supported events */
+ u32 cnot; /* current OS notification */
+ u32 nrdy; /* driver status */
+ u8 rsvd2[60];
+} __packed;
+
+/* OpRegion mailbox #2: SWSCI */
+struct opregion_swsci {
+ u32 scic; /* SWSCI command|status|data */
+ u32 parm; /* command parameters */
+ u32 dslp; /* driver sleep time-out */
+ u8 rsvd[244];
+} __packed;
+
+/* OpRegion mailbox #3: ASLE */
+struct opregion_asle {
+ u32 ardy; /* driver readiness */
+ u32 aslc; /* ASLE interrupt command */
+ u32 tche; /* technology enabled indicator */
+ u32 alsi; /* current ALS illuminance reading */
+ u32 bclp; /* backlight brightness to set */
+ u32 pfit; /* panel fitting state */
+ u32 cblv; /* current brightness level */
+ u16 bclm[20]; /* backlight level duty cycle mapping table */
+ u32 cpfm; /* current panel fitting mode */
+ u32 epfm; /* enabled panel fitting modes */
+ u8 plut[74]; /* panel LUT and identifier */
+ u32 pfmb; /* PWM freq and min brightness */
+ u32 cddv; /* color correction default values */
+ u32 pcft; /* power conservation features */
+ u32 srot; /* supported rotation angles */
+ u32 iuer; /* IUER events */
+ u8 rsvd[86];
+} __packed;
+
+/* Driver readiness indicator */
+#define ASLE_ARDY_READY (1 << 0)
+#define ASLE_ARDY_NOT_READY (0 << 0)
+
+/* ASLE Interrupt Command (ASLC) bits */
+#define ASLC_SET_ALS_ILLUM (1 << 0)
+#define ASLC_SET_BACKLIGHT (1 << 1)
+#define ASLC_SET_PFIT (1 << 2)
+#define ASLC_SET_PWM_FREQ (1 << 3)
+#define ASLC_SUPPORTED_ROTATION_ANGLES (1 << 4)
+#define ASLC_BUTTON_ARRAY (1 << 5)
+#define ASLC_CONVERTIBLE_INDICATOR (1 << 6)
+#define ASLC_DOCKING_INDICATOR (1 << 7)
+#define ASLC_ISCT_STATE_CHANGE (1 << 8)
+#define ASLC_REQ_MSK 0x1ff
+/* response bits */
+#define ASLC_ALS_ILLUM_FAILED (1 << 10)
+#define ASLC_BACKLIGHT_FAILED (1 << 12)
+#define ASLC_PFIT_FAILED (1 << 14)
+#define ASLC_PWM_FREQ_FAILED (1 << 16)
+#define ASLC_ROTATION_ANGLES_FAILED (1 << 18)
+#define ASLC_BUTTON_ARRAY_FAILED (1 << 20)
+#define ASLC_CONVERTIBLE_FAILED (1 << 22)
+#define ASLC_DOCKING_FAILED (1 << 24)
+#define ASLC_ISCT_STATE_FAILED (1 << 26)
+
+/* Technology enabled indicator */
+#define ASLE_TCHE_ALS_EN (1 << 0)
+#define ASLE_TCHE_BLC_EN (1 << 1)
+#define ASLE_TCHE_PFIT_EN (1 << 2)
+#define ASLE_TCHE_PFMB_EN (1 << 3)
+
+/* ASLE backlight brightness to set */
+#define ASLE_BCLP_VALID (1<<31)
+#define ASLE_BCLP_MSK (~(1<<31))
+
+/* ASLE panel fitting request */
+#define ASLE_PFIT_VALID (1<<31)
+#define ASLE_PFIT_CENTER (1<<0)
+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
+#define ASLE_PFIT_STRETCH_GFX (1<<2)
+
+/* PWM frequency and minimum brightness */
+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
+#define ASLE_PFMB_PWM_VALID (1<<31)
+
+#define ASLE_CBLV_VALID (1<<31)
+
+/* IUER */
+#define ASLE_IUER_DOCKING (1 << 7)
+#define ASLE_IUER_CONVERTIBLE (1 << 6)
+#define ASLE_IUER_ROTATION_LOCK_BTN (1 << 4)
+#define ASLE_IUER_VOLUME_DOWN_BTN (1 << 3)
+#define ASLE_IUER_VOLUME_UP_BTN (1 << 2)
+#define ASLE_IUER_WINDOWS_BTN (1 << 1)
+#define ASLE_IUER_POWER_BTN (1 << 0)
+
+/* Software System Control Interrupt (SWSCI) */
+#define SWSCI_SCIC_INDICATOR (1 << 0)
+#define SWSCI_SCIC_MAIN_FUNCTION_SHIFT 1
+#define SWSCI_SCIC_MAIN_FUNCTION_MASK (0xf << 1)
+#define SWSCI_SCIC_SUB_FUNCTION_SHIFT 8
+#define SWSCI_SCIC_SUB_FUNCTION_MASK (0xff << 8)
+#define SWSCI_SCIC_EXIT_PARAMETER_SHIFT 8
+#define SWSCI_SCIC_EXIT_PARAMETER_MASK (0xff << 8)
+#define SWSCI_SCIC_EXIT_STATUS_SHIFT 5
+#define SWSCI_SCIC_EXIT_STATUS_MASK (7 << 5)
+#define SWSCI_SCIC_EXIT_STATUS_SUCCESS 1
+
+#define SWSCI_FUNCTION_CODE(main, sub) \
+ ((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \
+ (sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT)
+
+/* SWSCI: Get BIOS Data (GBDA) */
+#define SWSCI_GBDA 4
+#define SWSCI_GBDA_SUPPORTED_CALLS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0)
+#define SWSCI_GBDA_REQUESTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1)
+#define SWSCI_GBDA_BOOT_DISPLAY_PREF SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4)
+#define SWSCI_GBDA_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5)
+#define SWSCI_GBDA_TV_STANDARD SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6)
+#define SWSCI_GBDA_INTERNAL_GRAPHICS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7)
+#define SWSCI_GBDA_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10)
+
+/* SWSCI: System BIOS Callbacks (SBCB) */
+#define SWSCI_SBCB 6
+#define SWSCI_SBCB_SUPPORTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0)
+#define SWSCI_SBCB_INIT_COMPLETION SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1)
+#define SWSCI_SBCB_PRE_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3)
+#define SWSCI_SBCB_POST_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4)
+#define SWSCI_SBCB_DISPLAY_SWITCH SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5)
+#define SWSCI_SBCB_SET_TV_FORMAT SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6)
+#define SWSCI_SBCB_ADAPTER_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7)
+#define SWSCI_SBCB_DISPLAY_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8)
+#define SWSCI_SBCB_SET_BOOT_DISPLAY SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9)
+#define SWSCI_SBCB_SET_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10)
+#define SWSCI_SBCB_SET_INTERNAL_GFX SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11)
+#define SWSCI_SBCB_POST_HIRES_TO_DOS_FS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16)
+#define SWSCI_SBCB_SUSPEND_RESUME SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17)
+#define SWSCI_SBCB_SET_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18)
+#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
+#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
+
+#define ACPI_OTHER_OUTPUT (0<<8)
+#define ACPI_VGA_OUTPUT (1<<8)
+#define ACPI_TV_OUTPUT (2<<8)
+#define ACPI_DIGITAL_OUTPUT (3<<8)
+#define ACPI_LVDS_OUTPUT (4<<8)
+
+#define MAX_DSLP 1500
+
+#ifdef CONFIG_ACPI
+static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct opregion_swsci __iomem *swsci = dev_priv->opregion.swsci;
+ u32 main_function, sub_function, scic;
+ u16 pci_swsci;
+ u32 dslp;
+
+ if (!swsci)
+ return -ENODEV;
+
+ main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >>
+ SWSCI_SCIC_MAIN_FUNCTION_SHIFT;
+ sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >>
+ SWSCI_SCIC_SUB_FUNCTION_SHIFT;
+
+ /* Check if we can call the function. See swsci_setup for details. */
+ if (main_function == SWSCI_SBCB) {
+ if ((dev_priv->opregion.swsci_sbcb_sub_functions &
+ (1 << sub_function)) == 0)
+ return -EINVAL;
+ } else if (main_function == SWSCI_GBDA) {
+ if ((dev_priv->opregion.swsci_gbda_sub_functions &
+ (1 << sub_function)) == 0)
+ return -EINVAL;
+ }
+
+ /* Driver sleep timeout in ms. */
+ dslp = ioread32(&swsci->dslp);
+ if (!dslp) {
+ /* The spec says 2ms should be the default, but it's too small
+ * for some machines. */
+ dslp = 50;
+ } else if (dslp > MAX_DSLP) {
+ /* Hey bios, trust must be earned. */
+ DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
+ "using %u ms instead\n", dslp, MAX_DSLP);
+ dslp = MAX_DSLP;
+ }
+
+ /* The spec tells us to do this, but we are the only user... */
+ scic = ioread32(&swsci->scic);
+ if (scic & SWSCI_SCIC_INDICATOR) {
+ DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
+ return -EBUSY;
+ }
+
+ scic = function | SWSCI_SCIC_INDICATOR;
+
+ iowrite32(parm, &swsci->parm);
+ iowrite32(scic, &swsci->scic);
+
+ /* Ensure SCI event is selected and event trigger is cleared. */
+ pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
+ if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
+ pci_swsci |= PCI_SWSCI_SCISEL;
+ pci_swsci &= ~PCI_SWSCI_GSSCIE;
+ pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+ }
+
+ /* Use event trigger to tell bios to check the mail. */
+ pci_swsci |= PCI_SWSCI_GSSCIE;
+ pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+
+ /* Poll for the result. */
+#define C (((scic = ioread32(&swsci->scic)) & SWSCI_SCIC_INDICATOR) == 0)
+ if (wait_for(C, dslp)) {
+ DRM_DEBUG_DRIVER("SWSCI request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
+ SWSCI_SCIC_EXIT_STATUS_SHIFT;
+
+ /* Note: scic == 0 is an error! */
+ if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
+ DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
+ return -EIO;
+ }
+
+ if (parm_out)
+ *parm_out = ioread32(&swsci->parm);
+
+ return 0;
+
+#undef C
+}
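
An illustrative caller, not part of this patch: issuing the same GBDA query
that swsci_setup() below uses to discover the supported-calls bitmask:

	u32 supported;

	if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &supported) == 0)
		DRM_DEBUG_DRIVER("GBDA supported calls: 0x%08x\n", supported);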
+
+#define DISPLAY_TYPE_CRT 0
+#define DISPLAY_TYPE_TV 1
+#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL 2
+#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL 3
+
+int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+ bool enable)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ u32 parm = 0;
+ u32 type = 0;
+ u32 port;
+
+ /* don't care about old stuff for now */
+ if (!HAS_DDI(dev))
+ return 0;
+
+ port = intel_ddi_get_encoder_port(intel_encoder);
+ if (port == PORT_E) {
+ port = 0;
+ } else {
+ parm |= 1 << port;
+ port++;
+ }
+
+ if (!enable)
+ parm |= 4 << 8;
+
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ type = DISPLAY_TYPE_CRT;
+ break;
+ case INTEL_OUTPUT_UNKNOWN:
+ case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_HDMI:
+ type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
+ break;
+ case INTEL_OUTPUT_EDP:
+ type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
+ break;
+ default:
+ WARN_ONCE(1, "unsupported intel_encoder type %d\n",
+ intel_encoder->type);
+ return -EINVAL;
+ }
+
+ parm |= type << (16 + port * 3);
+
+ return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+}
+
+static const struct {
+ pci_power_t pci_power_state;
+ u32 parm;
+} power_state_map[] = {
+ { PCI_D0, 0x00 },
+ { PCI_D1, 0x01 },
+ { PCI_D2, 0x02 },
+ { PCI_D3hot, 0x04 },
+ { PCI_D3cold, 0x04 },
+};
+
+int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+ int i;
+
+ if (!HAS_DDI(dev))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
+ if (state == power_state_map[i].pci_power_state)
+ return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
+ power_state_map[i].parm, NULL);
+ }
+
+ return -EINVAL;
+}
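
As a hedged usage note, the runtime power management paths are the intended
callers, telling the BIOS which D-state the adapter is entering; for example:

	/* e.g. from a runtime suspend handler (illustrative): */
	intel_opregion_notify_adapter(dev, PCI_D3hot);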
+
+static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_connector *intel_connector;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
+
+ DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+
+ /*
+ * If the acpi_video interface is not supposed to be used, don't
+ * bother processing backlight level change requests from firmware.
+ */
+ if (!acpi_video_verify_backlight_support()) {
+ DRM_DEBUG_KMS("opregion backlight request ignored\n");
+ return 0;
+ }
+
+ if (!(bclp & ASLE_BCLP_VALID))
+ return ASLC_BACKLIGHT_FAILED;
+
+ bclp &= ASLE_BCLP_MSK;
+ if (bclp > 255)
+ return ASLC_BACKLIGHT_FAILED;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ /*
+ * Update backlight on all connectors that support backlight (usually
+ * only one).
+ */
+ DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
+ list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
+ intel_panel_set_backlight(intel_connector, bclp, 255);
+ iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+
+ return 0;
+}
+
+static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
+{
+ /* alsi is the current ALS reading in lux. 0 indicates below sensor
+ range, 0xffff indicates above sensor range. 1-0xfffe are valid */
+ DRM_DEBUG_DRIVER("Illum is not supported\n");
+ return ASLC_ALS_ILLUM_FAILED;
+}
+
+static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
+{
+ DRM_DEBUG_DRIVER("PWM freq is not supported\n");
+ return ASLC_PWM_FREQ_FAILED;
+}
+
+static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
+{
+ /* Panel fitting is currently controlled by the X code, so this is a
+ noop until modesetting support works fully */
+ DRM_DEBUG_DRIVER("Pfit is not supported\n");
+ return ASLC_PFIT_FAILED;
+}
+
+static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
+{
+ DRM_DEBUG_DRIVER("SROT is not supported\n");
+ return ASLC_ROTATION_ANGLES_FAILED;
+}
+
+static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
+{
+ if (!iuer)
+ DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
+ if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
+ DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
+ if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
+ DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
+ if (iuer & ASLE_IUER_VOLUME_UP_BTN)
+ DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
+ if (iuer & ASLE_IUER_WINDOWS_BTN)
+ DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
+ if (iuer & ASLE_IUER_POWER_BTN)
+ DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
+
+ return ASLC_BUTTON_ARRAY_FAILED;
+}
+
+static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
+{
+ if (iuer & ASLE_IUER_CONVERTIBLE)
+ DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
+ else
+ DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
+
+ return ASLC_CONVERTIBLE_FAILED;
+}
+
+static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
+{
+ if (iuer & ASLE_IUER_DOCKING)
+ DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
+ else
+ DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
+
+ return ASLC_DOCKING_FAILED;
+}
+
+static u32 asle_isct_state(struct drm_device *dev)
+{
+ DRM_DEBUG_DRIVER("ISCT is not supported\n");
+ return ASLC_ISCT_STATE_FAILED;
+}
+
+static void asle_work(struct work_struct *work)
+{
+ struct intel_opregion *opregion =
+ container_of(work, struct intel_opregion, asle_work);
+ struct drm_i915_private *dev_priv =
+ container_of(opregion, struct drm_i915_private, opregion);
+ struct drm_device *dev = dev_priv->dev;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
+ u32 aslc_stat = 0;
+ u32 aslc_req;
+
+ if (!asle)
+ return;
+
+ aslc_req = ioread32(&asle->aslc);
+
+ if (!(aslc_req & ASLC_REQ_MSK)) {
+ DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
+ aslc_req);
+ return;
+ }
+
+ if (aslc_req & ASLC_SET_ALS_ILLUM)
+ aslc_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
+
+ if (aslc_req & ASLC_SET_BACKLIGHT)
+ aslc_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
+
+ if (aslc_req & ASLC_SET_PFIT)
+ aslc_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
+
+ if (aslc_req & ASLC_SET_PWM_FREQ)
+ aslc_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
+
+ if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
+ aslc_stat |= asle_set_supported_rotation_angles(dev,
+ ioread32(&asle->srot));
+
+ if (aslc_req & ASLC_BUTTON_ARRAY)
+ aslc_stat |= asle_set_button_array(dev, ioread32(&asle->iuer));
+
+ if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
+ aslc_stat |= asle_set_convertible(dev, ioread32(&asle->iuer));
+
+ if (aslc_req & ASLC_DOCKING_INDICATOR)
+ aslc_stat |= asle_set_docking(dev, ioread32(&asle->iuer));
+
+ if (aslc_req & ASLC_ISCT_STATE_CHANGE)
+ aslc_stat |= asle_isct_state(dev);
+
+ iowrite32(aslc_stat, &asle->aslc);
+}
+
+void intel_opregion_asle_intr(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->opregion.asle)
+ schedule_work(&dev_priv->opregion.asle_work);
+}
+
+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
+#define ACPI_EV_LID (1<<1)
+#define ACPI_EV_DOCK (1<<2)
+
+static struct intel_opregion *system_opregion;
+
+static int intel_opregion_video_event(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ /* The only video events relevant to opregion are 0x80. These indicate
+ either a docking event, lid switch or display switch request. In
+ Linux, these are handled by the dock, button and video drivers.
+ */
+
+ struct opregion_acpi __iomem *acpi;
+ struct acpi_bus_event *event = data;
+ int ret = NOTIFY_OK;
+
+ if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+ return NOTIFY_DONE;
+
+ if (!system_opregion)
+ return NOTIFY_DONE;
+
+ acpi = system_opregion->acpi;
+
+ if (event->type == 0x80 &&
+ (ioread32(&acpi->cevt) & 1) == 0)
+ ret = NOTIFY_BAD;
+
+ iowrite32(0, &acpi->csts);
+
+ return ret;
+}
+
+static struct notifier_block intel_opregion_notifier = {
+ .notifier_call = intel_opregion_video_event,
+};
+
+/*
+ * Initialise the DIDL field in opregion. This passes a list of devices to
+ * the firmware. Values are defined by section B.4.2 of the ACPI specification
+ * (version 3)
+ */
+
+static void intel_didl_outputs(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ struct drm_connector *connector;
+ acpi_handle handle;
+ struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
+ unsigned long long device_id;
+ acpi_status status;
+ u32 temp;
+ int i = 0;
+
+ handle = ACPI_HANDLE(&dev->pdev->dev);
+ if (!handle || acpi_bus_get_device(handle, &acpi_dev))
+ return;
+
+ if (acpi_is_video_device(handle))
+ acpi_video_bus = acpi_dev;
+ else {
+ list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
+ if (acpi_is_video_device(acpi_cdev->handle)) {
+ acpi_video_bus = acpi_cdev;
+ break;
+ }
+ }
+ }
+
+ if (!acpi_video_bus) {
+ pr_warn("No ACPI video bus found\n");
+ return;
+ }
+
+ list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
+ if (i >= 8) {
+ dev_dbg(&dev->pdev->dev,
+ "More than 8 outputs detected via ACPI\n");
+ return;
+ }
+ status =
+ acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+ NULL, &device_id);
+ if (ACPI_SUCCESS(status)) {
+ if (!device_id)
+ goto blind_set;
+ iowrite32((u32)(device_id & 0x0f0f),
+ &opregion->acpi->didl[i]);
+ i++;
+ }
+ }
+
+end:
+ /* If fewer than 8 outputs, the list must be null terminated */
+ if (i < 8)
+ iowrite32(0, &opregion->acpi->didl[i]);
+ return;
+
+blind_set:
+ i = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ int output_type = ACPI_OTHER_OUTPUT;
+ if (i >= 8) {
+ dev_dbg(&dev->pdev->dev,
+ "More than 8 outputs in connector list\n");
+ return;
+ }
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_DVIA:
+ output_type = ACPI_VGA_OUTPUT;
+ break;
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Component:
+ case DRM_MODE_CONNECTOR_9PinDIN:
+ output_type = ACPI_TV_OUTPUT;
+ break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ output_type = ACPI_DIGITAL_OUTPUT;
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ output_type = ACPI_LVDS_OUTPUT;
+ break;
+ }
+ temp = ioread32(&opregion->acpi->didl[i]);
+ iowrite32(temp | (1<<31) | output_type | i,
+ &opregion->acpi->didl[i]);
+ i++;
+ }
+ goto end;
+}
+
+static void intel_setup_cadls(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ int i = 0;
+ u32 disp_id;
+
+	/* Initialize the CADL field by duplicating the DIDL values.
+	 * Technically this is not always correct, as display outputs may exist
+	 * but not be active. This initialization is necessary for some Clevo
+	 * laptops that check this field before processing the brightness and
+	 * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
+	 * there are fewer than eight devices. */
+ do {
+ disp_id = ioread32(&opregion->acpi->didl[i]);
+ iowrite32(disp_id, &opregion->acpi->cadl[i]);
+ } while (++i < 8 && disp_id != 0);
+}
+
+void intel_opregion_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_didl_outputs(dev);
+ intel_setup_cadls(dev);
+ }
+
+ /* Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video module.
+ * We don't actually need to do anything with them. */
+ iowrite32(0, &opregion->acpi->csts);
+ iowrite32(1, &opregion->acpi->drdy);
+
+ system_opregion = opregion;
+ register_acpi_notifier(&intel_opregion_notifier);
+ }
+
+ if (opregion->asle) {
+ iowrite32(ASLE_TCHE_BLC_EN, &opregion->asle->tche);
+ iowrite32(ASLE_ARDY_READY, &opregion->asle->ardy);
+ }
+}
+
+void intel_opregion_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->asle)
+ iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
+
+ cancel_work_sync(&dev_priv->opregion.asle_work);
+
+ if (opregion->acpi) {
+ iowrite32(0, &opregion->acpi->drdy);
+
+ system_opregion = NULL;
+ unregister_acpi_notifier(&intel_opregion_notifier);
+ }
+
+ /* just clear all opregion memory pointers now */
+ iounmap(opregion->header);
+ opregion->header = NULL;
+ opregion->acpi = NULL;
+ opregion->swsci = NULL;
+ opregion->asle = NULL;
+ opregion->vbt = NULL;
+ opregion->lid_state = NULL;
+}
+
+static void swsci_setup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ bool requested_callbacks = false;
+ u32 tmp;
+
+	/* Sub-function code 0 is okay in both GBDA and SBCB, so allow it. */
+ opregion->swsci_gbda_sub_functions = 1;
+ opregion->swsci_sbcb_sub_functions = 1;
+
+ /* We use GBDA to ask for supported GBDA calls. */
+ if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+ /* make the bits match the sub-function codes */
+ tmp <<= 1;
+ opregion->swsci_gbda_sub_functions |= tmp;
+ }
+
+ /*
+ * We also use GBDA to ask for _requested_ SBCB callbacks. The driver
+ * must not call interfaces that are not specifically requested by the
+	 * BIOS.
+ */
+ if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+ /* here, the bits already match sub-function codes */
+ opregion->swsci_sbcb_sub_functions |= tmp;
+ requested_callbacks = true;
+ }
+
+	/*
+	 * We use SBCB to ask for _supported_ SBCB calls. Supported does not
+	 * mean _requested_, so we still must not call interfaces that are
+	 * not requested.
+	 */
+ if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+ /* make the bits match the sub-function codes */
+ u32 low = tmp & 0x7ff;
+ u32 high = tmp & ~0xfff; /* bit 11 is reserved */
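+		/* bits 0-10 become codes 1-11 and bits 12 and up become
+		 * codes 16 and up, with code 0 always marked supported */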
+ tmp = (high << 4) | (low << 1) | 1;
+
+ /* best guess what to do with supported wrt requested */
+ if (requested_callbacks) {
+ u32 req = opregion->swsci_sbcb_sub_functions;
+ if ((req & tmp) != req)
+ DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
+ /* XXX: for now, trust the requested callbacks */
+ /* opregion->swsci_sbcb_sub_functions &= tmp; */
+ } else {
+ opregion->swsci_sbcb_sub_functions |= tmp;
+ }
+ }
+
+ DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
+ opregion->swsci_gbda_sub_functions,
+ opregion->swsci_sbcb_sub_functions);
+}
+#else /* CONFIG_ACPI */
+static inline void swsci_setup(struct drm_device *dev) {}
+#endif /* CONFIG_ACPI */
+
+int intel_opregion_setup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ void __iomem *base;
+ u32 asls, mboxes;
+ char buf[sizeof(OPREGION_SIGNATURE)];
+ int err = 0;
+
+ pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
+ DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
+ if (asls == 0) {
+ DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
+ return -ENOTSUPP;
+ }
+
+#ifdef CONFIG_ACPI
+ INIT_WORK(&opregion->asle_work, asle_work);
+#endif
+
+ base = acpi_os_ioremap(asls, OPREGION_SIZE);
+ if (!base)
+ return -ENOMEM;
+
+ memcpy_fromio(buf, base, sizeof(buf));
+
+ if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
+ DRM_DEBUG_DRIVER("opregion signature mismatch\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+ opregion->header = base;
+ opregion->vbt = base + OPREGION_VBT_OFFSET;
+
+ opregion->lid_state = base + ACPI_CLID;
+
+ mboxes = ioread32(&opregion->header->mboxes);
+ if (mboxes & MBOX_ACPI) {
+ DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
+ opregion->acpi = base + OPREGION_ACPI_OFFSET;
+ }
+
+ if (mboxes & MBOX_SWSCI) {
+ DRM_DEBUG_DRIVER("SWSCI supported\n");
+ opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+ swsci_setup(dev);
+ }
+ if (mboxes & MBOX_ASLE) {
+ DRM_DEBUG_DRIVER("ASLE supported\n");
+ opregion->asle = base + OPREGION_ASLE_OFFSET;
+
+ iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
+ }
+
+ return 0;
+
+err_out:
+ iounmap(base);
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 2639591c72e..daa118978ee 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,9 +25,8 @@
*
* Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
*/
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_drv.h"
@@ -65,7 +64,7 @@
#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
-#define OCMD_BUF_TYPE_MASK (Ox1<<5)
+#define OCMD_BUF_TYPE_MASK (0x1<<5)
#define OCMD_BUF_TYPE_FRAME (0x0<<5)
#define OCMD_BUF_TYPE_FIELD (0x1<<5)
#define OCMD_TEST_MODE (0x1<<4)
@@ -115,147 +114,159 @@
/* memory buffered overlay registers */
struct overlay_registers {
- u32 OBUF_0Y;
- u32 OBUF_1Y;
- u32 OBUF_0U;
- u32 OBUF_0V;
- u32 OBUF_1U;
- u32 OBUF_1V;
- u32 OSTRIDE;
- u32 YRGB_VPH;
- u32 UV_VPH;
- u32 HORZ_PH;
- u32 INIT_PHS;
- u32 DWINPOS;
- u32 DWINSZ;
- u32 SWIDTH;
- u32 SWIDTHSW;
- u32 SHEIGHT;
- u32 YRGBSCALE;
- u32 UVSCALE;
- u32 OCLRC0;
- u32 OCLRC1;
- u32 DCLRKV;
- u32 DCLRKM;
- u32 SCLRKVH;
- u32 SCLRKVL;
- u32 SCLRKEN;
- u32 OCONFIG;
- u32 OCMD;
- u32 RESERVED1; /* 0x6C */
- u32 OSTART_0Y;
- u32 OSTART_1Y;
- u32 OSTART_0U;
- u32 OSTART_0V;
- u32 OSTART_1U;
- u32 OSTART_1V;
- u32 OTILEOFF_0Y;
- u32 OTILEOFF_1Y;
- u32 OTILEOFF_0U;
- u32 OTILEOFF_0V;
- u32 OTILEOFF_1U;
- u32 OTILEOFF_1V;
- u32 FASTHSCALE; /* 0xA0 */
- u32 UVSCALEV; /* 0xA4 */
- u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
- u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
- u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
- u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
- u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
- u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
- u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
- u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
- u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+ u32 OBUF_0Y;
+ u32 OBUF_1Y;
+ u32 OBUF_0U;
+ u32 OBUF_0V;
+ u32 OBUF_1U;
+ u32 OBUF_1V;
+ u32 OSTRIDE;
+ u32 YRGB_VPH;
+ u32 UV_VPH;
+ u32 HORZ_PH;
+ u32 INIT_PHS;
+ u32 DWINPOS;
+ u32 DWINSZ;
+ u32 SWIDTH;
+ u32 SWIDTHSW;
+ u32 SHEIGHT;
+ u32 YRGBSCALE;
+ u32 UVSCALE;
+ u32 OCLRC0;
+ u32 OCLRC1;
+ u32 DCLRKV;
+ u32 DCLRKM;
+ u32 SCLRKVH;
+ u32 SCLRKVL;
+ u32 SCLRKEN;
+ u32 OCONFIG;
+ u32 OCMD;
+ u32 RESERVED1; /* 0x6C */
+ u32 OSTART_0Y;
+ u32 OSTART_1Y;
+ u32 OSTART_0U;
+ u32 OSTART_0V;
+ u32 OSTART_1U;
+ u32 OSTART_1V;
+ u32 OTILEOFF_0Y;
+ u32 OTILEOFF_1Y;
+ u32 OTILEOFF_0U;
+ u32 OTILEOFF_0V;
+ u32 OTILEOFF_1U;
+ u32 OTILEOFF_1V;
+ u32 FASTHSCALE; /* 0xA0 */
+ u32 UVSCALEV; /* 0xA4 */
+ u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
+ u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
+ u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
+ u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
+ u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
+ u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
+ u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
+ u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
+ u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
};
-/* overlay flip addr flag */
-#define OFC_UPDATE 0x1
-
-#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
-#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev))
-
+struct intel_overlay {
+ struct drm_device *dev;
+ struct intel_crtc *crtc;
+ struct drm_i915_gem_object *vid_bo;
+ struct drm_i915_gem_object *old_vid_bo;
+ int active;
+ int pfit_active;
+ u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
+ u32 color_key;
+ u32 brightness, contrast, saturation;
+ u32 old_xscale, old_yscale;
+ /* register access */
+ u32 flip_addr;
+ struct drm_i915_gem_object *reg_bo;
+ /* flip handling */
+ uint32_t last_flip_req;
+ void (*flip_tail)(struct intel_overlay *);
+};
-static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+static struct overlay_registers __iomem *
+intel_overlay_map_regs(struct intel_overlay *overlay)
{
- drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- struct overlay_registers *regs;
+ struct drm_i915_private *dev_priv = overlay->dev->dev_private;
+ struct overlay_registers __iomem *regs;
- /* no recursive mappings */
- BUG_ON(overlay->virt_addr);
-
- if (OVERLAY_NONPHYSICAL(overlay->dev)) {
- regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- overlay->reg_bo->gtt_offset);
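+	/* The register page is either physically contiguous memory or is
+	 * reached through the mappable GTT aperture, depending on the chip. */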
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
+ else
+ regs = io_mapping_map_wc(dev_priv->gtt.mappable,
+ i915_gem_obj_ggtt_offset(overlay->reg_bo));
- if (!regs) {
- DRM_ERROR("failed to map overlay regs in GTT\n");
- return NULL;
- }
- } else
- regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ return regs;
+}
- return overlay->virt_addr = regs;
+static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
+ struct overlay_registers __iomem *regs)
+{
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ io_mapping_unmap(regs);
}
-static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
+static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+ void (*tail)(struct intel_overlay *))
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (OVERLAY_NONPHYSICAL(overlay->dev))
- io_mapping_unmap_atomic(overlay->virt_addr);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ int ret;
- overlay->virt_addr = NULL;
+ BUG_ON(overlay->last_flip_req);
+ ret = i915_add_request(ring, &overlay->last_flip_req);
+ if (ret)
+ return ret;
- I915_READ(OVADD); /* flush wc cashes */
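+	/* Stash the tail so that a wait interrupted by a signal can be
+	 * finished later by intel_overlay_recover_from_interrupt(). */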
+ overlay->flip_tail = tail;
+ ret = i915_wait_seqno(ring, overlay->last_flip_req);
+ if (ret)
+ return ret;
+ i915_gem_retire_requests(dev);
- return;
+ overlay->last_flip_req = 0;
+ return 0;
}
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
- RING_LOCALS;
BUG_ON(overlay->active);
-
overlay->active = 1;
- overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
-
- BEGIN_LP_RING(6);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
- OUT_RING(overlay->flip_addr | OFC_UPDATE);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
-
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
- if (ret != 0)
+ WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
return ret;
- overlay->hw_wedged = 0;
- overlay->last_flip_req = 0;
- return 0;
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+ intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return intel_overlay_do_wait_request(overlay, NULL);
}
/* overlay needs to be enabled in OCMD reg */
-static void intel_overlay_continue(struct intel_overlay *overlay,
- bool load_polyphase_filter)
+static int intel_overlay_continue(struct intel_overlay *overlay,
+ bool load_polyphase_filter)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
u32 flip_addr = overlay->flip_addr;
u32 tmp;
- RING_LOCALS;
+ int ret;
BUG_ON(!overlay->active);
@@ -267,65 +278,51 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- BEGIN_LP_RING(4);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- OUT_RING(flip_addr);
- ADVANCE_LP_RING();
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_advance(ring);
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ return i915_add_request(ring, &overlay->last_flip_req);
}
-static int intel_overlay_wait_flip(struct intel_overlay *overlay)
+static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
- u32 tmp;
- RING_LOCALS;
-
- if (overlay->last_flip_req != 0) {
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
- if (ret == 0) {
- overlay->last_flip_req = 0;
+ struct drm_i915_gem_object *obj = overlay->old_vid_bo;
- tmp = I915_READ(ISR);
+ i915_gem_object_ggtt_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
- if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
- return 0;
- }
- }
-
- /* synchronous slowpath */
- overlay->hw_wedged = RELEASE_OLD_VID;
+ overlay->old_vid_bo = NULL;
+}
- BEGIN_LP_RING(2);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
+{
+ struct drm_i915_gem_object *obj = overlay->vid_bo;
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
+ /* never have the overlay hw on without showing a frame */
+ BUG_ON(!overlay->vid_bo);
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
- if (ret != 0)
- return ret;
+ i915_gem_object_ggtt_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ overlay->vid_bo = NULL;
- overlay->hw_wedged = 0;
- overlay->last_flip_req = 0;
- return 0;
+ overlay->crtc->overlay = NULL;
+ overlay->crtc = NULL;
+ overlay->active = 0;
}
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{
- u32 flip_addr = overlay->flip_addr;
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ u32 flip_addr = overlay->flip_addr;
int ret;
- RING_LOCALS;
BUG_ON(!overlay->active);
@@ -335,158 +332,89 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- /* wait for overlay to go idle */
- overlay->hw_wedged = SWITCH_OFF_STAGE_1;
-
- BEGIN_LP_RING(6);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
-
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
- if (ret != 0)
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
return ret;
+ /* wait for overlay to go idle */
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
- overlay->hw_wedged = SWITCH_OFF_STAGE_2;
-
- BEGIN_LP_RING(6);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
-
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
- if (ret != 0)
- return ret;
-
- overlay->hw_wedged = 0;
- overlay->last_flip_req = 0;
- return ret;
-}
-
-static void intel_overlay_off_tail(struct intel_overlay *overlay)
-{
- struct drm_gem_object *obj;
-
- /* never have the overlay hw on without showing a frame */
- BUG_ON(!overlay->vid_bo);
- obj = overlay->vid_bo->obj;
-
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- overlay->vid_bo = NULL;
+ if (IS_I830(dev)) {
+ /* Workaround: Don't disable the overlay fully, since otherwise
+ * it dies on the next OVERLAY_ON cmd. */
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ } else {
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ }
+ intel_ring_advance(ring);
- overlay->crtc->overlay = NULL;
- overlay->crtc = NULL;
- overlay->active = 0;
+ return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
}
/* recover from an interruption due to a signal
 * We have to be careful not to repeat work forever and make forward progress. */
-int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
- int interruptible)
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- u32 flip_addr;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
- RING_LOCALS;
-
- if (overlay->hw_wedged == HW_WEDGED)
- return -EIO;
- if (overlay->last_flip_req == 0) {
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
- }
+ if (overlay->last_flip_req == 0)
+ return 0;
- ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
- if (ret != 0)
+ ret = i915_wait_seqno(ring, overlay->last_flip_req);
+ if (ret)
return ret;
+ i915_gem_retire_requests(dev);
- switch (overlay->hw_wedged) {
- case RELEASE_OLD_VID:
- obj = overlay->old_vid_bo->obj;
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- overlay->old_vid_bo = NULL;
- break;
- case SWITCH_OFF_STAGE_1:
- flip_addr = overlay->flip_addr;
- flip_addr |= OFC_UPDATE;
-
- overlay->hw_wedged = SWITCH_OFF_STAGE_2;
-
- BEGIN_LP_RING(6);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
-
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible);
- if (ret != 0)
- return ret;
-
- case SWITCH_OFF_STAGE_2:
- intel_overlay_off_tail(overlay);
- break;
- default:
- BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
- }
+ if (overlay->flip_tail)
+ overlay->flip_tail(overlay);
- overlay->hw_wedged = 0;
overlay->last_flip_req = 0;
return 0;
}
/* Wait for pending overlay flip and release old frame.
 * Needs to be called before the overlay registers are changed
- * via intel_overlay_(un)map_regs_atomic */
+ * via intel_overlay_(un)map_regs
+ */
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
+ struct drm_device *dev = overlay->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
- struct drm_gem_object *obj;
- /* only wait if there is actually an old frame to release to
- * guarantee forward progress */
+ /* Only wait if there is actually an old frame to release to
+ * guarantee forward progress.
+ */
if (!overlay->old_vid_bo)
return 0;
- ret = intel_overlay_wait_flip(overlay);
- if (ret != 0)
- return ret;
+ if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
+ /* synchronous slowpath */
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- obj = overlay->old_vid_bo->obj;
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- overlay->old_vid_bo = NULL;
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ ret = intel_overlay_do_wait_request(overlay,
+ intel_overlay_release_old_vid_tail);
+ if (ret)
+ return ret;
+ }
+ intel_overlay_release_old_vid_tail(overlay);
return 0;
}
@@ -510,67 +438,67 @@ struct put_image_params {
static int packed_depth_bytes(u32 format)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- return 4;
- case I915_OVERLAY_YUV411:
- /* return 6; not implemented */
- default:
- return -EINVAL;
+ case I915_OVERLAY_YUV422:
+ return 4;
+ case I915_OVERLAY_YUV411:
+ /* return 6; not implemented */
+ default:
+ return -EINVAL;
}
}
static int packed_width_bytes(u32 format, short width)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- return width << 1;
- default:
- return -EINVAL;
+ case I915_OVERLAY_YUV422:
+ return width << 1;
+ default:
+ return -EINVAL;
}
}
static int uv_hsubsampling(u32 format)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- case I915_OVERLAY_YUV420:
- return 2;
- case I915_OVERLAY_YUV411:
- case I915_OVERLAY_YUV410:
- return 4;
- default:
- return -EINVAL;
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV420:
+ return 2;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ return 4;
+ default:
+ return -EINVAL;
}
}
static int uv_vsubsampling(u32 format)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV420:
- case I915_OVERLAY_YUV410:
- return 2;
- case I915_OVERLAY_YUV422:
- case I915_OVERLAY_YUV411:
- return 1;
- default:
- return -EINVAL;
+ case I915_OVERLAY_YUV420:
+ case I915_OVERLAY_YUV410:
+ return 2;
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV411:
+ return 1;
+ default:
+ return -EINVAL;
}
}
static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
{
u32 mask, shift, ret;
- if (IS_I9XX(dev)) {
- mask = 0x3f;
- shift = 6;
- } else {
+ if (IS_GEN2(dev)) {
mask = 0x1f;
shift = 5;
+ } else {
+ mask = 0x3f;
+ shift = 6;
}
ret = ((offset + width + mask) >> shift) - (offset >> shift);
- if (IS_I9XX(dev))
+ if (!IS_GEN2(dev))
ret <<= 1;
- ret -=1;
+ ret -= 1;
return ret << 2;
}
@@ -591,7 +519,9 @@ static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
- 0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
+ 0xb000, 0x3000, 0x0800, 0x3000, 0xb000
+};
+
static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
@@ -601,16 +531,18 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
- 0x3000, 0x0800, 0x3000};
+ 0x3000, 0x0800, 0x3000
+};
-static void update_polyphase_filter(struct overlay_registers *regs)
+static void update_polyphase_filter(struct overlay_registers __iomem *regs)
{
- memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
- memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
+ memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+ memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs,
+ sizeof(uv_static_hcoeffs));
}
static bool update_scaling_factors(struct intel_overlay *overlay,
- struct overlay_registers *regs,
+ struct overlay_registers __iomem *regs,
struct put_image_params *params)
{
/* fixed point with a 12 bit shift */
@@ -634,29 +566,34 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
yscale = 1 << FP_SHIFT;
/*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
- xscale_UV = xscale/uv_hscale;
- yscale_UV = yscale/uv_vscale;
- /* make the Y scale to UV scale ratio an exact multiply */
- xscale = xscale_UV * uv_hscale;
- yscale = yscale_UV * uv_vscale;
+ xscale_UV = xscale/uv_hscale;
+ yscale_UV = yscale/uv_vscale;
+ /* make the Y scale to UV scale ratio an exact multiply */
+ xscale = xscale_UV * uv_hscale;
+ yscale = yscale_UV * uv_vscale;
/*} else {
- xscale_UV = 0;
- yscale_UV = 0;
- }*/
+ xscale_UV = 0;
+ yscale_UV = 0;
+ }*/
if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
scale_changed = true;
overlay->old_xscale = xscale;
overlay->old_yscale = yscale;
- regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
- | ((xscale >> FP_SHIFT) << 16)
- | ((xscale & FRACT_MASK) << 3);
- regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
- | ((xscale_UV >> FP_SHIFT) << 16)
- | ((xscale_UV & FRACT_MASK) << 3);
- regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
- | ((yscale_UV >> FP_SHIFT) << 0);
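+	/* Pack the 12.12 fixed-point scales: the y fraction is placed at
+	 * bit 20, the x integer part at bit 16 and the x fraction at bit 3
+	 * (UVSCALE uses the same layout for the UV scales). */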
+ iowrite32(((yscale & FRACT_MASK) << 20) |
+ ((xscale >> FP_SHIFT) << 16) |
+ ((xscale & FRACT_MASK) << 3),
+ &regs->YRGBSCALE);
+
+ iowrite32(((yscale_UV & FRACT_MASK) << 20) |
+ ((xscale_UV >> FP_SHIFT) << 16) |
+ ((xscale_UV & FRACT_MASK) << 3),
+ &regs->UVSCALE);
+
+ iowrite32((((yscale >> FP_SHIFT) << 16) |
+ ((yscale_UV >> FP_SHIFT) << 0)),
+ &regs->UVSCALEV);
if (scale_changed)
update_polyphase_filter(regs);
@@ -665,25 +602,33 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
}
static void update_colorkey(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
u32 key = overlay->color_key;
- switch (overlay->crtc->base.fb->bits_per_pixel) {
- case 8:
- regs->DCLRKV = 0;
- regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
- case 16:
- if (overlay->crtc->base.fb->depth == 15) {
- regs->DCLRKV = RGB15_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
- } else {
- regs->DCLRKV = RGB16_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
- }
- case 24:
- case 32:
- regs->DCLRKV = key;
- regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+
+ switch (overlay->crtc->base.primary->fb->bits_per_pixel) {
+ case 8:
+ iowrite32(0, &regs->DCLRKV);
+ iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
+ break;
+
+ case 16:
+ if (overlay->crtc->base.primary->fb->depth == 15) {
+ iowrite32(RGB15_TO_COLORKEY(key), &regs->DCLRKV);
+ iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE,
+ &regs->DCLRKM);
+ } else {
+ iowrite32(RGB16_TO_COLORKEY(key), &regs->DCLRKV);
+ iowrite32(CLK_RGB16_MASK | DST_KEY_ENABLE,
+ &regs->DCLRKM);
+ }
+ break;
+
+ case 24:
+ case 32:
+ iowrite32(key, &regs->DCLRKV);
+ iowrite32(CLK_RGB24_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
+ break;
}
}
@@ -693,160 +638,166 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
if (params->format & I915_OVERLAY_YUV_PLANAR) {
switch (params->format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- cmd |= OCMD_YUV_422_PLANAR;
- break;
- case I915_OVERLAY_YUV420:
- cmd |= OCMD_YUV_420_PLANAR;
- break;
- case I915_OVERLAY_YUV411:
- case I915_OVERLAY_YUV410:
- cmd |= OCMD_YUV_410_PLANAR;
- break;
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PLANAR;
+ break;
+ case I915_OVERLAY_YUV420:
+ cmd |= OCMD_YUV_420_PLANAR;
+ break;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ cmd |= OCMD_YUV_410_PLANAR;
+ break;
}
} else { /* YUV packed */
switch (params->format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- cmd |= OCMD_YUV_422_PACKED;
- break;
- case I915_OVERLAY_YUV411:
- cmd |= OCMD_YUV_411_PACKED;
- break;
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PACKED;
+ break;
+ case I915_OVERLAY_YUV411:
+ cmd |= OCMD_YUV_411_PACKED;
+ break;
}
switch (params->format & I915_OVERLAY_SWAP_MASK) {
- case I915_OVERLAY_NO_SWAP:
- break;
- case I915_OVERLAY_UV_SWAP:
- cmd |= OCMD_UV_SWAP;
- break;
- case I915_OVERLAY_Y_SWAP:
- cmd |= OCMD_Y_SWAP;
- break;
- case I915_OVERLAY_Y_AND_UV_SWAP:
- cmd |= OCMD_Y_AND_UV_SWAP;
- break;
+ case I915_OVERLAY_NO_SWAP:
+ break;
+ case I915_OVERLAY_UV_SWAP:
+ cmd |= OCMD_UV_SWAP;
+ break;
+ case I915_OVERLAY_Y_SWAP:
+ cmd |= OCMD_Y_SWAP;
+ break;
+ case I915_OVERLAY_Y_AND_UV_SWAP:
+ cmd |= OCMD_Y_AND_UV_SWAP;
+ break;
}
}
return cmd;
}
-int intel_overlay_do_put_image(struct intel_overlay *overlay,
- struct drm_gem_object *new_bo,
- struct put_image_params *params)
+static int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ struct drm_i915_gem_object *new_bo,
+ struct put_image_params *params)
{
int ret, tmp_width;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
bool scale_changed = false;
- struct drm_i915_gem_object *bo_priv = new_bo->driver_private;
struct drm_device *dev = overlay->dev;
+ u32 swidth, swidthsw, sheight, ostride;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
BUG_ON(!overlay);
ret = intel_overlay_release_old_vid(overlay);
if (ret != 0)
return ret;
- ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
+ ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
if (ret != 0)
return ret;
- ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
- if (ret != 0)
+ ret = i915_gem_object_put_fence(new_bo);
+ if (ret)
goto out_unpin;
if (!overlay->active) {
- regs = intel_overlay_map_regs_atomic(overlay);
+ u32 oconfig;
+ regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unpin;
}
- regs->OCONFIG = OCONF_CC_OUT_8BIT;
- if (IS_I965GM(overlay->dev))
- regs->OCONFIG |= OCONF_CSC_MODE_BT709;
- regs->OCONFIG |= overlay->crtc->pipe == 0 ?
+ oconfig = OCONF_CC_OUT_8BIT;
+ if (IS_GEN4(overlay->dev))
+ oconfig |= OCONF_CSC_MODE_BT709;
+ oconfig |= overlay->crtc->pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
- intel_overlay_unmap_regs_atomic(overlay);
+ iowrite32(oconfig, &regs->OCONFIG);
+ intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_on(overlay);
if (ret != 0)
goto out_unpin;
}
- regs = intel_overlay_map_regs_atomic(overlay);
+ regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unpin;
}
- regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
- regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
+ iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
+ iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
if (params->format & I915_OVERLAY_YUV_PACKED)
tmp_width = packed_width_bytes(params->format, params->src_w);
else
tmp_width = params->src_w;
- regs->SWIDTH = params->src_w;
- regs->SWIDTHSW = calc_swidthsw(overlay->dev,
- params->offset_Y, tmp_width);
- regs->SHEIGHT = params->src_h;
- regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
- regs->OSTRIDE = params->stride_Y;
+ swidth = params->src_w;
+ swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
+ sheight = params->src_h;
+ iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
+ ostride = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
int uv_hscale = uv_hsubsampling(params->format);
int uv_vscale = uv_vsubsampling(params->format);
u32 tmp_U, tmp_V;
- regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
+ swidth |= (params->src_w/uv_hscale) << 16;
tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
- params->src_w/uv_hscale);
+ params->src_w/uv_hscale);
tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
- params->src_w/uv_hscale);
- regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
- regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
- regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
- regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
- regs->OSTRIDE |= params->stride_UV << 16;
+ params->src_w/uv_hscale);
+ swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
+ sheight |= (params->src_h/uv_vscale) << 16;
+ iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
+ iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
+ ostride |= params->stride_UV << 16;
}
+ iowrite32(swidth, &regs->SWIDTH);
+ iowrite32(swidthsw, &regs->SWIDTHSW);
+ iowrite32(sheight, &regs->SHEIGHT);
+ iowrite32(ostride, &regs->OSTRIDE);
+
scale_changed = update_scaling_factors(overlay, regs, params);
update_colorkey(overlay, regs);
- regs->OCMD = overlay_cmd_reg(params);
+ iowrite32(overlay_cmd_reg(params), &regs->OCMD);
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
- intel_overlay_continue(overlay, scale_changed);
+ ret = intel_overlay_continue(overlay, scale_changed);
+ if (ret)
+ goto out_unpin;
overlay->old_vid_bo = overlay->vid_bo;
- overlay->vid_bo = new_bo->driver_private;
+ overlay->vid_bo = new_bo;
return 0;
out_unpin:
- i915_gem_object_unpin(new_bo);
+ i915_gem_object_ggtt_unpin(new_bo);
return ret;
}
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
- int ret;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
struct drm_device *dev = overlay->dev;
+ int ret;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- if (overlay->hw_wedged) {
- ret = intel_overlay_recover_from_interrupt(overlay, 1);
- if (ret != 0)
- return ret;
- }
+ ret = intel_overlay_recover_from_interrupt(overlay);
+ if (ret != 0)
+ return ret;
if (!overlay->active)
return 0;
@@ -855,33 +806,26 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
if (ret != 0)
return ret;
- regs = intel_overlay_map_regs_atomic(overlay);
- regs->OCMD = 0;
- intel_overlay_unmap_regs_atomic(overlay);
+ regs = intel_overlay_map_regs(overlay);
+ iowrite32(0, &regs->OCMD);
+ intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_off(overlay);
if (ret != 0)
return ret;
intel_overlay_off_tail(overlay);
-
return 0;
}
static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
struct intel_crtc *crtc)
{
- drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- u32 pipeconf;
- int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
-
- if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
+ if (!crtc->active)
return -EINVAL;
- pipeconf = I915_READ(pipeconf_reg);
-
/* can't use the overlay with double wide pipe */
- if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
+ if (crtc->config.double_wide)
return -EINVAL;
return 0;
@@ -890,20 +834,22 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 ratio;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 pfit_control = I915_READ(PFIT_CONTROL);
+ u32 ratio;
/* XXX: This is not the same logic as in the xorg driver, but more in
- * line with the intel documentation for the i965 */
- if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
- ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
- } else { /* on i965 use the PGM reg to read out the autoscaler values */
- ratio = I915_READ(PFIT_PGM_RATIOS);
- if (IS_I965G(dev))
- ratio >>= PFIT_VERT_SCALE_SHIFT_965;
+ * line with the intel documentation for the i965
+ */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* on i965 use the PGM reg to read out the autoscaler values */
+ ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+ } else {
+ if (pfit_control & VERT_AUTO_SCALE)
+ ratio = I915_READ(PFIT_AUTO_RATIOS);
else
- ratio >>= PFIT_VERT_SCALE_SHIFT;
+ ratio = I915_READ(PFIT_PGM_RATIOS);
+ ratio >>= PFIT_VERT_SCALE_SHIFT;
}
overlay->pfit_vscale_ratio = ratio;
@@ -914,12 +860,10 @@ static int check_overlay_dst(struct intel_overlay *overlay,
{
struct drm_display_mode *mode = &overlay->crtc->base.mode;
- if ((rec->dst_x < mode->crtc_hdisplay)
- && (rec->dst_x + rec->dst_width
- <= mode->crtc_hdisplay)
- && (rec->dst_y < mode->crtc_vdisplay)
- && (rec->dst_y + rec->dst_height
- <= mode->crtc_vdisplay))
+ if (rec->dst_x < mode->hdisplay &&
+ rec->dst_x + rec->dst_width <= mode->hdisplay &&
+ rec->dst_y < mode->vdisplay &&
+ rec->dst_y + rec->dst_height <= mode->vdisplay)
return 0;
else
return -EINVAL;
@@ -942,123 +886,155 @@ static int check_overlay_scaling(struct put_image_params *rec)
static int check_overlay_src(struct drm_device *dev,
struct drm_intel_overlay_put_image *rec,
- struct drm_gem_object *new_bo)
+ struct drm_i915_gem_object *new_bo)
{
- u32 stride_mask;
- int depth;
int uv_hscale = uv_hsubsampling(rec->flags);
int uv_vscale = uv_vsubsampling(rec->flags);
- size_t tmp;
+ u32 stride_mask;
+ int depth;
+ u32 tmp;
/* check src dimensions */
if (IS_845G(dev) || IS_I830(dev)) {
- if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
- || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
+ if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
+ rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
return -EINVAL;
} else {
- if (rec->src_height > IMAGE_MAX_HEIGHT
- || rec->src_width > IMAGE_MAX_WIDTH)
+ if (rec->src_height > IMAGE_MAX_HEIGHT ||
+ rec->src_width > IMAGE_MAX_WIDTH)
return -EINVAL;
}
+
/* better safe than sorry, use 4 as the maximal subsampling ratio */
- if (rec->src_height < N_VERT_Y_TAPS*4
- || rec->src_width < N_HORIZ_Y_TAPS*4)
+ if (rec->src_height < N_VERT_Y_TAPS*4 ||
+ rec->src_width < N_HORIZ_Y_TAPS*4)
return -EINVAL;
- /* check alingment constrains */
+ /* check alignment constraints */
switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
- case I915_OVERLAY_RGB:
- /* not implemented */
+ case I915_OVERLAY_RGB:
+ /* not implemented */
+ return -EINVAL;
+
+ case I915_OVERLAY_YUV_PACKED:
+ if (uv_vscale != 1)
return -EINVAL;
- case I915_OVERLAY_YUV_PACKED:
- depth = packed_depth_bytes(rec->flags);
- if (uv_vscale != 1)
- return -EINVAL;
- if (depth < 0)
- return depth;
- /* ignore UV planes */
- rec->stride_UV = 0;
- rec->offset_U = 0;
- rec->offset_V = 0;
- /* check pixel alignment */
- if (rec->offset_Y % depth)
- return -EINVAL;
- break;
- case I915_OVERLAY_YUV_PLANAR:
- if (uv_vscale < 0 || uv_hscale < 0)
- return -EINVAL;
- /* no offset restrictions for planar formats */
- break;
- default:
+
+ depth = packed_depth_bytes(rec->flags);
+ if (depth < 0)
+ return depth;
+
+ /* ignore UV planes */
+ rec->stride_UV = 0;
+ rec->offset_U = 0;
+ rec->offset_V = 0;
+ /* check pixel alignment */
+ if (rec->offset_Y % depth)
return -EINVAL;
+ break;
+
+ case I915_OVERLAY_YUV_PLANAR:
+ if (uv_vscale < 0 || uv_hscale < 0)
+ return -EINVAL;
+ /* no offset restrictions for planar formats */
+ break;
+
+ default:
+ return -EINVAL;
}
if (rec->src_width % uv_hscale)
return -EINVAL;
/* stride checking */
- stride_mask = 63;
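+	/* i830/845 need 256-byte aligned strides, everything else 64 bytes */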
+ if (IS_I830(dev) || IS_845G(dev))
+ stride_mask = 255;
+ else
+ stride_mask = 63;
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL;
- if (IS_I965G(dev) && rec->stride_Y < 512)
+ if (IS_GEN4(dev) && rec->stride_Y < 512)
return -EINVAL;
tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
- 4 : 8;
- if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
+ 4096 : 8192;
+ if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
return -EINVAL;
/* check buffer dimensions */
switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
- case I915_OVERLAY_RGB:
- case I915_OVERLAY_YUV_PACKED:
- /* always 4 Y values per depth pixels */
- if (packed_width_bytes(rec->flags, rec->src_width)
- > rec->stride_Y)
- return -EINVAL;
-
- tmp = rec->stride_Y*rec->src_height;
- if (rec->offset_Y + tmp > new_bo->size)
- return -EINVAL;
- break;
- case I915_OVERLAY_YUV_PLANAR:
- if (rec->src_width > rec->stride_Y)
- return -EINVAL;
- if (rec->src_width/uv_hscale > rec->stride_UV)
- return -EINVAL;
-
- tmp = rec->stride_Y*rec->src_height;
- if (rec->offset_Y + tmp > new_bo->size)
- return -EINVAL;
- tmp = rec->stride_UV*rec->src_height;
- tmp /= uv_vscale;
- if (rec->offset_U + tmp > new_bo->size
- || rec->offset_V + tmp > new_bo->size)
- return -EINVAL;
- break;
+ case I915_OVERLAY_RGB:
+ case I915_OVERLAY_YUV_PACKED:
+		/* always 4 Y values per depth pixel */
+ if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
+ return -EINVAL;
+
+ tmp = rec->stride_Y*rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->base.size)
+ return -EINVAL;
+ break;
+
+ case I915_OVERLAY_YUV_PLANAR:
+ if (rec->src_width > rec->stride_Y)
+ return -EINVAL;
+ if (rec->src_width/uv_hscale > rec->stride_UV)
+ return -EINVAL;
+
+ tmp = rec->stride_Y * rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->base.size)
+ return -EINVAL;
+
+ tmp = rec->stride_UV * (rec->src_height / uv_vscale);
+ if (rec->offset_U + tmp > new_bo->base.size ||
+ rec->offset_V + tmp > new_bo->base.size)
+ return -EINVAL;
+ break;
}
return 0;
}
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int intel_panel_fitter_pipe(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pfit_control;
+
+ /* i830 doesn't have a panel fitter */
+ if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+ return -1;
+
+ pfit_control = I915_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+
+ /* 965 can place panel fitter on either pipe */
+ if (IS_GEN4(dev))
+ return (pfit_control >> 29) & 0x3;
+
+ /* older chips can only use pipe 1 */
+ return 1;
+}
+
int intel_overlay_put_image(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_intel_overlay_put_image *put_image_rec = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct drm_mode_object *drmmode_obj;
struct intel_crtc *crtc;
- struct drm_gem_object *new_bo;
+ struct drm_i915_gem_object *new_bo;
struct put_image_params *params;
int ret;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
+ /* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
@@ -1066,41 +1042,49 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
}
if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
ret = intel_overlay_switch_off(overlay);
mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
- params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
+ params = kmalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!drmmode_obj)
- return -ENOENT;
+ DRM_MODE_OBJECT_CRTC);
+ if (!drmmode_obj) {
+ ret = -ENOENT;
+ goto out_free;
+ }
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
- new_bo = drm_gem_object_lookup(dev, file_priv,
- put_image_rec->bo_handle);
- if (!new_bo)
- return -ENOENT;
+ new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+ put_image_rec->bo_handle));
+ if (&new_bo->base == NULL) {
+ ret = -ENOENT;
+ goto out_free;
+ }
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
- if (overlay->hw_wedged) {
- ret = intel_overlay_recover_from_interrupt(overlay, 1);
- if (ret != 0)
- goto out_unlock;
+ if (new_bo->tiling_mode) {
+ DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
+ ret = -EINVAL;
+ goto out_unlock;
}
+ ret = intel_overlay_recover_from_interrupt(overlay);
+ if (ret != 0)
+ goto out_unlock;
+
if (overlay->crtc != crtc) {
struct drm_display_mode *mode = &crtc->base.mode;
ret = intel_overlay_switch_off(overlay);
@@ -1114,9 +1098,9 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
overlay->crtc = crtc;
crtc->overlay = overlay;
- if (intel_panel_fitter_pipe(dev) == crtc->pipe
- /* and line to wide, i.e. one-line-mode */
- && mode->hdisplay > 1024) {
+ /* line too wide, i.e. one-line-mode */
+ if (mode->hdisplay > 1024 &&
+ intel_panel_fitter_pipe(dev) == crtc->pipe) {
overlay->pfit_active = 1;
update_pfit_vscale_ratio(overlay);
} else
@@ -1129,10 +1113,10 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
if (overlay->pfit_active) {
params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
- overlay->pfit_vscale_ratio);
+ overlay->pfit_vscale_ratio);
/* shifting right rounds downwards, so add 1 */
params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
- overlay->pfit_vscale_ratio) + 1;
+ overlay->pfit_vscale_ratio) + 1;
} else {
params->dst_y = put_image_rec->dst_y;
params->dst_h = put_image_rec->dst_height;
@@ -1144,8 +1128,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
params->src_h = put_image_rec->src_height;
params->src_scan_w = put_image_rec->src_scan_width;
params->src_scan_h = put_image_rec->src_scan_height;
- if (params->src_scan_h > params->src_h
- || params->src_scan_w > params->src_w) {
+ if (params->src_scan_h > params->src_h ||
+ params->src_scan_w > params->src_w) {
ret = -EINVAL;
goto out_unlock;
}
@@ -1170,7 +1154,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
goto out_unlock;
mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
kfree(params);
@@ -1178,18 +1162,20 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
- drm_gem_object_unreference(new_bo);
+ drm_modeset_unlock_all(dev);
+ drm_gem_object_unreference_unlocked(&new_bo->base);
+out_free:
kfree(params);
return ret;
}
static void update_reg_attrs(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
- regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
- regs->OCLRC1 = overlay->saturation;
+ iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
+ &regs->OCLRC0);
+ iowrite32(overlay->saturation, &regs->OCLRC1);
}
static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
@@ -1200,7 +1186,7 @@ static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
return false;
for (i = 0; i < 3; i++) {
- if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+ if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
return false;
}
@@ -1221,49 +1207,48 @@ static bool check_gamma5_errata(u32 gamma5)
static int check_gamma(struct drm_intel_overlay_attrs *attrs)
{
- if (!check_gamma_bounds(0, attrs->gamma0)
- || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
- || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
- || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
- || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
- || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
- || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+ if (!check_gamma_bounds(0, attrs->gamma0) ||
+ !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
+ !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
+ !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
+ !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
+ !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
+ !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
return -EINVAL;
+
if (!check_gamma5_errata(attrs->gamma5))
return -EINVAL;
+
return 0;
}
int intel_overlay_attrs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_intel_overlay_attrs *attrs = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
int ret;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
+ /* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
return -ENODEV;
}
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
+ ret = -EINVAL;
if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
- attrs->color_key = overlay->color_key;
+ attrs->color_key = overlay->color_key;
attrs->brightness = overlay->brightness;
- attrs->contrast = overlay->contrast;
+ attrs->contrast = overlay->contrast;
attrs->saturation = overlay->saturation;
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
attrs->gamma0 = I915_READ(OGAMC0);
attrs->gamma1 = I915_READ(OGAMC1);
attrs->gamma2 = I915_READ(OGAMC2);
@@ -1271,29 +1256,20 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
attrs->gamma4 = I915_READ(OGAMC4);
attrs->gamma5 = I915_READ(OGAMC5);
}
- ret = 0;
} else {
- overlay->color_key = attrs->color_key;
- if (attrs->brightness >= -128 && attrs->brightness <= 127) {
- overlay->brightness = attrs->brightness;
- } else {
- ret = -EINVAL;
+ if (attrs->brightness < -128 || attrs->brightness > 127)
goto out_unlock;
- }
- if (attrs->contrast <= 255) {
- overlay->contrast = attrs->contrast;
- } else {
- ret = -EINVAL;
+ if (attrs->contrast > 255)
goto out_unlock;
- }
- if (attrs->saturation <= 1023) {
- overlay->saturation = attrs->saturation;
- } else {
- ret = -EINVAL;
+ if (attrs->saturation > 1023)
goto out_unlock;
- }
- regs = intel_overlay_map_regs_atomic(overlay);
+ overlay->color_key = attrs->color_key;
+ overlay->brightness = attrs->brightness;
+ overlay->contrast = attrs->contrast;
+ overlay->saturation = attrs->saturation;
+
+ regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unlock;
@@ -1301,13 +1277,11 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
update_reg_attrs(overlay, regs);
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
- if (!IS_I9XX(dev)) {
- ret = -EINVAL;
+ if (IS_GEN2(dev))
goto out_unlock;
- }
if (overlay->active) {
ret = -EBUSY;
@@ -1315,7 +1289,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
}
ret = check_gamma(attrs);
- if (ret != 0)
+ if (ret)
goto out_unlock;
I915_WRITE(OGAMC0, attrs->gamma0);
@@ -1325,52 +1299,66 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
I915_WRITE(OGAMC4, attrs->gamma4);
I915_WRITE(OGAMC5, attrs->gamma5);
}
- ret = 0;
}
+ ret = 0;
out_unlock:
mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
void intel_setup_overlay(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
- struct drm_gem_object *reg_bo;
- struct overlay_registers *regs;
+ struct drm_i915_gem_object *reg_bo;
+ struct overlay_registers __iomem *regs;
int ret;
- if (!OVERLAY_EXISTS(dev))
+ if (!HAS_OVERLAY(dev))
return;
- overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
+ overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return;
+
+ mutex_lock(&dev->struct_mutex);
+ if (WARN_ON(dev_priv->overlay))
+ goto out_free;
+
overlay->dev = dev;
- reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
- if (!reg_bo)
+ reg_bo = NULL;
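+	/* Try stolen memory first (when the registers do not need to be
+	 * physically addressed) and fall back to a regular GEM object. */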
+ if (!OVERLAY_NEEDS_PHYSICAL(dev))
+ reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
+ if (reg_bo == NULL)
+ reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
+ if (reg_bo == NULL)
goto out_free;
- overlay->reg_bo = reg_bo->driver_private;
+ overlay->reg_bo = reg_bo;
- if (OVERLAY_NONPHYSICAL(dev)) {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+ if (OVERLAY_NEEDS_PHYSICAL(dev)) {
+ ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
if (ret) {
- DRM_ERROR("failed to pin overlay register bo\n");
- goto out_free_bo;
- }
- overlay->flip_addr = overlay->reg_bo->gtt_offset;
+ DRM_ERROR("failed to attach phys overlay regs\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = reg_bo->phys_handle->busaddr;
} else {
- ret = i915_gem_attach_phys_object(dev, reg_bo,
- I915_GEM_PHYS_OVERLAY_REGS);
- if (ret) {
- DRM_ERROR("failed to attach phys overlay regs\n");
- goto out_free_bo;
- }
- overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+ ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
+ if (ret) {
+ DRM_ERROR("failed to pin overlay register bo\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
+
+ ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
+ if (ret) {
+ DRM_ERROR("failed to move overlay register bo into the GTT\n");
+ goto out_unpin_bo;
+ }
}
/* init all values */
@@ -1379,38 +1367,167 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->contrast = 75;
overlay->saturation = 146;
- regs = intel_overlay_map_regs_atomic(overlay);
+ regs = intel_overlay_map_regs(overlay);
if (!regs)
- goto out_free_bo;
+ goto out_unpin_bo;
- memset(regs, 0, sizeof(struct overlay_registers));
+ memset_io(regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(regs);
-
update_reg_attrs(overlay, regs);
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
dev_priv->overlay = overlay;
+ mutex_unlock(&dev->struct_mutex);
DRM_INFO("initialized overlay support\n");
return;
+out_unpin_bo:
+ if (!OVERLAY_NEEDS_PHYSICAL(dev))
+ i915_gem_object_ggtt_unpin(reg_bo);
out_free_bo:
- drm_gem_object_unreference(reg_bo);
+ drm_gem_object_unreference(&reg_bo->base);
out_free:
+ mutex_unlock(&dev->struct_mutex);
kfree(overlay);
return;
}
void intel_cleanup_overlay(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->overlay) {
- /* The bo's should be free'd by the generic code already.
- * Furthermore modesetting teardown happens beforehand so the
- * hardware should be off already */
- BUG_ON(dev_priv->overlay->active);
+ if (!dev_priv->overlay)
+ return;
- kfree(dev_priv->overlay);
- }
+	/* The bos should be freed by the generic code already.
+	 * Furthermore, modesetting teardown happens beforehand, so the
+	 * hardware should be off already. */
+ BUG_ON(dev_priv->overlay->active);
+
+ drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
+ kfree(dev_priv->overlay);
+}
+
+struct intel_overlay_error_state {
+ struct overlay_registers regs;
+ unsigned long base;
+ u32 dovsta;
+ u32 isr;
+};
+
+static struct overlay_registers __iomem *
+intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+{
+ struct drm_i915_private *dev_priv = overlay->dev->dev_private;
+ struct overlay_registers __iomem *regs;
+
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ /* Cast to make sparse happy, but it's wc memory anyway, so
+	 * it's equivalent to the wc io mapping on X86. */
+ regs = (struct overlay_registers __iomem *)
+ overlay->reg_bo->phys_handle->vaddr;
+ else
+ regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ i915_gem_obj_ggtt_offset(overlay->reg_bo));
+
+ return regs;
+}
+
+static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
+ struct overlay_registers __iomem *regs)
+{
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ io_mapping_unmap_atomic(regs);
+}
+
+
+struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay = dev_priv->overlay;
+ struct intel_overlay_error_state *error;
+ struct overlay_registers __iomem *regs;
+
+ if (!overlay || !overlay->active)
+ return NULL;
+
+ error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ if (error == NULL)
+ return NULL;
+
+ error->dovsta = I915_READ(DOVSTA);
+ error->isr = I915_READ(ISR);
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
+ else
+ error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs)
+ goto err;
+
+ memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
+ intel_overlay_unmap_regs_atomic(overlay, regs);
+
+ return error;
+
+err:
+ kfree(error);
+ return NULL;
+}
+
+void
+intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
+ struct intel_overlay_error_state *error)
+{
+ i915_error_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
+ error->dovsta, error->isr);
+ i915_error_printf(m, " Register file at 0x%08lx:\n",
+ error->base);
+
+#define P(x) i915_error_printf(m, " " #x ": 0x%08x\n", error->regs.x)
+ P(OBUF_0Y);
+ P(OBUF_1Y);
+ P(OBUF_0U);
+ P(OBUF_0V);
+ P(OBUF_1U);
+ P(OBUF_1V);
+ P(OSTRIDE);
+ P(YRGB_VPH);
+ P(UV_VPH);
+ P(HORZ_PH);
+ P(INIT_PHS);
+ P(DWINPOS);
+ P(DWINSZ);
+ P(SWIDTH);
+ P(SWIDTHSW);
+ P(SHEIGHT);
+ P(YRGBSCALE);
+ P(UVSCALE);
+ P(OCLRC0);
+ P(OCLRC1);
+ P(DCLRKV);
+ P(DCLRKM);
+ P(SCLRKVH);
+ P(SCLRKVL);
+ P(SCLRKEN);
+ P(OCONFIG);
+ P(OCMD);
+ P(OSTART_0Y);
+ P(OSTART_1Y);
+ P(OSTART_0U);
+ P(OSTART_0V);
+ P(OSTART_1U);
+ P(OSTART_1V);
+ P(OTILEOFF_0Y);
+ P(OTILEOFF_1Y);
+ P(OTILEOFF_0U);
+ P(OTILEOFF_0V);
+ P(OTILEOFF_1U);
+ P(OTILEOFF_1V);
+ P(FASTHSCALE);
+ P(UVSCALEV);
+#undef P
}
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
new file mode 100644
index 00000000000..12b02fe1d0a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -0,0 +1,1221 @@
+/*
+ * Copyright © 2006-2010 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/moduleparam.h>
+#include "intel_drv.h"
+
+void
+intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ drm_mode_copy(adjusted_mode, fixed_mode);
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+}
+
+/**
+ * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @fixed_mode: panel native mode
+ * @connector: LVDS/eDP connector
+ *
+ * Find the reduced downclock for LVDS/eDP in EDID. Returns the duplicated
+ * downclock mode if one is available, otherwise NULL.
+ */
+struct drm_display_mode *
+intel_find_panel_downclock(struct drm_device *dev,
+ struct drm_display_mode *fixed_mode,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *scan, *tmp_mode;
+ int temp_downclock;
+
+ temp_downclock = fixed_mode->clock;
+ tmp_mode = NULL;
+
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+		/*
+		 * If a probed mode has the same timings as the fixed panel
+		 * mode but a different refresh rate, it is a candidate
+		 * reduced downclock. In that case we can program different
+		 * FPx0/1 values to dynamically select between the low and
+		 * high frequencies.
+		 */
+ if (scan->hdisplay == fixed_mode->hdisplay &&
+ scan->hsync_start == fixed_mode->hsync_start &&
+ scan->hsync_end == fixed_mode->hsync_end &&
+ scan->htotal == fixed_mode->htotal &&
+ scan->vdisplay == fixed_mode->vdisplay &&
+ scan->vsync_start == fixed_mode->vsync_start &&
+ scan->vsync_end == fixed_mode->vsync_end &&
+ scan->vtotal == fixed_mode->vtotal) {
+ if (scan->clock < temp_downclock) {
+				/*
+				 * A downclock was already found, but keep
+				 * looking for an even lower one.
+				 */
+ temp_downclock = scan->clock;
+ tmp_mode = scan;
+ }
+ }
+ }
+
+ if (temp_downclock < fixed_mode->clock)
+ return drm_mode_duplicate(dev, tmp_mode);
+ else
+ return NULL;
+}
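
The loop above reduces to a simple predicate: identical panel timings, strictly lower pixel clock. A minimal userspace sketch of that predicate with a hypothetical struct and illustrative numbers (the real drm_display_mode comparison also covers the sync start/end fields):

#include <stdio.h>

/* Hypothetical stand-in for the drm_display_mode timing fields
 * compared above; the real struct has many more members. */
struct mode {
	int hdisplay, htotal, vdisplay, vtotal, clock; /* clock in kHz */
};

static int same_timings(const struct mode *a, const struct mode *b)
{
	return a->hdisplay == b->hdisplay && a->htotal == b->htotal &&
	       a->vdisplay == b->vdisplay && a->vtotal == b->vtotal;
}

int main(void)
{
	struct mode fixed = { 1366, 1500, 768, 800, 72000 }; /* ~60 Hz */
	struct mode scan  = { 1366, 1500, 768, 800, 48000 }; /* ~40 Hz */

	if (same_timings(&fixed, &scan) && scan.clock < fixed.clock)
		printf("downclock candidate: %d kHz\n", scan.clock);
	return 0;
}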
+
+/* adjusted_mode has been preset to be the panel's fixed mode */
+void
+intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
+ struct intel_crtc_config *pipe_config,
+ int fitting_mode)
+{
+ struct drm_display_mode *adjusted_mode;
+ int x, y, width, height;
+
+ adjusted_mode = &pipe_config->adjusted_mode;
+
+ x = y = width = height = 0;
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
+ adjusted_mode->vdisplay == pipe_config->pipe_src_h)
+ goto done;
+
+ switch (fitting_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ width = pipe_config->pipe_src_w;
+ height = pipe_config->pipe_src_h;
+ x = (adjusted_mode->hdisplay - width + 1)/2;
+ y = (adjusted_mode->vdisplay - height + 1)/2;
+ break;
+
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ {
+ u32 scaled_width = adjusted_mode->hdisplay
+ * pipe_config->pipe_src_h;
+ u32 scaled_height = pipe_config->pipe_src_w
+ * adjusted_mode->vdisplay;
+ if (scaled_width > scaled_height) { /* pillar */
+ width = scaled_height / pipe_config->pipe_src_h;
+ if (width & 1)
+ width++;
+ x = (adjusted_mode->hdisplay - width + 1) / 2;
+ y = 0;
+ height = adjusted_mode->vdisplay;
+ } else if (scaled_width < scaled_height) { /* letter */
+ height = scaled_width / pipe_config->pipe_src_w;
+ if (height & 1)
+ height++;
+ y = (adjusted_mode->vdisplay - height + 1) / 2;
+ x = 0;
+ width = adjusted_mode->hdisplay;
+ } else {
+ x = y = 0;
+ width = adjusted_mode->hdisplay;
+ height = adjusted_mode->vdisplay;
+ }
+ }
+ break;
+
+ case DRM_MODE_SCALE_FULLSCREEN:
+ x = y = 0;
+ width = adjusted_mode->hdisplay;
+ height = adjusted_mode->vdisplay;
+ break;
+
+ default:
+ WARN(1, "bad panel fit mode: %d\n", fitting_mode);
+ return;
+ }
+
+done:
+ pipe_config->pch_pfit.pos = (x << 16) | y;
+ pipe_config->pch_pfit.size = (width << 16) | height;
+ pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
+}
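
The DRM_MODE_SCALE_ASPECT branch above decides between pillarboxing and letterboxing by cross-multiplying the two aspect ratios, so no division or floating point is needed: hdisplay/vdisplay > src_w/src_h exactly when hdisplay*src_h > src_w*vdisplay. A small runnable sketch with illustrative numbers:

#include <stdio.h>

/* Cross-multiplied aspect comparison, as in intel_pch_panel_fitting()
 * above; a 5:4 source on a 16:9 panel gets pillarboxed. */
int main(void)
{
	unsigned int hdisplay = 1920, vdisplay = 1080; /* 16:9 panel */
	unsigned int src_w = 1280, src_h = 1024;       /* 5:4 source */
	unsigned int sw = hdisplay * src_h;            /* 1966080 */
	unsigned int sh = src_w * vdisplay;            /* 1382400 */

	if (sw > sh)
		printf("pillarbox, width %u\n", sh / src_h);   /* 1350 */
	else if (sw < sh)
		printf("letterbox, height %u\n", sw / src_w);
	else
		printf("aspects match\n");
	return 0;
}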
+
+static void
+centre_horizontally(struct drm_display_mode *mode,
+ int width)
+{
+ u32 border, sync_pos, blank_width, sync_width;
+
+ /* keep the hsync and hblank widths constant */
+ sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
+ sync_pos = (blank_width - sync_width + 1) / 2;
+
+ border = (mode->hdisplay - width + 1) / 2;
+ border += border & 1; /* make the border even */
+
+ mode->crtc_hdisplay = width;
+ mode->crtc_hblank_start = width + border;
+ mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
+
+ mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
+ mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
+}
+
+static void
+centre_vertically(struct drm_display_mode *mode,
+ int height)
+{
+ u32 border, sync_pos, blank_width, sync_width;
+
+ /* keep the vsync and vblank widths constant */
+ sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
+ sync_pos = (blank_width - sync_width + 1) / 2;
+
+ border = (mode->vdisplay - height + 1) / 2;
+
+ mode->crtc_vdisplay = height;
+ mode->crtc_vblank_start = height + border;
+ mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
+
+ mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
+ mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
+}
+
+static inline u32 panel_fitter_scaling(u32 source, u32 target)
+{
+	/*
+	 * Floating point is not available, so FACTOR is defined to let
+	 * the panel scaling ratio be computed with fixed-point integer
+	 * math instead.
+	 */
+#define ACCURACY 12
+#define FACTOR (1 << ACCURACY)
+ u32 ratio = source * FACTOR / target;
+ return (FACTOR * ratio + FACTOR/2) / FACTOR;
+}
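
A userspace reproduction of the arithmetic above: with ACCURACY = 12 the ratio is a 4.12 fixed-point value, so FACTOR (4096) represents 1.0. Illustrative numbers:

#include <stdio.h>

#define ACCURACY 12
#define FACTOR (1 << ACCURACY)

/* Same two-step computation as panel_fitter_scaling() above. */
static unsigned int scale(unsigned int source, unsigned int target)
{
	unsigned int ratio = source * FACTOR / target;
	return (FACTOR * ratio + FACTOR / 2) / FACTOR;
}

int main(void)
{
	/* 768 source lines on a 1024-line panel: 768*4096/1024 = 3072,
	 * i.e. 0.75 in 4.12 fixed point. */
	printf("0x%03x\n", scale(768, 1024));
	return 0;
}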
+
+static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
+ u32 *pfit_control)
+{
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ u32 scaled_width = adjusted_mode->hdisplay *
+ pipe_config->pipe_src_h;
+ u32 scaled_height = pipe_config->pipe_src_w *
+ adjusted_mode->vdisplay;
+
+ /* 965+ is easy, it does everything in hw */
+ if (scaled_width > scaled_height)
+ *pfit_control |= PFIT_ENABLE |
+ PFIT_SCALING_PILLAR;
+ else if (scaled_width < scaled_height)
+ *pfit_control |= PFIT_ENABLE |
+ PFIT_SCALING_LETTER;
+ else if (adjusted_mode->hdisplay != pipe_config->pipe_src_w)
+ *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
+}
+
+static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
+ u32 *pfit_control, u32 *pfit_pgm_ratios,
+ u32 *border)
+{
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ u32 scaled_width = adjusted_mode->hdisplay *
+ pipe_config->pipe_src_h;
+ u32 scaled_height = pipe_config->pipe_src_w *
+ adjusted_mode->vdisplay;
+ u32 bits;
+
+ /*
+ * For earlier chips we have to calculate the scaling
+ * ratio by hand and program it into the
+ * PFIT_PGM_RATIO register
+ */
+ if (scaled_width > scaled_height) { /* pillar */
+ centre_horizontally(adjusted_mode,
+ scaled_height /
+ pipe_config->pipe_src_h);
+
+ *border = LVDS_BORDER_ENABLE;
+ if (pipe_config->pipe_src_h != adjusted_mode->vdisplay) {
+ bits = panel_fitter_scaling(pipe_config->pipe_src_h,
+ adjusted_mode->vdisplay);
+
+ *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ *pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else if (scaled_width < scaled_height) { /* letter */
+ centre_vertically(adjusted_mode,
+ scaled_width /
+ pipe_config->pipe_src_w);
+
+ *border = LVDS_BORDER_ENABLE;
+ if (pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
+ bits = panel_fitter_scaling(pipe_config->pipe_src_w,
+ adjusted_mode->hdisplay);
+
+ *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ *pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else {
+		/* Aspects match, let the hw scale in both directions */
+ *pfit_control |= (PFIT_ENABLE |
+ VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+}
+
+void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
+ struct intel_crtc_config *pipe_config,
+ int fitting_mode)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+ struct drm_display_mode *adjusted_mode;
+
+ adjusted_mode = &pipe_config->adjusted_mode;
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
+ adjusted_mode->vdisplay == pipe_config->pipe_src_h)
+ goto out;
+
+ switch (fitting_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ /*
+ * For centered modes, we have to calculate border widths &
+ * heights and modify the values programmed into the CRTC.
+ */
+ centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
+ centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
+ border = LVDS_BORDER_ENABLE;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ if (INTEL_INFO(dev)->gen >= 4)
+ i965_scale_aspect(pipe_config, &pfit_control);
+ else
+ i9xx_scale_aspect(pipe_config, &pfit_control,
+ &pfit_pgm_ratios, &border);
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ /*
+ * Full scaling, even if it changes the aspect ratio.
+ * Fortunately this is all done for us in hw.
+ */
+ if (pipe_config->pipe_src_h != adjusted_mode->vdisplay ||
+ pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
+ pfit_control |= PFIT_ENABLE;
+ if (INTEL_INFO(dev)->gen >= 4)
+ pfit_control |= PFIT_SCALING_AUTO;
+ else
+ pfit_control |= (VERT_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_AUTO_SCALE |
+ HORIZ_INTERP_BILINEAR);
+ }
+ break;
+ default:
+ WARN(1, "bad panel fit mode: %d\n", fitting_mode);
+ return;
+ }
+
+ /* 965+ wants fuzzy fitting */
+ /* FIXME: handle multiple panels by failing gracefully */
+ if (INTEL_INFO(dev)->gen >= 4)
+ pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
+ PFIT_FILTER_FUZZY);
+
+out:
+ if ((pfit_control & PFIT_ENABLE) == 0) {
+ pfit_control = 0;
+ pfit_pgm_ratios = 0;
+ }
+
+ /* Make sure pre-965 set dither correctly for 18bpp panels. */
+ if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+ pipe_config->gmch_pfit.control = pfit_control;
+ pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
+ pipe_config->gmch_pfit.lvds_border_bits = border;
+}
+
+enum drm_connector_status
+intel_panel_detect(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Assume that the BIOS does not lie through the OpRegion... */
+ if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
+ return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
+ connector_status_connected :
+ connector_status_disconnected;
+ }
+
+ switch (i915.panel_ignore_lid) {
+ case -2:
+ return connector_status_connected;
+ case -1:
+ return connector_status_disconnected;
+ default:
+ return connector_status_unknown;
+ }
+}
+
+static u32 intel_panel_compute_brightness(struct intel_connector *connector,
+ u32 val)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+
+ WARN_ON(panel->backlight.max == 0);
+
+ if (i915.invert_brightness < 0)
+ return val;
+
+ if (i915.invert_brightness > 0 ||
+ dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
+ return panel->backlight.max - val;
+ }
+
+ return val;
+}
+
+static u32 bdw_get_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 pch_get_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 i9xx_get_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 val;
+
+ val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (INTEL_INFO(dev)->gen < 4)
+ val >>= 1;
+
+ if (panel->backlight.combination_mode) {
+ u8 lbpc;
+
+ pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
+ val *= lbpc;
+ }
+
+ return val;
+}
+
+static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 vlv_get_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+
+ return _vlv_get_backlight(dev, pipe);
+}
+
+static u32 intel_panel_get_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+
+ val = dev_priv->display.get_backlight(connector);
+ val = intel_panel_compute_brightness(connector, val);
+
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+
+ DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+ return val;
+}
+
+static void bdw_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
+}
+
+static void pch_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CPU_CTL, tmp | level);
+}
+
+static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 tmp, mask;
+
+ WARN_ON(panel->backlight.max == 0);
+
+ if (panel->backlight.combination_mode) {
+ u8 lbpc;
+
+ lbpc = level * 0xfe / panel->backlight.max + 1;
+ level /= lbpc;
+ pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
+ }
+
+ if (IS_GEN4(dev)) {
+ mask = BACKLIGHT_DUTY_CYCLE_MASK;
+ } else {
+ level <<= 1;
+ mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
+ }
+
+ tmp = I915_READ(BLC_PWM_CTL) & ~mask;
+ I915_WRITE(BLC_PWM_CTL, tmp | level);
+}
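
i9xx_set_backlight() above splits the requested level between a coarse legacy brightness byte (LBPC, written through PCI config space) and a fine PWM duty cycle; i9xx_get_backlight() multiplies them back together. A hedged userspace sketch of that round trip, with illustrative values:

#include <stdio.h>

/* Combination-mode split: lbpc carries the coarse factor, pwm the
 * fine duty cycle; the read side recombines them by multiplying. */
int main(void)
{
	unsigned int max = 0x1fff, level = 0x1000;
	unsigned char lbpc = level * 0xfe / max + 1; /* 128 */
	unsigned int pwm = level / lbpc;             /* 32 */

	printf("lbpc=%u pwm=%u readback=%u (requested %u)\n",
	       lbpc, pwm, pwm * lbpc, level);
	return 0;
}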
+
+static void vlv_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 tmp;
+
+ tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
+}
+
+static void
+intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+
+ level = intel_panel_compute_brightness(connector, level);
+ dev_priv->display.set_backlight(connector, level);
+}
+
+/* set backlight brightness to level in range [0..max] */
+void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
+ u32 max)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 freq;
+ unsigned long flags;
+ u64 n;
+
+ if (!panel->backlight.present || pipe == INVALID_PIPE)
+ return;
+
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+
+ WARN_ON(panel->backlight.max == 0);
+
+ /* scale to hardware max, but be careful to not overflow */
+ freq = panel->backlight.max;
+ n = (u64)level * freq;
+ do_div(n, max);
+ level = n;
+
+ panel->backlight.level = level;
+ if (panel->backlight.device)
+ panel->backlight.device->props.brightness = level;
+
+ if (panel->backlight.enabled)
+ intel_panel_actually_set_backlight(connector, level);
+
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
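
The rescale above maps the caller's [0..max] level onto the hardware's own maximum, and the intermediate product is widened to 64 bits because level * freq can overflow a u32 (the kernel performs the division with do_div() so it also works on 32-bit targets). In plain userspace C:

#include <stdint.h>
#include <stdio.h>

/* Widen before multiplying: 3000000 * 8191 does not fit in 32 bits. */
int main(void)
{
	uint32_t level = 3000000, max = 4000000, freq = 0x1fff;
	uint32_t scaled = (uint64_t)level * freq / max;

	printf("%u\n", scaled); /* 6143 */
	return 0;
}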
+
+static void pch_disable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ intel_panel_actually_set_backlight(connector, 0);
+
+ tmp = I915_READ(BLC_PWM_CPU_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+
+ tmp = I915_READ(BLC_PWM_PCH_CTL1);
+ I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+}
+
+static void i9xx_disable_backlight(struct intel_connector *connector)
+{
+ intel_panel_actually_set_backlight(connector, 0);
+}
+
+static void i965_disable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ intel_panel_actually_set_backlight(connector, 0);
+
+ tmp = I915_READ(BLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+}
+
+static void vlv_disable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 tmp;
+
+ intel_panel_actually_set_backlight(connector, 0);
+
+ tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+ I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
+}
+
+void intel_panel_disable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ unsigned long flags;
+
+ if (!panel->backlight.present || pipe == INVALID_PIPE)
+ return;
+
+ /*
+ * Do not disable backlight on the vgaswitcheroo path. When switching
+ * away from i915, the other client may depend on i915 to handle the
+ * backlight. This will leave the backlight on unnecessarily when
+ * another client is not activated.
+ */
+ if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+ DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
+ return;
+ }
+
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+
+ panel->backlight.enabled = false;
+ dev_priv->display.disable_backlight(connector);
+
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
+
+static void bdw_enable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 pch_ctl1, pch_ctl2;
+
+ pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+ DRM_DEBUG_KMS("pch backlight already enabled\n");
+ pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ }
+
+ pch_ctl2 = panel->backlight.max << 16;
+ I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+
+ pch_ctl1 = 0;
+ if (panel->backlight.active_low_pwm)
+ pch_ctl1 |= BLM_PCH_POLARITY;
+
+ /* BDW always uses the pch pwm controls. */
+ pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
+
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ POSTING_READ(BLC_PWM_PCH_CTL1);
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+
+ /* This won't stick until the above enable. */
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
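
The writes above rely on the PCH PWM register layout: BLC_PWM_PCH_CTL2 carries the modulation frequency (which doubles as the maximum duty count) in its high 16 bits and the current duty cycle in the low 16 bits. That is why bdw_get_backlight() masks with BACKLIGHT_DUTY_CYCLE_MASK and bdw_setup_backlight() recovers the maximum with a 16-bit shift. A sketch with illustrative values:

#include <stdio.h>

/* Pack/unpack of the freq|duty register layout described above. */
int main(void)
{
	unsigned int max = 0x1387, level = 0x09c3; /* ~50% duty */
	unsigned int ctl2 = (max << 16) | level;

	printf("freq=%u duty=%u\n", ctl2 >> 16, ctl2 & 0xffff);
	return 0;
}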
+
+static void pch_enable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ enum transcoder cpu_transcoder =
+ intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2;
+
+ cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+ if (cpu_ctl2 & BLM_PWM_ENABLE) {
+ WARN(1, "cpu backlight already enabled\n");
+ cpu_ctl2 &= ~BLM_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+ }
+
+ pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+ DRM_DEBUG_KMS("pch backlight already enabled\n");
+ pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ }
+
+ if (cpu_transcoder == TRANSCODER_EDP)
+ cpu_ctl2 = BLM_TRANSCODER_EDP;
+ else
+ cpu_ctl2 = BLM_PIPE(cpu_transcoder);
+ I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+ POSTING_READ(BLC_PWM_CPU_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+
+ /* This won't stick until the above enable. */
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+ pch_ctl2 = panel->backlight.max << 16;
+ I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+
+ pch_ctl1 = 0;
+ if (panel->backlight.active_low_pwm)
+ pch_ctl1 |= BLM_PCH_POLARITY;
+
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ POSTING_READ(BLC_PWM_PCH_CTL1);
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+}
+
+static void i9xx_enable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, freq;
+
+ ctl = I915_READ(BLC_PWM_CTL);
+ if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
+ WARN(1, "backlight already enabled\n");
+ I915_WRITE(BLC_PWM_CTL, 0);
+ }
+
+ freq = panel->backlight.max;
+ if (panel->backlight.combination_mode)
+ freq /= 0xff;
+
+ ctl = freq << 17;
+ if (panel->backlight.combination_mode)
+ ctl |= BLM_LEGACY_MODE;
+ if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
+ ctl |= BLM_POLARITY_PNV;
+
+ I915_WRITE(BLC_PWM_CTL, ctl);
+ POSTING_READ(BLC_PWM_CTL);
+
+ /* XXX: combine this into above write? */
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
+
+static void i965_enable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 ctl, ctl2, freq;
+
+ ctl2 = I915_READ(BLC_PWM_CTL2);
+ if (ctl2 & BLM_PWM_ENABLE) {
+ WARN(1, "backlight already enabled\n");
+ ctl2 &= ~BLM_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_CTL2, ctl2);
+ }
+
+ freq = panel->backlight.max;
+ if (panel->backlight.combination_mode)
+ freq /= 0xff;
+
+ ctl = freq << 16;
+ I915_WRITE(BLC_PWM_CTL, ctl);
+
+ ctl2 = BLM_PIPE(pipe);
+ if (panel->backlight.combination_mode)
+ ctl2 |= BLM_COMBINATION_MODE;
+ if (panel->backlight.active_low_pwm)
+ ctl2 |= BLM_POLARITY_I965;
+ I915_WRITE(BLC_PWM_CTL2, ctl2);
+ POSTING_READ(BLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
+
+static void vlv_enable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 ctl, ctl2;
+
+ ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+ if (ctl2 & BLM_PWM_ENABLE) {
+ WARN(1, "backlight already enabled\n");
+ ctl2 &= ~BLM_PWM_ENABLE;
+ I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+ }
+
+ ctl = panel->backlight.max << 16;
+ I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl);
+
+ /* XXX: combine this into above write? */
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+ ctl2 = 0;
+ if (panel->backlight.active_low_pwm)
+ ctl2 |= BLM_POLARITY_I965;
+ I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+ POSTING_READ(VLV_BLC_PWM_CTL2(pipe));
+ I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
+}
+
+void intel_panel_enable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ unsigned long flags;
+
+ if (!panel->backlight.present || pipe == INVALID_PIPE)
+ return;
+
+ DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+
+ WARN_ON(panel->backlight.max == 0);
+
+ if (panel->backlight.level == 0) {
+ panel->backlight.level = panel->backlight.max;
+ if (panel->backlight.device)
+ panel->backlight.device->props.brightness =
+ panel->backlight.level;
+ }
+
+ dev_priv->display.enable_backlight(connector);
+ panel->backlight.enabled = true;
+
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
+
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+static int intel_backlight_device_update_status(struct backlight_device *bd)
+{
+ struct intel_connector *connector = bl_get_data(bd);
+ struct drm_device *dev = connector->base.dev;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
+ bd->props.brightness, bd->props.max_brightness);
+ intel_panel_set_backlight(connector, bd->props.brightness,
+ bd->props.max_brightness);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ return 0;
+}
+
+static int intel_backlight_device_get_brightness(struct backlight_device *bd)
+{
+ struct intel_connector *connector = bl_get_data(bd);
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ intel_runtime_pm_get(dev_priv);
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ ret = intel_panel_get_backlight(connector);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ intel_runtime_pm_put(dev_priv);
+
+ return ret;
+}
+
+static const struct backlight_ops intel_backlight_device_ops = {
+ .update_status = intel_backlight_device_update_status,
+ .get_brightness = intel_backlight_device_get_brightness,
+};
+
+static int intel_backlight_device_register(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+ struct backlight_properties props;
+
+ if (WARN_ON(panel->backlight.device))
+ return -ENODEV;
+
+ BUG_ON(panel->backlight.max == 0);
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+ props.brightness = panel->backlight.level;
+ props.max_brightness = panel->backlight.max;
+
+ /*
+ * Note: using the same name independent of the connector prevents
+ * registration of multiple backlight devices in the driver.
+ */
+ panel->backlight.device =
+ backlight_device_register("intel_backlight",
+ connector->base.kdev,
+ connector,
+ &intel_backlight_device_ops, &props);
+
+ if (IS_ERR(panel->backlight.device)) {
+ DRM_ERROR("Failed to register backlight: %ld\n",
+ PTR_ERR(panel->backlight.device));
+ panel->backlight.device = NULL;
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ if (panel->backlight.device) {
+ backlight_device_unregister(panel->backlight.device);
+ panel->backlight.device = NULL;
+ }
+}
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static int intel_backlight_device_register(struct intel_connector *connector)
+{
+ return 0;
+}
+static void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+/*
+ * Note: The setup hooks can't assume pipe is set!
+ *
+ * XXX: Query mode clock or hardware clock and program PWM modulation frequency
+ * appropriately when it's 0. Use VBT and/or sane defaults.
+ */
+static int bdw_setup_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 pch_ctl1, pch_ctl2, val;
+
+ pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+ pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+ panel->backlight.max = pch_ctl2 >> 16;
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = bdw_get_backlight(connector);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
+ panel->backlight.level != 0;
+
+ return 0;
+}
+
+static int pch_setup_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+
+ pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+ pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+ panel->backlight.max = pch_ctl2 >> 16;
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = pch_get_backlight(connector);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+ panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
+ (pch_ctl1 & BLM_PCH_PWM_ENABLE) && panel->backlight.level != 0;
+
+ return 0;
+}
+
+static int i9xx_setup_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, val;
+
+ ctl = I915_READ(BLC_PWM_CTL);
+
+ if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev))
+ panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
+
+ if (IS_PINEVIEW(dev))
+ panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
+
+ panel->backlight.max = ctl >> 17;
+ if (panel->backlight.combination_mode)
+ panel->backlight.max *= 0xff;
+
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = i9xx_get_backlight(connector);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ panel->backlight.enabled = panel->backlight.level != 0;
+
+ return 0;
+}
+
+static int i965_setup_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, ctl2, val;
+
+ ctl2 = I915_READ(BLC_PWM_CTL2);
+ panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
+ panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+ ctl = I915_READ(BLC_PWM_CTL);
+ panel->backlight.max = ctl >> 16;
+ if (panel->backlight.combination_mode)
+ panel->backlight.max *= 0xff;
+
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = i9xx_get_backlight(connector);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
+ panel->backlight.level != 0;
+
+ return 0;
+}
+
+static int vlv_setup_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe;
+ u32 ctl, ctl2, val;
+
+ for_each_pipe(pipe) {
+ u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+
+ /* Skip if the modulation freq is already set */
+ if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
+ continue;
+
+ cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
+ cur_val);
+ }
+
+ ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
+ panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+ ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
+ panel->backlight.max = ctl >> 16;
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = _vlv_get_backlight(dev, PIPE_A);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
+ panel->backlight.level != 0;
+
+ return 0;
+}
+
+int intel_panel_setup_backlight(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_panel *panel = &intel_connector->panel;
+ unsigned long flags;
+ int ret;
+
+ if (!dev_priv->vbt.backlight.present) {
+ if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
+ DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n");
+ } else {
+ DRM_DEBUG_KMS("no backlight present per VBT\n");
+ return 0;
+ }
+ }
+
+ /* set level and max in panel struct */
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ ret = dev_priv->display.setup_backlight(intel_connector);
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+
+ if (ret) {
+ DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
+ connector->name);
+ return ret;
+ }
+
+ intel_backlight_device_register(intel_connector);
+
+ panel->backlight.present = true;
+
+ DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, "
+ "sysfs interface %sregistered\n",
+ panel->backlight.enabled ? "enabled" : "disabled",
+ panel->backlight.level, panel->backlight.max,
+ panel->backlight.device ? "" : "not ");
+
+ return 0;
+}
+
+void intel_panel_destroy_backlight(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_panel *panel = &intel_connector->panel;
+
+ panel->backlight.present = false;
+ intel_backlight_device_unregister(intel_connector);
+}
+
+/* Set up chip specific backlight functions */
+void intel_panel_init_backlight_funcs(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_BROADWELL(dev)) {
+ dev_priv->display.setup_backlight = bdw_setup_backlight;
+ dev_priv->display.enable_backlight = bdw_enable_backlight;
+ dev_priv->display.disable_backlight = pch_disable_backlight;
+ dev_priv->display.set_backlight = bdw_set_backlight;
+ dev_priv->display.get_backlight = bdw_get_backlight;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.setup_backlight = pch_setup_backlight;
+ dev_priv->display.enable_backlight = pch_enable_backlight;
+ dev_priv->display.disable_backlight = pch_disable_backlight;
+ dev_priv->display.set_backlight = pch_set_backlight;
+ dev_priv->display.get_backlight = pch_get_backlight;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.setup_backlight = vlv_setup_backlight;
+ dev_priv->display.enable_backlight = vlv_enable_backlight;
+ dev_priv->display.disable_backlight = vlv_disable_backlight;
+ dev_priv->display.set_backlight = vlv_set_backlight;
+ dev_priv->display.get_backlight = vlv_get_backlight;
+ } else if (IS_GEN4(dev)) {
+ dev_priv->display.setup_backlight = i965_setup_backlight;
+ dev_priv->display.enable_backlight = i965_enable_backlight;
+ dev_priv->display.disable_backlight = i965_disable_backlight;
+ dev_priv->display.set_backlight = i9xx_set_backlight;
+ dev_priv->display.get_backlight = i9xx_get_backlight;
+ } else {
+ dev_priv->display.setup_backlight = i9xx_setup_backlight;
+ dev_priv->display.enable_backlight = i9xx_enable_backlight;
+ dev_priv->display.disable_backlight = i9xx_disable_backlight;
+ dev_priv->display.set_backlight = i9xx_set_backlight;
+ dev_priv->display.get_backlight = i9xx_get_backlight;
+ }
+}
+
+int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *downclock_mode)
+{
+ panel->fixed_mode = fixed_mode;
+ panel->downclock_mode = downclock_mode;
+
+ return 0;
+}
+
+void intel_panel_fini(struct intel_panel *panel)
+{
+ struct intel_connector *intel_connector =
+ container_of(panel, struct intel_connector, panel);
+
+ if (panel->fixed_mode)
+ drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+
+ if (panel->downclock_mode)
+ drm_mode_destroy(intel_connector->base.dev,
+ panel->downclock_mode);
+}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
new file mode 100644
index 00000000000..ee72807069e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -0,0 +1,6612 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "../../../platform/x86/intel_ips.h"
+#include <linux/module.h>
+#include <linux/vgaarb.h>
+#include <drm/i915_powerwell.h>
+#include <linux/pm_runtime.h>
+
+/**
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, drawing as little as 0V while in this stage.
+ * The stage is entered automatically when the GPU is idle and RC6 support
+ * is enabled, and the GPU wakes up automatically as soon as a new workload
+ * arrives.
+ *
+ * There are different RC6 modes available in Intel GPUs, which differ in
+ * the latency required to enter and leave RC6 and in the voltage consumed
+ * by the GPU in the different states.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
+ * RC6pp is the deepest RC6. Their support by hardware varies according to
+ * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
+ * the one which brings the most power savings; deeper states save more
+ * power, but require higher latency to switch to and wake up.
+ */
+#define INTEL_RC6_ENABLE (1<<0)
+#define INTEL_RC6p_ENABLE (1<<1)
+#define INTEL_RC6pp_ENABLE (1<<2)
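
A hedged illustration of how these flag bits compose into the mask the enabling code consumes; the mask below allows RC6 and deep RC6 but keeps the deepest state off:

#include <stdio.h>

#define INTEL_RC6_ENABLE   (1 << 0)
#define INTEL_RC6p_ENABLE  (1 << 1)
#define INTEL_RC6pp_ENABLE (1 << 2)

int main(void)
{
	unsigned int mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;

	printf("RC6:%d RC6p:%d RC6pp:%d\n",
	       !!(mask & INTEL_RC6_ENABLE),
	       !!(mask & INTEL_RC6p_ENABLE),
	       !!(mask & INTEL_RC6pp_ENABLE));
	return 0;
}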
+
+/* FBC, or Frame Buffer Compression, is a technique employed to compress the
+ * framebuffer contents in-memory, aiming at reducing the required bandwidth
+ * during in-memory transfers and, therefore, reducing power consumption.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns.
+ *
+ * FBC-related functionality can be enabled by means of the
+ * i915.enable_fbc module parameter.
+ */
+
+static void i8xx_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+static void i8xx_enable_fbc(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int cfb_pitch;
+ int i;
+ u32 fbc_ctl;
+
+ cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
+ if (fb->pitches[0] < cfb_pitch)
+ cfb_pitch = fb->pitches[0];
+
+ /* FBC_CTL wants 32B or 64B units */
+ if (IS_GEN2(dev))
+ cfb_pitch = (cfb_pitch / 32) - 1;
+ else
+ cfb_pitch = (cfb_pitch / 64) - 1;
+
+ /* Clear old tags */
+ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+ I915_WRITE(FBC_TAG + (i * 4), 0);
+
+ if (IS_GEN4(dev)) {
+ u32 fbc_ctl2;
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
+ I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+ I915_WRITE(FBC_FENCE_OFF, crtc->y);
+ }
+
+ /* enable it... */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
+ fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
+ if (IS_I945GM(dev))
+ fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
+ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= obj->fence_reg;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
+ cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
+}
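
The stride programmed above is encoded in hardware units: i8xx_enable_fbc() converts the byte pitch into 32-byte units on gen2 and 64-byte units on later generations, minus one. A quick check with a 1920-pixel, 32bpp scanout:

#include <stdio.h>

/* FBC_CTL stride encoding: byte pitch divided into 32B/64B units. */
int main(void)
{
	unsigned int pitch_bytes = 1920 * 4; /* 7680 */
	unsigned int gen2 = pitch_bytes / 32 - 1;  /* 239 */
	unsigned int later = pitch_bytes / 64 - 1; /* 119 */

	printf("gen2: %u, gen3+: %u\n", gen2, later);
	return 0;
}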
+
+static bool i8xx_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static void g4x_enable_fbc(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 dpfc_ctl;
+
+ dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ else
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+
+ I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+
+ /* enable it... */
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+}
+
+static void g4x_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool g4x_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 blt_ecoskpd;
+
+ /* Make sure blitter notifies FBC of writes */
+
+	/* Blitter is part of the Media powerwell on VLV. This parameter
+	 * has no impact on other platforms for now. */
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
+
+ blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT);
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ POSTING_READ(GEN6_BLITTER_ECOSKPD);
+
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
+}
+
+static void ironlake_enable_fbc(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 dpfc_ctl;
+
+ dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ else
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ dpfc_ctl |= DPFC_CTL_FENCE_EN;
+ if (IS_GEN5(dev))
+ dpfc_ctl |= obj->fence_reg;
+
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
+ /* enable it... */
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ if (IS_GEN6(dev)) {
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ sandybridge_blit_fbc_update(dev);
+ }
+
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+}
+
+static void ironlake_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool ironlake_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void gen7_enable_fbc(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 dpfc_ctl;
+
+ dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ else
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
+
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ if (IS_IVYBRIDGE(dev)) {
+ /* WaFbcAsynchFlipDisableFbcQueue:ivb */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
+ } else {
+ /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
+ I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
+ I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
+ HSW_FBCQ_DIS);
+ }
+
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+
+ sandybridge_blit_fbc_update(dev);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+}
+
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.fbc_enabled)
+ return false;
+
+ return dev_priv->display.fbc_enabled(dev);
+}
+
+static void intel_fbc_work_fn(struct work_struct *__work)
+{
+ struct intel_fbc_work *work =
+ container_of(to_delayed_work(__work),
+ struct intel_fbc_work, work);
+ struct drm_device *dev = work->crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ if (work == dev_priv->fbc.fbc_work) {
+ /* Double check that we haven't switched fb without cancelling
+ * the prior work.
+ */
+ if (work->crtc->primary->fb == work->fb) {
+ dev_priv->display.enable_fbc(work->crtc);
+
+ dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
+ dev_priv->fbc.y = work->crtc->y;
+ }
+
+ dev_priv->fbc.fbc_work = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(work);
+}
+
+static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->fbc.fbc_work == NULL)
+ return;
+
+ DRM_DEBUG_KMS("cancelling pending FBC enable\n");
+
+ /* Synchronisation is provided by struct_mutex and checking of
+ * dev_priv->fbc.fbc_work, so we can perform the cancellation
+ * entirely asynchronously.
+ */
+ if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
+ /* tasklet was killed before being run, clean up */
+ kfree(dev_priv->fbc.fbc_work);
+
+ /* Mark the work as no longer wanted so that if it does
+ * wake-up (because the work was already running and waiting
+	 * for our mutex), it will discover that it is no longer
+	 * necessary to run.
+ */
+ dev_priv->fbc.fbc_work = NULL;
+}
+
+static void intel_enable_fbc(struct drm_crtc *crtc)
+{
+ struct intel_fbc_work *work;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.enable_fbc)
+ return;
+
+ intel_cancel_fbc_work(dev_priv);
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (work == NULL) {
+ DRM_ERROR("Failed to allocate FBC work structure\n");
+ dev_priv->display.enable_fbc(crtc);
+ return;
+ }
+
+ work->crtc = crtc;
+ work->fb = crtc->primary->fb;
+ INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
+
+ dev_priv->fbc.fbc_work = work;
+
+ /* Delay the actual enabling to let pageflipping cease and the
+ * display to settle before starting the compression. Note that
+ * this delay also serves a second purpose: it allows for a
+ * vblank to pass after disabling the FBC before we attempt
+ * to modify the control registers.
+ *
+ * A more complicated solution would involve tracking vblanks
+ * following the termination of the page-flipping sequence
+ * and indeed performing the enable as a co-routine and not
+ * waiting synchronously upon the vblank.
+ *
+ * WaFbcWaitForVBlankBeforeEnable:ilk,snb
+ */
+ schedule_delayed_work(&work->work, msecs_to_jiffies(50));
+}
+
+void intel_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_cancel_fbc_work(dev_priv);
+
+ if (!dev_priv->display.disable_fbc)
+ return;
+
+ dev_priv->display.disable_fbc(dev);
+ dev_priv->fbc.plane = -1;
+}
+
+static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
+ enum no_fbc_reason reason)
+{
+ if (dev_priv->fbc.no_fbc_reason == reason)
+ return false;
+
+ dev_priv->fbc.no_fbc_reason = reason;
+ return true;
+}
+
+/**
+ * intel_update_fbc - enable/disable FBC as needed
+ * @dev: the drm_device
+ *
+ * Set up the framebuffer compression hardware at mode set time. We
+ * enable it if possible:
+ * - plane A only (on pre-965)
+ * - no pixel multiply/line duplication
+ * - no alpha buffer discard
+ * - no dual wide
+ * - framebuffer <= max_hdisplay in width, max_vdisplay in height
+ *
+ * We can't assume that any compression will take place (worst case),
+ * so the compressed buffer has to be the same size as the uncompressed
+ * one. It also must reside (along with the line length buffer) in
+ * stolen memory.
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+void intel_update_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ const struct drm_display_mode *adjusted_mode;
+ unsigned int max_width, max_height;
+
+ if (!HAS_FBC(dev)) {
+ set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
+ return;
+ }
+
+ if (!i915.powersave) {
+ if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
+ return;
+ }
+
+ /*
+ * If FBC is already on, we just have to verify that we can
+ * keep it that way...
+ * Need to disable if:
+ * - more than one pipe is active
+ * - changing FBC params (stride, fence, mode)
+ * - new fb is too large to fit in compressed buffer
+ * - going to an unsupported config (interlace, pixel multiply, etc.)
+ */
+ for_each_crtc(dev, tmp_crtc) {
+ if (intel_crtc_active(tmp_crtc) &&
+ to_intel_crtc(tmp_crtc)->primary_enabled) {
+ if (crtc) {
+ if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ goto out_disable;
+ }
+ crtc = tmp_crtc;
+ }
+ }
+
+ if (!crtc || crtc->primary->fb == NULL) {
+ if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
+ DRM_DEBUG_KMS("no output, disabling\n");
+ goto out_disable;
+ }
+
+ intel_crtc = to_intel_crtc(crtc);
+ fb = crtc->primary->fb;
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+ adjusted_mode = &intel_crtc->config.adjusted_mode;
+
+ if (i915.enable_fbc < 0) {
+ if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
+ DRM_DEBUG_KMS("disabled per chip default\n");
+ goto out_disable;
+ }
+ if (!i915.enable_fbc) {
+ if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
+ goto out_disable;
+ }
+ if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
+ if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+ "disabling\n");
+ goto out_disable;
+ }
+
+ if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ max_width = 4096;
+ max_height = 2048;
+ } else {
+ max_width = 2048;
+ max_height = 1536;
+ }
+ if (intel_crtc->config.pipe_src_w > max_width ||
+ intel_crtc->config.pipe_src_h > max_height) {
+ if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ goto out_disable;
+ }
+ if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
+ intel_crtc->plane != PLANE_A) {
+ if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
+ DRM_DEBUG_KMS("plane not A, disabling compression\n");
+ goto out_disable;
+ }
+
+ /* The use of a CPU fence is mandatory in order to detect writes
+ * by the CPU to the scanout and trigger updates to the FBC.
+ */
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
+ DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+ goto out_disable;
+ }
+
+ /* If the kernel debugger is active, always disable compression */
+ if (in_dbg_master())
+ goto out_disable;
+
+ if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+ if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
+ DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+ goto out_disable;
+ }
+
+ /* If the scanout has not changed, don't modify the FBC settings.
+ * Note that we make the fundamental assumption that the fb->obj
+ * cannot be unpinned (and have its GTT offset and fence revoked)
+ * without first being decoupled from the scanout and FBC disabled.
+ */
+ if (dev_priv->fbc.plane == intel_crtc->plane &&
+ dev_priv->fbc.fb_id == fb->base.id &&
+ dev_priv->fbc.y == crtc->y)
+ return;
+
+ if (intel_fbc_enabled(dev)) {
+ /* We update FBC along two paths, after changing fb/crtc
+ * configuration (modeswitching) and after page-flipping
+ * finishes. For the latter, we know that not only did
+ * we disable the FBC at the start of the page-flip
+ * sequence, but also more than one vblank has passed.
+ *
+ * For the former case of modeswitching, it is possible
+ * to switch between two FBC valid configurations
+ * instantaneously so we do need to disable the FBC
+ * before we can modify its control registers. We also
+ * have to wait for the next vblank for that to take
+ * effect. However, since we delay enabling FBC we can
+ * assume that a vblank has passed since disabling and
+ * that we can safely alter the registers in the deferred
+ * callback.
+ *
+ * In the scenario that we go from a valid to invalid
+ * and then back to valid FBC configuration we have
+ * no strict enforcement that a vblank occurred since
+ * disabling the FBC. However, along all current pipe
+ * disabling paths we do need to wait for a vblank at
+ * some point. And we wait before enabling FBC anyway.
+ */
+ DRM_DEBUG_KMS("disabling active FBC for update\n");
+ intel_disable_fbc(dev);
+ }
+
+ intel_enable_fbc(crtc);
+ dev_priv->fbc.no_fbc_reason = FBC_OK;
+ return;
+
+out_disable:
+ /* Multiple disables should be harmless */
+ if (intel_fbc_enabled(dev)) {
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
+ intel_disable_fbc(dev);
+ }
+ i915_gem_stolen_cleanup_compression(dev);
+}
+
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ tmp = I915_READ(CLKCFG);
+
+ switch (tmp & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_533:
+ dev_priv->fsb_freq = 533; /* 133*4 */
+ break;
+ case CLKCFG_FSB_800:
+ dev_priv->fsb_freq = 800; /* 200*4 */
+ break;
+ case CLKCFG_FSB_667:
+ dev_priv->fsb_freq = 667; /* 167*4 */
+ break;
+ case CLKCFG_FSB_400:
+ dev_priv->fsb_freq = 400; /* 100*4 */
+ break;
+ }
+
+ switch (tmp & CLKCFG_MEM_MASK) {
+ case CLKCFG_MEM_533:
+ dev_priv->mem_freq = 533;
+ break;
+ case CLKCFG_MEM_667:
+ dev_priv->mem_freq = 667;
+ break;
+ case CLKCFG_MEM_800:
+ dev_priv->mem_freq = 800;
+ break;
+ }
+
+ /* detect pineview DDR3 setting */
+ tmp = I915_READ(CSHRDDR3CTL);
+ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 ddrpll, csipll;
+
+ ddrpll = I915_READ16(DDRMPLL1);
+ csipll = I915_READ16(CSIPLL0);
+
+ switch (ddrpll & 0xff) {
+ case 0xc:
+ dev_priv->mem_freq = 800;
+ break;
+ case 0x10:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 0x14:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 0x18:
+ dev_priv->mem_freq = 1600;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
+ dev_priv->mem_freq = 0;
+ break;
+ }
+
+ dev_priv->ips.r_t = dev_priv->mem_freq;
+
+ switch (csipll & 0x3ff) {
+ case 0x00c:
+ dev_priv->fsb_freq = 3200;
+ break;
+ case 0x00e:
+ dev_priv->fsb_freq = 3733;
+ break;
+ case 0x010:
+ dev_priv->fsb_freq = 4266;
+ break;
+ case 0x012:
+ dev_priv->fsb_freq = 4800;
+ break;
+ case 0x014:
+ dev_priv->fsb_freq = 5333;
+ break;
+ case 0x016:
+ dev_priv->fsb_freq = 5866;
+ break;
+ case 0x018:
+ dev_priv->fsb_freq = 6400;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
+ dev_priv->fsb_freq = 0;
+ break;
+ }
+
+ if (dev_priv->fsb_freq == 3200) {
+ dev_priv->ips.c_m = 0;
+ } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+ dev_priv->ips.c_m = 1;
+ } else {
+ dev_priv->ips.c_m = 2;
+ }
+}
+
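+/*
+ * Table columns, assuming struct cxsr_latency's field order matches its
+ * users below: is_desktop, is_ddr3, fsb_freq (MHz), mem_freq (MHz), then
+ * four latencies in ns: display_sr, display_hpll_disable, cursor_sr,
+ * cursor_hpll_disable. The HPLL-off entries sit roughly 30000 ns above
+ * their self-refresh counterparts.
+ */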
+static const struct cxsr_latency cxsr_latency_table[] = {
+ {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
+ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
+ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
+ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
+ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
+
+ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
+ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
+ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
+ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
+ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
+
+ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
+ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
+ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
+ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
+ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
+
+ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
+ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
+ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
+ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
+ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
+
+ {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
+ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
+ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
+ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
+ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
+
+ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
+ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
+ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
+ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
+};
+
+static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
+ int is_ddr3,
+ int fsb,
+ int mem)
+{
+ const struct cxsr_latency *latency;
+ int i;
+
+ if (fsb == 0 || mem == 0)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
+ latency = &cxsr_latency_table[i];
+ if (is_desktop == latency->is_desktop &&
+ is_ddr3 == latency->is_ddr3 &&
+ fsb == latency->fsb_freq && mem == latency->mem_freq)
+ return latency;
+ }
+
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+
+ return NULL;
+}
+
+static void pineview_disable_cxsr(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* deactivate cxsr */
+ I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+}
+
+/*
+ * Latency for FIFO fetches is dependent on several factors:
+ * - memory configuration (speed, channels)
+ * - chipset
+ * - current MCH state
+ * It can be fairly high in some situations, so here we assume a fairly
+ * pessimal value. It's a tradeoff between extra memory fetches (if we
+ * set this value too high, the FIFO will fetch frequently to stay full)
+ * and power consumption (set it too low to save power and we might see
+ * FIFO underruns and display "flicker").
+ *
+ * A value of 5us seems to be a good balance; safe for very low end
+ * platforms but not overly aggressive on lower latency configs.
+ */
+static const int latency_ns = 5000;
+
+static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ if (plane)
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+static int i830_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x1ff;
+ if (plane)
+ size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+static int i845_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 2; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A",
+ size);
+
+ return size;
+}
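+
+/*
+ * An illustrative DSPARB decode for the helpers above: if the low split
+ * field reads 48 and the following field reads 96, plane A is reported
+ * as 48 FIFO entries and plane B as 96 - 48 = 48. The i830/i845 variants
+ * additionally shift the raw value right to convert it into cachelines.
+ */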
+
+/* Pineview has different values for various configs */
+static const struct intel_watermark_params pineview_display_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_display_hplloff_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_HPLLOFF_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_cursor_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params g4x_wm_info = {
+ G4X_FIFO_SIZE,
+ G4X_MAX_WM,
+ G4X_MAX_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params g4x_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_wm_info = {
+ VALLEYVIEW_FIFO_SIZE,
+ VALLEYVIEW_MAX_WM,
+ VALLEYVIEW_MAX_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ VALLEYVIEW_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i965_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ I915_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i945_wm_info = {
+ I945_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i915_wm_info = {
+ I915_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i830_wm_info = {
+ I855GM_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I830_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i845_wm_info = {
+ I830_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I830_FIFO_LINE_SIZE
+};
+
+/**
+ * intel_calculate_wm - calculate watermark level
+ * @clock_in_khz: pixel clock
+ * @wm: chip FIFO params
+ * @fifo_size: size of the display FIFO for this plane
+ * @pixel_size: display pixel size
+ * @latency_ns: memory latency for the platform
+ *
+ * Calculate the watermark level (the level at which the display plane will
+ * start fetching from memory again). Each chip has a different display
+ * FIFO size and allocation, so the caller needs to figure that out and pass
+ * in the correct intel_watermark_params structure.
+ *
+ * As the pixel clock runs, the FIFO will be drained at a rate that depends
+ * on the pixel size. When it reaches the watermark level, it'll start
+ * fetching FIFO-line-sized chunks from memory until the FIFO fills past
+ * the watermark point. If the FIFO drains completely, a FIFO underrun
+ * will occur, and a display engine hang could result.
+ */
+static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
+ const struct intel_watermark_params *wm,
+ int fifo_size,
+ int pixel_size,
+ unsigned long latency_ns)
+{
+ long entries_required, wm_size;
+
+ /*
+ * Note: we need to make sure we don't overflow for various clock &
+ * latency values.
+ * clocks go from a few thousand to several hundred thousand.
+ * latency is usually a few thousand
+ */
+ entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+ 1000;
+ entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
+
+ DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
+
+ wm_size = fifo_size - (entries_required + wm->guard_size);
+
+ DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
+
+ /* Don't promote wm_size to unsigned... */
+ if (wm_size > (long)wm->max_wm)
+ wm_size = wm->max_wm;
+ if (wm_size <= 0)
+ wm_size = wm->default_wm;
+ return wm_size;
+}
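+
+/*
+ * A worked example with illustrative numbers: at a 148500 kHz pixel clock
+ * with 4 bytes per pixel and the default 5000 ns latency,
+ * entries_required = (148500 / 1000) * 4 * 5000 / 1000 = 2960 bytes, i.e.
+ * DIV_ROUND_UP(2960, 64) = 47 cachelines. With a hypothetical 96-entry
+ * FIFO and a guard size of 2, wm_size = 96 - (47 + 2) = 47.
+ */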
+
+static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+{
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ for_each_crtc(dev, crtc) {
+ if (intel_crtc_active(crtc)) {
+ if (enabled)
+ return NULL;
+ enabled = crtc;
+ }
+ }
+
+ return enabled;
+}
+
+static void pineview_update_wm(struct drm_crtc *unused_crtc)
+{
+ struct drm_device *dev = unused_crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ const struct cxsr_latency *latency;
+ u32 reg;
+ unsigned long wm;
+
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ if (!latency) {
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ pineview_disable_cxsr(dev);
+ return;
+ }
+
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ const struct drm_display_mode *adjusted_mode;
+ int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ int clock;
+
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ clock = adjusted_mode->crtc_clock;
+
+ /* Display SR */
+ wm = intel_calculate_wm(clock, &pineview_display_wm,
+ pineview_display_wm.fifo_size,
+ pixel_size, latency->display_sr);
+ reg = I915_READ(DSPFW1);
+ reg &= ~DSPFW_SR_MASK;
+ reg |= wm << DSPFW_SR_SHIFT;
+ I915_WRITE(DSPFW1, reg);
+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(clock, &pineview_cursor_wm,
+ pineview_display_wm.fifo_size,
+ pixel_size, latency->cursor_sr);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_CURSOR_SR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
+ pixel_size, latency->display_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_SR_MASK;
+ reg |= wm & DSPFW_HPLL_SR_MASK;
+ I915_WRITE(DSPFW3, reg);
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
+ pixel_size, latency->cursor_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_CURSOR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+ /* activate cxsr */
+ I915_WRITE(DSPFW3,
+ I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
+ DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ } else {
+ pineview_disable_cxsr(dev);
+ DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ }
+}
+
+static bool g4x_compute_wm0(struct drm_device *dev,
+ int plane,
+ const struct intel_watermark_params *display,
+ int display_latency_ns,
+ const struct intel_watermark_params *cursor,
+ int cursor_latency_ns,
+ int *plane_wm,
+ int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ const struct drm_display_mode *adjusted_mode;
+ int htotal, hdisplay, clock, pixel_size;
+ int line_time_us, line_count;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (!intel_crtc_active(crtc)) {
+ *cursor_wm = cursor->guard_size;
+ *plane_wm = display->guard_size;
+ return false;
+ }
+
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ clock = adjusted_mode->crtc_clock;
+ htotal = adjusted_mode->crtc_htotal;
+ hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+ pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+
+ /* Use the small buffer method to calculate plane watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *plane_wm = entries + display->guard_size;
+ if (*plane_wm > (int)display->max_wm)
+ *plane_wm = display->max_wm;
+
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = max(htotal * 1000 / clock, 1);
+ line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
+ entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
+ tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+ if (*cursor_wm > (int)cursor->max_wm)
+ *cursor_wm = (int)cursor->max_wm;
+
+ return true;
+}
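+
+/*
+ * Illustrative numbers for the two methods above: with a 148500 kHz clock,
+ * htotal 2200, 4 bytes per pixel and 5000 ns latency (ignoring the
+ * TLB-miss correction), the small buffer method gives
+ * (148500 * 4 / 1000) * 5000 / 1000 = 2970 bytes = 47 cachelines, so
+ * plane_wm = 47 + guard. For a hypothetical 64-pixel-wide cursor,
+ * line_time_us = 2200 * 1000 / 148500 = 14 and line_count = 1, giving
+ * 1 * 64 * 4 = 256 bytes = 4 cachelines, so cursor_wm = 4 + guard.
+ */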
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool g4x_check_srwm(struct drm_device *dev,
+ int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
+ display_wm, cursor_wm);
+
+ if (display_wm > display->max_wm) {
+ DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
+ display_wm, display->max_wm);
+ return false;
+ }
+
+ if (cursor_wm > cursor->max_wm) {
+ DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
+ cursor_wm, cursor->max_wm);
+ return false;
+ }
+
+ if (!(display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("SR latency is 0, disabling\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool g4x_compute_srwm(struct drm_device *dev,
+ int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ const struct drm_display_mode *adjusted_mode;
+ int hdisplay, htotal, pixel_size, clock;
+ unsigned long line_time_us;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ clock = adjusted_mode->crtc_clock;
+ htotal = adjusted_mode->crtc_htotal;
+ hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+ pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+
+ line_time_us = max(htotal * 1000 / clock, 1);
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return g4x_check_srwm(dev,
+ *display_wm, *cursor_wm,
+ display, cursor);
+}
+
+static bool vlv_compute_drain_latency(struct drm_device *dev,
+ int plane,
+ int *plane_prec_mult,
+ int *plane_dl,
+ int *cursor_prec_mult,
+ int *cursor_dl)
+{
+ struct drm_crtc *crtc;
+ int clock, pixel_size;
+ int entries;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (!intel_crtc_active(crtc))
+ return false;
+
+ clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
+ pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
+
+ entries = (clock / 1000) * pixel_size;
+ *plane_prec_mult = (entries > 256) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+ *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
+ pixel_size);
+
+ entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
+ *cursor_prec_mult = (entries > 256) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+ *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
+
+ return true;
+}
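+
+/*
+ * Illustrative numbers: at a 148500 kHz clock with 4 bytes per pixel,
+ * entries = 148 * 4 = 592 > 256, so the 32x precision multiplier is
+ * selected and plane_dl = (64 * 32 * 4) / (148 * 4) = 8192 / 592 = 13.
+ */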
+
+/*
+ * Update drain latency registers of memory arbiter
+ *
+ * Valleyview SoC has a new memory arbiter and needs drain latency registers
+ * to be programmed. Each plane has a drain latency multiplier and a drain
+ * latency value.
+ */
+
+static void vlv_update_drain_latency(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_prec, planea_dl, planeb_prec, planeb_dl;
+ int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
+ int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
+ either 16 or 32 */
+
+ /* For plane A, Cursor A */
+ if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
+ &cursor_prec_mult, &cursora_dl)) {
+ cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
+ planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
+
+ I915_WRITE(VLV_DDL1, cursora_prec |
+ (cursora_dl << DDL_CURSORA_SHIFT) |
+ planea_prec | planea_dl);
+ }
+
+ /* For plane B, Cursor B */
+ if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
+ &cursor_prec_mult, &cursorb_dl)) {
+ cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
+ planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
+
+ I915_WRITE(VLV_DDL2, cursorb_prec |
+ (cursorb_dl << DDL_CURSORB_SHIFT) |
+ planeb_prec | planeb_dl);
+ }
+}
+
+#define single_plane_enabled(mask) is_power_of_2(mask)
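+
+/* A mask qualifies when exactly one bit is set: e.g. 0x1 (pipe A alone) or
+ * 0x2 (pipe B alone) pass, while 0x0 and 0x3 (both pipes) fail. */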
+
+static void valleyview_update_wm(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ int ignore_plane_sr, ignore_cursor_sr;
+ unsigned int enabled = 0;
+
+ vlv_update_drain_latency(dev);
+
+ if (g4x_compute_wm0(dev, PIPE_A,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1 << PIPE_A;
+
+ if (g4x_compute_wm0(dev, PIPE_B,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 1 << PIPE_B;
+
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &plane_sr, &ignore_cursor_sr) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ 2*sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &ignore_plane_sr, &cursor_sr)) {
+ I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+ } else {
+ I915_WRITE(FW_BLC_SELF_VLV,
+ I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+ plane_sr = cursor_sr = 0;
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void g4x_update_wm(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ unsigned int enabled = 0;
+
+ if (g4x_compute_wm0(dev, PIPE_A,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1 << PIPE_A;
+
+ if (g4x_compute_wm0(dev, PIPE_B,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 1 << PIPE_B;
+
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &g4x_wm_info,
+ &g4x_cursor_wm_info,
+ &plane_sr, &cursor_sr)) {
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ I915_WRITE(FW_BLC_SELF,
+ I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+ plane_sr = cursor_sr = 0;
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ /* HPLL off in SR has some issues on G4x... disable it */
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void i965_update_wm(struct drm_crtc *unused_crtc)
+{
+ struct drm_device *dev = unused_crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ int srwm = 1;
+ int cursor_sr = 16;
+
+	/* Calc SR entries for single-plane configs */
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 12000;
+ const struct drm_display_mode *adjusted_mode =
+ &to_intel_crtc(crtc)->config.adjusted_mode;
+ int clock = adjusted_mode->crtc_clock;
+ int htotal = adjusted_mode->crtc_htotal;
+ int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+ int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
+
+ line_time_us = max(htotal * 1000 / clock, 1);
+
+ /* Use ns/us then divide to preserve precision */
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
+ srwm = I965_FIFO_SIZE - entries;
+ if (srwm < 0)
+ srwm = 1;
+ srwm &= 0x1ff;
+ DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
+
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * to_intel_crtc(crtc)->cursor_width;
+ entries = DIV_ROUND_UP(entries,
+ i965_cursor_wm_info.cacheline_size);
+ cursor_sr = i965_cursor_wm_info.fifo_size -
+ (entries + i965_cursor_wm_info.guard_size);
+
+ if (cursor_sr > i965_cursor_wm_info.max_wm)
+ cursor_sr = i965_cursor_wm_info.max_wm;
+
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
+
+ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
+
+ /* 965 has limitations... */
+ I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
+ (8 << 16) | (8 << 8) | (8 << 0));
+ I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+ /* update cursor SR watermark */
+ I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void i9xx_update_wm(struct drm_crtc *unused_crtc)
+{
+ struct drm_device *dev = unused_crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_watermark_params *wm_info;
+ uint32_t fwater_lo;
+ uint32_t fwater_hi;
+ int cwm, srwm = 1;
+ int fifo_size;
+ int planea_wm, planeb_wm;
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ if (IS_I945GM(dev))
+ wm_info = &i945_wm_info;
+ else if (!IS_GEN2(dev))
+ wm_info = &i915_wm_info;
+ else
+ wm_info = &i830_wm_info;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ crtc = intel_get_crtc_for_plane(dev, 0);
+ if (intel_crtc_active(crtc)) {
+ const struct drm_display_mode *adjusted_mode;
+ int cpp = crtc->primary->fb->bits_per_pixel / 8;
+ if (IS_GEN2(dev))
+ cpp = 4;
+
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
+ wm_info, fifo_size, cpp,
+ latency_ns);
+ enabled = crtc;
+ } else
+ planea_wm = fifo_size - wm_info->guard_size;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+ crtc = intel_get_crtc_for_plane(dev, 1);
+ if (intel_crtc_active(crtc)) {
+ const struct drm_display_mode *adjusted_mode;
+ int cpp = crtc->primary->fb->bits_per_pixel / 8;
+ if (IS_GEN2(dev))
+ cpp = 4;
+
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
+ wm_info, fifo_size, cpp,
+ latency_ns);
+ if (enabled == NULL)
+ enabled = crtc;
+ else
+ enabled = NULL;
+ } else
+ planeb_wm = fifo_size - wm_info->guard_size;
+
+ DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+
+ if (IS_I915GM(dev) && enabled) {
+ struct intel_framebuffer *fb;
+
+ fb = to_intel_framebuffer(enabled->primary->fb);
+
+ /* self-refresh seems busted with untiled */
+ if (fb->obj->tiling_mode == I915_TILING_NONE)
+ enabled = NULL;
+ }
+
+ /*
+ * Overlay gets an aggressive default since video jitter is bad.
+ */
+ cwm = 2;
+
+ /* Play safe and disable self-refresh before adjusting watermarks. */
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
+
+	/* Calc SR entries for single-plane configs */
+ if (HAS_FW_BLC(dev) && enabled) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 6000;
+ const struct drm_display_mode *adjusted_mode =
+ &to_intel_crtc(enabled)->config.adjusted_mode;
+ int clock = adjusted_mode->crtc_clock;
+ int htotal = adjusted_mode->crtc_htotal;
+ int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
+ int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
+
+ line_time_us = max(htotal * 1000 / clock, 1);
+
+ /* Use ns/us then divide to preserve precision */
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
+ DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+ srwm = wm_info->fifo_size - entries;
+ if (srwm < 0)
+ srwm = 1;
+
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else if (IS_I915GM(dev))
+ I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ planea_wm, planeb_wm, cwm, srwm);
+
+ fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
+ fwater_hi = (cwm & 0x1f);
+
+ /* Set request length to 8 cachelines per fetch */
+ fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
+ fwater_hi = fwater_hi | (1 << 8);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+ I915_WRITE(FW_BLC2, fwater_hi);
+
+ if (HAS_FW_BLC(dev)) {
+ if (enabled) {
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
+ DRM_DEBUG_KMS("memory self refresh enabled\n");
+ } else
+ DRM_DEBUG_KMS("memory self refresh disabled\n");
+ }
+}
+
+static void i845_update_wm(struct drm_crtc *unused_crtc)
+{
+ struct drm_device *dev = unused_crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ const struct drm_display_mode *adjusted_mode;
+ uint32_t fwater_lo;
+ int planea_wm;
+
+ crtc = single_enabled_crtc(dev);
+ if (crtc == NULL)
+ return;
+
+ adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+ planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
+ &i845_wm_info,
+ dev_priv->display.get_fifo_size(dev, 0),
+ 4, latency_ns);
+ fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+ fwater_lo |= (3<<8) | planea_wm;
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+}
+
+static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pixel_rate;
+
+ pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
+
+ /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
+ * adjust the pixel_rate here. */
+
+ if (intel_crtc->config.pch_pfit.enabled) {
+ uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
+ uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
+
+ pipe_w = intel_crtc->config.pipe_src_w;
+ pipe_h = intel_crtc->config.pipe_src_h;
+ pfit_w = (pfit_size >> 16) & 0xFFFF;
+ pfit_h = pfit_size & 0xFFFF;
+ if (pipe_w < pfit_w)
+ pipe_w = pfit_w;
+ if (pipe_h < pfit_h)
+ pipe_h = pfit_h;
+
+ pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
+ pfit_w * pfit_h);
+ }
+
+ return pixel_rate;
+}
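+
+/*
+ * A hypothetical panel-fitter example: a 3840x2160 pipe scaled down into a
+ * 1920x1080 pfit window quadruples the effective pixel rate,
+ * 148500 * (3840 * 2160) / (1920 * 1080) = 594000, since the full source
+ * resolution must be fetched within the smaller output timing.
+ */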
+
+/* latency must be in 0.1us units. */
+static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
+ uint32_t latency)
+{
+ uint64_t ret;
+
+ if (WARN(latency == 0, "Latency value missing\n"))
+ return UINT_MAX;
+
+ ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
+ ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
+
+ return ret;
+}
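+
+/*
+ * In units: pixel_rate (kHz) * bytes_per_pixel * latency (0.1 us) is
+ * 1e-4 bytes, so dividing by 64 * 10000 yields 64-byte FIFO entries.
+ * Illustrative numbers: 148500 kHz, 4 bpp and latency 120 (12 us) give
+ * DIV_ROUND_UP(148500 * 4 * 120, 640000) + 2 = 112 + 2 = 114.
+ */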
+
+/* latency must be in 0.1us units. */
+static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
+ uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+ uint32_t latency)
+{
+ uint32_t ret;
+
+ if (WARN(latency == 0, "Latency value missing\n"))
+ return UINT_MAX;
+
+ ret = (latency * pixel_rate) / (pipe_htotal * 10000);
+ ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+ ret = DIV_ROUND_UP(ret, 64) + 2;
+ return ret;
+}
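+
+/*
+ * Method 2 first computes how many complete lines elapse during the
+ * latency, then sizes for one extra line of horiz_pixels * bytes_per_pixel.
+ * Illustrative numbers: latency 120, pixel_rate 148500 and htotal 2200
+ * give 120 * 148500 / (2200 * 10000) = 0 full lines, so
+ * (0 + 1) * 1920 * 4 / 64 + 2 = 120 + 2 = 122 entries.
+ */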
+
+static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
+ uint8_t bytes_per_pixel)
+{
+ return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
+}
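+
+/*
+ * Illustrative FBC watermark: with pri_val = 114 on a 1920-pixel-wide,
+ * 4 bpp plane, DIV_ROUND_UP(114 * 64, 1920 * 4) + 2 = 1 + 2 = 3, i.e. the
+ * primary watermark re-expressed in compressed framebuffer lines.
+ */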
+
+struct ilk_pipe_wm_parameters {
+ bool active;
+ uint32_t pipe_htotal;
+ uint32_t pixel_rate;
+ struct intel_plane_wm_parameters pri;
+ struct intel_plane_wm_parameters spr;
+ struct intel_plane_wm_parameters cur;
+};
+
+struct ilk_wm_maximums {
+ uint16_t pri;
+ uint16_t spr;
+ uint16_t cur;
+ uint16_t fbc;
+};
+
+/* used in computing the new watermarks state */
+struct intel_wm_config {
+ unsigned int num_pipes_active;
+ bool sprites_enabled;
+ bool sprites_scaled;
+};
+
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
+ uint32_t mem_value,
+ bool is_lp)
+{
+ uint32_t method1, method2;
+
+ if (!params->active || !params->pri.enabled)
+ return 0;
+
+ method1 = ilk_wm_method1(params->pixel_rate,
+ params->pri.bytes_per_pixel,
+ mem_value);
+
+ if (!is_lp)
+ return method1;
+
+ method2 = ilk_wm_method2(params->pixel_rate,
+ params->pipe_htotal,
+ params->pri.horiz_pixels,
+ params->pri.bytes_per_pixel,
+ mem_value);
+
+ return min(method1, method2);
+}
+
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
+ uint32_t mem_value)
+{
+ uint32_t method1, method2;
+
+ if (!params->active || !params->spr.enabled)
+ return 0;
+
+ method1 = ilk_wm_method1(params->pixel_rate,
+ params->spr.bytes_per_pixel,
+ mem_value);
+ method2 = ilk_wm_method2(params->pixel_rate,
+ params->pipe_htotal,
+ params->spr.horiz_pixels,
+ params->spr.bytes_per_pixel,
+ mem_value);
+ return min(method1, method2);
+}
+
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
+ uint32_t mem_value)
+{
+ if (!params->active || !params->cur.enabled)
+ return 0;
+
+ return ilk_wm_method2(params->pixel_rate,
+ params->pipe_htotal,
+ params->cur.horiz_pixels,
+ params->cur.bytes_per_pixel,
+ mem_value);
+}
+
+/* Only for WM_LP. */
+static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
+ uint32_t pri_val)
+{
+ if (!params->active || !params->pri.enabled)
+ return 0;
+
+ return ilk_wm_fbc(pri_val,
+ params->pri.horiz_pixels,
+ params->pri.bytes_per_pixel);
+}
+
+static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen >= 8)
+ return 3072;
+ else if (INTEL_INFO(dev)->gen >= 7)
+ return 768;
+ else
+ return 512;
+}
+
+static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
+ int level, bool is_sprite)
+{
+ if (INTEL_INFO(dev)->gen >= 8)
+ /* BDW primary/sprite plane watermarks */
+ return level == 0 ? 255 : 2047;
+ else if (INTEL_INFO(dev)->gen >= 7)
+ /* IVB/HSW primary/sprite plane watermarks */
+ return level == 0 ? 127 : 1023;
+ else if (!is_sprite)
+ /* ILK/SNB primary plane watermarks */
+ return level == 0 ? 127 : 511;
+ else
+ /* ILK/SNB sprite plane watermarks */
+ return level == 0 ? 63 : 255;
+}
+
+static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
+ int level)
+{
+ if (INTEL_INFO(dev)->gen >= 7)
+ return level == 0 ? 63 : 255;
+ else
+ return level == 0 ? 31 : 63;
+}
+
+static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen >= 8)
+ return 31;
+ else
+ return 15;
+}
+
+/* Calculate the maximum primary/sprite plane watermark */
+static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
+ int level,
+ const struct intel_wm_config *config,
+ enum intel_ddb_partitioning ddb_partitioning,
+ bool is_sprite)
+{
+ unsigned int fifo_size = ilk_display_fifo_size(dev);
+
+ /* if sprites aren't enabled, sprites get nothing */
+ if (is_sprite && !config->sprites_enabled)
+ return 0;
+
+ /* HSW allows LP1+ watermarks even with multiple pipes */
+ if (level == 0 || config->num_pipes_active > 1) {
+ fifo_size /= INTEL_INFO(dev)->num_pipes;
+
+ /*
+ * For some reason the non self refresh
+ * FIFO size is only half of the self
+ * refresh FIFO size on ILK/SNB.
+ */
+ if (INTEL_INFO(dev)->gen <= 6)
+ fifo_size /= 2;
+ }
+
+ if (config->sprites_enabled) {
+ /* level 0 is always calculated with 1:1 split */
+ if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
+ if (is_sprite)
+ fifo_size *= 5;
+ fifo_size /= 6;
+ } else {
+ fifo_size /= 2;
+ }
+ }
+
+ /* clamp to max that the registers can hold */
+ return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
+}
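+
+/*
+ * Illustrative 5/6 split: with an IVB-sized 768-entry FIFO, a single
+ * active pipe and INTEL_DDB_PART_5_6 at level 1+, the sprite gets
+ * 768 * 5 / 6 = 640 entries and the primary 768 / 6 = 128, whereas
+ * INTEL_DDB_PART_1_2 gives each side 768 / 2 = 384.
+ */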
+
+/* Calculate the maximum cursor plane watermark */
+static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
+ int level,
+ const struct intel_wm_config *config)
+{
+ /* HSW LP1+ watermarks w/ multiple pipes */
+ if (level > 0 && config->num_pipes_active > 1)
+ return 64;
+
+ /* otherwise just report max that registers can hold */
+ return ilk_cursor_wm_reg_max(dev, level);
+}
+
+static void ilk_compute_wm_maximums(const struct drm_device *dev,
+ int level,
+ const struct intel_wm_config *config,
+ enum intel_ddb_partitioning ddb_partitioning,
+ struct ilk_wm_maximums *max)
+{
+ max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
+ max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
+ max->cur = ilk_cursor_wm_max(dev, level, config);
+ max->fbc = ilk_fbc_wm_reg_max(dev);
+}
+
+static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
+ int level,
+ struct ilk_wm_maximums *max)
+{
+ max->pri = ilk_plane_wm_reg_max(dev, level, false);
+ max->spr = ilk_plane_wm_reg_max(dev, level, true);
+ max->cur = ilk_cursor_wm_reg_max(dev, level);
+ max->fbc = ilk_fbc_wm_reg_max(dev);
+}
+
+static bool ilk_validate_wm_level(int level,
+ const struct ilk_wm_maximums *max,
+ struct intel_wm_level *result)
+{
+ bool ret;
+
+ /* already determined to be invalid? */
+ if (!result->enable)
+ return false;
+
+ result->enable = result->pri_val <= max->pri &&
+ result->spr_val <= max->spr &&
+ result->cur_val <= max->cur;
+
+ ret = result->enable;
+
+ /*
+ * HACK until we can pre-compute everything,
+ * and thus fail gracefully if LP0 watermarks
+ * are exceeded...
+ */
+ if (level == 0 && !result->enable) {
+ if (result->pri_val > max->pri)
+ DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
+ level, result->pri_val, max->pri);
+ if (result->spr_val > max->spr)
+ DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
+ level, result->spr_val, max->spr);
+ if (result->cur_val > max->cur)
+ DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
+ level, result->cur_val, max->cur);
+
+ result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
+ result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
+ result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
+ result->enable = true;
+ }
+
+ return ret;
+}
+
+static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
+ int level,
+ const struct ilk_pipe_wm_parameters *p,
+ struct intel_wm_level *result)
+{
+ uint16_t pri_latency = dev_priv->wm.pri_latency[level];
+ uint16_t spr_latency = dev_priv->wm.spr_latency[level];
+ uint16_t cur_latency = dev_priv->wm.cur_latency[level];
+
+ /* WM1+ latency values stored in 0.5us units */
+ if (level > 0) {
+ pri_latency *= 5;
+ spr_latency *= 5;
+ cur_latency *= 5;
+ }
+
+ result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
+ result->spr_val = ilk_compute_spr_wm(p, spr_latency);
+ result->cur_val = ilk_compute_cur_wm(p, cur_latency);
+ result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
+ result->enable = true;
+}
+
+static uint32_t
+hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+ u32 linetime, ips_linetime;
+
+ if (!intel_crtc_active(crtc))
+ return 0;
+
+	/* The watermarks are computed based on how long it takes to fill a
+	 * single row at the given clock rate, multiplied by 8.
+	 */
+ linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
+ mode->crtc_clock);
+ ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
+ intel_ddi_get_cdclk_freq(dev_priv));
+
+ return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
+ PIPE_WM_LINETIME_TIME(linetime);
+}
+
+static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ uint64_t sskpd = I915_READ64(MCH_SSKPD);
+
+ wm[0] = (sskpd >> 56) & 0xFF;
+ if (wm[0] == 0)
+ wm[0] = sskpd & 0xF;
+ wm[1] = (sskpd >> 4) & 0xFF;
+ wm[2] = (sskpd >> 12) & 0xFF;
+ wm[3] = (sskpd >> 20) & 0x1FF;
+ wm[4] = (sskpd >> 32) & 0x1FF;
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ uint32_t sskpd = I915_READ(MCH_SSKPD);
+
+ wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
+ wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
+ wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
+ wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
+ } else if (INTEL_INFO(dev)->gen >= 5) {
+ uint32_t mltr = I915_READ(MLTR_ILK);
+
+ /* ILK primary LP0 latency is 700 ns */
+ wm[0] = 7;
+ wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
+ wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
+ }
+}
+
+static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+ /* ILK sprite LP0 latency is 1300 ns */
+ if (INTEL_INFO(dev)->gen == 5)
+ wm[0] = 13;
+}
+
+static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+ /* ILK cursor LP0 latency is 1300 ns */
+ if (INTEL_INFO(dev)->gen == 5)
+ wm[0] = 13;
+
+ /* WaDoubleCursorLP3Latency:ivb */
+ if (IS_IVYBRIDGE(dev))
+ wm[3] *= 2;
+}
+
+int ilk_wm_max_level(const struct drm_device *dev)
+{
+ /* how many WM levels are we expecting */
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ return 4;
+ else if (INTEL_INFO(dev)->gen >= 6)
+ return 3;
+ else
+ return 2;
+}
+
+static void intel_print_wm_latency(struct drm_device *dev,
+ const char *name,
+ const uint16_t wm[5])
+{
+ int level, max_level = ilk_wm_max_level(dev);
+
+ for (level = 0; level <= max_level; level++) {
+ unsigned int latency = wm[level];
+
+ if (latency == 0) {
+ DRM_ERROR("%s WM%d latency not provided\n",
+ name, level);
+ continue;
+ }
+
+ /* WM1+ latency values in 0.5us units */
+ if (level > 0)
+ latency *= 5;
+
+ DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
+ name, level, wm[level],
+ latency / 10, latency % 10);
+ }
+}
+
+static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
+ uint16_t wm[5], uint16_t min)
+{
+ int level, max_level = ilk_wm_max_level(dev_priv->dev);
+
+ if (wm[0] >= min)
+ return false;
+
+ wm[0] = max(wm[0], min);
+ for (level = 1; level <= max_level; level++)
+ wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
+
+ return true;
+}
+
+static void snb_wm_latency_quirk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool changed;
+
+ /*
+ * The BIOS provided WM memory latency values are often
+ * inadequate for high resolution displays. Adjust them.
+ */
+ changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
+ ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
+ ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
+
+ if (!changed)
+ return;
+
+ DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
+ intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
+ intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
+ intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+}
+
+static void ilk_setup_wm_latency(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
+
+ memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
+ sizeof(dev_priv->wm.pri_latency));
+ memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
+ sizeof(dev_priv->wm.pri_latency));
+
+ intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
+ intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
+
+ intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
+ intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
+ intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+
+ if (IS_GEN6(dev))
+ snb_wm_latency_quirk(dev);
+}
+
+static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
+ struct ilk_pipe_wm_parameters *p)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_plane *plane;
+
+ if (!intel_crtc_active(crtc))
+ return;
+
+ p->active = true;
+ p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
+ p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
+ p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
+ p->cur.bytes_per_pixel = 4;
+ p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
+ p->cur.horiz_pixels = intel_crtc->cursor_width;
+ /* TODO: for now, assume primary and cursor planes are always enabled. */
+ p->pri.enabled = true;
+ p->cur.enabled = true;
+
+ drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+
+ if (intel_plane->pipe == pipe) {
+ p->spr = intel_plane->wm;
+ break;
+ }
+ }
+}
+
+static void ilk_compute_wm_config(struct drm_device *dev,
+ struct intel_wm_config *config)
+{
+ struct intel_crtc *intel_crtc;
+
+ /* Compute the currently _active_ config */
+ for_each_intel_crtc(dev, intel_crtc) {
+ const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
+
+ if (!wm->pipe_enabled)
+ continue;
+
+ config->sprites_enabled |= wm->sprites_enabled;
+ config->sprites_scaled |= wm->sprites_scaled;
+ config->num_pipes_active++;
+ }
+}
+
+/* Compute new watermarks for the pipe */
+static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
+ const struct ilk_pipe_wm_parameters *params,
+ struct intel_pipe_wm *pipe_wm)
+{
+ struct drm_device *dev = crtc->dev;
+ const struct drm_i915_private *dev_priv = dev->dev_private;
+ int level, max_level = ilk_wm_max_level(dev);
+ /* LP0 watermark maximums depend on this pipe alone */
+ struct intel_wm_config config = {
+ .num_pipes_active = 1,
+ .sprites_enabled = params->spr.enabled,
+ .sprites_scaled = params->spr.scaled,
+ };
+ struct ilk_wm_maximums max;
+
+ pipe_wm->pipe_enabled = params->active;
+ pipe_wm->sprites_enabled = params->spr.enabled;
+ pipe_wm->sprites_scaled = params->spr.scaled;
+
+ /* ILK/SNB: LP2+ watermarks only w/o sprites */
+ if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
+ max_level = 1;
+
+ /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
+ if (params->spr.scaled)
+ max_level = 0;
+
+ ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
+
+ /* LP0 watermarks always use 1/2 DDB partitioning */
+ ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+
+ /* At least LP0 must be valid */
+ if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
+ return false;
+
+ ilk_compute_wm_reg_maximums(dev, 1, &max);
+
+ for (level = 1; level <= max_level; level++) {
+ struct intel_wm_level wm = {};
+
+ ilk_compute_wm_level(dev_priv, level, params, &wm);
+
+ /*
+ * Disable any watermark level that exceeds the
+ * register maximums since such watermarks are
+ * always invalid.
+ */
+ if (!ilk_validate_wm_level(level, &max, &wm))
+ break;
+
+ pipe_wm->wm[level] = wm;
+ }
+
+ return true;
+}
+
+/*
+ * Merge the watermarks from all active pipes for a specific level.
+ */
+static void ilk_merge_wm_level(struct drm_device *dev,
+ int level,
+ struct intel_wm_level *ret_wm)
+{
+ const struct intel_crtc *intel_crtc;
+
+ ret_wm->enable = true;
+
+ for_each_intel_crtc(dev, intel_crtc) {
+ const struct intel_pipe_wm *active = &intel_crtc->wm.active;
+ const struct intel_wm_level *wm = &active->wm[level];
+
+ if (!active->pipe_enabled)
+ continue;
+
+ /*
+ * The watermark values may have been used in the past,
+ * so we must maintain them in the registers for some
+ * time even if the level is now disabled.
+ */
+ if (!wm->enable)
+ ret_wm->enable = false;
+
+ ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
+ ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
+ ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
+ ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
+ }
+}
+
+/*
+ * Merge all low power watermarks for all active pipes.
+ */
+static void ilk_wm_merge(struct drm_device *dev,
+ const struct intel_wm_config *config,
+ const struct ilk_wm_maximums *max,
+ struct intel_pipe_wm *merged)
+{
+ int level, max_level = ilk_wm_max_level(dev);
+ int last_enabled_level = max_level;
+
+ /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
+ if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
+ config->num_pipes_active > 1)
+ return;
+
+ /* ILK: FBC WM must be disabled always */
+ merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
+
+ /* merge each WM1+ level */
+ for (level = 1; level <= max_level; level++) {
+ struct intel_wm_level *wm = &merged->wm[level];
+
+ ilk_merge_wm_level(dev, level, wm);
+
+ if (level > last_enabled_level)
+ wm->enable = false;
+ else if (!ilk_validate_wm_level(level, max, wm))
+ /* make sure all following levels get disabled */
+ last_enabled_level = level - 1;
+
+ /*
+ * The spec says it is preferred to disable
+ * FBC WMs instead of disabling a WM level.
+ */
+ if (wm->fbc_val > max->fbc) {
+ if (wm->enable)
+ merged->fbc_wm_enabled = false;
+ wm->fbc_val = 0;
+ }
+ }
+
+ /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
+ /*
+ * FIXME this is racy. FBC might get enabled later.
+ * What we should check here is whether FBC can be
+ * enabled sometime later.
+ */
+ if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
+ for (level = 2; level <= max_level; level++) {
+ struct intel_wm_level *wm = &merged->wm[level];
+
+ wm->enable = false;
+ }
+ }
+}
+
+static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
+{
+ /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
+ return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
+}
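+
+/*
+ * Concretely: on a platform where wm[4] is populated and enabled,
+ * LP1/LP2/LP3 map to levels 1/3/4; otherwise they map to levels 1/2/3.
+ */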
+
+/* The value we need to program into the WM_LPx latency field */
+static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ return 2 * level;
+ else
+ return dev_priv->wm.pri_latency[level];
+}
+
+static void ilk_compute_wm_results(struct drm_device *dev,
+ const struct intel_pipe_wm *merged,
+ enum intel_ddb_partitioning partitioning,
+ struct ilk_wm_values *results)
+{
+ struct intel_crtc *intel_crtc;
+ int level, wm_lp;
+
+ results->enable_fbc_wm = merged->fbc_wm_enabled;
+ results->partitioning = partitioning;
+
+ /* LP1+ register values */
+ for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+ const struct intel_wm_level *r;
+
+ level = ilk_wm_lp_to_level(wm_lp, merged);
+
+ r = &merged->wm[level];
+
+ /*
+ * Maintain the watermark values even if the level is
+ * disabled. Doing otherwise could cause underruns.
+ */
+ results->wm_lp[wm_lp - 1] =
+ (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
+ (r->pri_val << WM1_LP_SR_SHIFT) |
+ r->cur_val;
+
+ if (r->enable)
+ results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
+
+ if (INTEL_INFO(dev)->gen >= 8)
+ results->wm_lp[wm_lp - 1] |=
+ r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
+ else
+ results->wm_lp[wm_lp - 1] |=
+ r->fbc_val << WM1_LP_FBC_SHIFT;
+
+ /*
+ * Always set WM1S_LP_EN when spr_val != 0, even if the
+ * level is disabled. Doing otherwise could cause underruns.
+ */
+ if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
+ WARN_ON(wm_lp != 1);
+ results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
+ } else
+ results->wm_lp_spr[wm_lp - 1] = r->spr_val;
+ }
+
+ /* LP0 register values */
+ for_each_intel_crtc(dev, intel_crtc) {
+ enum pipe pipe = intel_crtc->pipe;
+ const struct intel_wm_level *r =
+ &intel_crtc->wm.active.wm[0];
+
+ if (WARN_ON(!r->enable))
+ continue;
+
+ results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
+
+ results->wm_pipe[pipe] =
+ (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
+ (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
+ r->cur_val;
+ }
+}
+
+/* Find the result with the highest level enabled. If both peak at the same
+ * level, prefer the one with the FBC watermark enabled; if that ties too,
+ * prefer r1. */
+static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
+ struct intel_pipe_wm *r1,
+ struct intel_pipe_wm *r2)
+{
+ int level, max_level = ilk_wm_max_level(dev);
+ int level1 = 0, level2 = 0;
+
+ for (level = 1; level <= max_level; level++) {
+ if (r1->wm[level].enable)
+ level1 = level;
+ if (r2->wm[level].enable)
+ level2 = level;
+ }
+
+ if (level1 == level2) {
+ if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
+ return r2;
+ else
+ return r1;
+ } else if (level1 > level2) {
+ return r1;
+ } else {
+ return r2;
+ }
+}
+
+/* dirty bits used to track which watermarks need changes */
+#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
+#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
+#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
+#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
+#define WM_DIRTY_FBC (1 << 24)
+#define WM_DIRTY_DDB (1 << 25)
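+
+/*
+ * Bit layout example: WM_DIRTY_PIPE(PIPE_A) is bit 0,
+ * WM_DIRTY_LINETIME(PIPE_A) bit 8, WM_DIRTY_LP(1) bit 16, WM_DIRTY_FBC
+ * bit 24 and WM_DIRTY_DDB bit 25.
+ */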
+
+static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
+ const struct ilk_wm_values *old,
+ const struct ilk_wm_values *new)
+{
+ unsigned int dirty = 0;
+ enum pipe pipe;
+ int wm_lp;
+
+ for_each_pipe(pipe) {
+ if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
+ dirty |= WM_DIRTY_LINETIME(pipe);
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+
+ if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
+ dirty |= WM_DIRTY_PIPE(pipe);
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+ }
+
+ if (old->enable_fbc_wm != new->enable_fbc_wm) {
+ dirty |= WM_DIRTY_FBC;
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+
+ if (old->partitioning != new->partitioning) {
+ dirty |= WM_DIRTY_DDB;
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+
+ /* LP1+ watermarks already deemed dirty, no need to continue */
+ if (dirty & WM_DIRTY_LP_ALL)
+ return dirty;
+
+ /* Find the lowest numbered LP1+ watermark in need of an update... */
+ for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+ if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
+ old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
+ break;
+ }
+
+ /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
+ for (; wm_lp <= 3; wm_lp++)
+ dirty |= WM_DIRTY_LP(wm_lp);
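+	/*
+	 * E.g. if only WM2 changed, both WM_DIRTY_LP(2) and WM_DIRTY_LP(3) end
+	 * up set; rewriting a lower LP watermark apparently requires the
+	 * higher ones to be refreshed as well.
+	 */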
+
+ return dirty;
+}
+
+static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+ unsigned int dirty)
+{
+ struct ilk_wm_values *previous = &dev_priv->wm.hw;
+ bool changed = false;
+
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
+ previous->wm_lp[2] &= ~WM1_LP_SR_EN;
+ I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
+ changed = true;
+ }
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
+ previous->wm_lp[1] &= ~WM1_LP_SR_EN;
+ I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
+ changed = true;
+ }
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
+ previous->wm_lp[0] &= ~WM1_LP_SR_EN;
+ I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
+ changed = true;
+ }
+
+ /*
+ * Don't touch WM1S_LP_EN here.
+ * Doing so could cause underruns.
+ */
+
+ return changed;
+}
+
+/*
+ * The spec says we shouldn't write when we don't need to, because every write
+ * causes WMs to be re-evaluated, expending some power.
+ */
+static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+ struct ilk_wm_values *results)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct ilk_wm_values *previous = &dev_priv->wm.hw;
+ unsigned int dirty;
+ uint32_t val;
+
+ dirty = ilk_compute_wm_dirty(dev, previous, results);
+ if (!dirty)
+ return;
+
+ _ilk_disable_lp_wm(dev_priv, dirty);
+
+ if (dirty & WM_DIRTY_PIPE(PIPE_A))
+ I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
+ if (dirty & WM_DIRTY_PIPE(PIPE_B))
+ I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
+ if (dirty & WM_DIRTY_PIPE(PIPE_C))
+ I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
+
+ if (dirty & WM_DIRTY_LINETIME(PIPE_A))
+ I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
+ if (dirty & WM_DIRTY_LINETIME(PIPE_B))
+ I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
+ if (dirty & WM_DIRTY_LINETIME(PIPE_C))
+ I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
+
+ if (dirty & WM_DIRTY_DDB) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ val = I915_READ(WM_MISC);
+ if (results->partitioning == INTEL_DDB_PART_1_2)
+ val &= ~WM_MISC_DATA_PARTITION_5_6;
+ else
+ val |= WM_MISC_DATA_PARTITION_5_6;
+ I915_WRITE(WM_MISC, val);
+ } else {
+ val = I915_READ(DISP_ARB_CTL2);
+ if (results->partitioning == INTEL_DDB_PART_1_2)
+ val &= ~DISP_DATA_PARTITION_5_6;
+ else
+ val |= DISP_DATA_PARTITION_5_6;
+ I915_WRITE(DISP_ARB_CTL2, val);
+ }
+ }
+
+ if (dirty & WM_DIRTY_FBC) {
+ val = I915_READ(DISP_ARB_CTL);
+ if (results->enable_fbc_wm)
+ val &= ~DISP_FBC_WM_DIS;
+ else
+ val |= DISP_FBC_WM_DIS;
+ I915_WRITE(DISP_ARB_CTL, val);
+ }
+
+ if (dirty & WM_DIRTY_LP(1) &&
+ previous->wm_lp_spr[0] != results->wm_lp_spr[0])
+ I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
+
+ if (INTEL_INFO(dev)->gen >= 7) {
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
+ I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
+ I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
+ }
+
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
+ I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
+ I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
+ I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
+
+ dev_priv->wm.hw = *results;
+}
+
+static bool ilk_disable_lp_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+}
+
+static void ilk_update_wm(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct ilk_wm_maximums max;
+ struct ilk_pipe_wm_parameters params = {};
+ struct ilk_wm_values results = {};
+ enum intel_ddb_partitioning partitioning;
+ struct intel_pipe_wm pipe_wm = {};
+ struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
+ struct intel_wm_config config = {};
+
+ ilk_compute_wm_parameters(crtc, &params);
+
+ intel_compute_pipe_wm(crtc, &params, &pipe_wm);
+
+ if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
+ return;
+
+ intel_crtc->wm.active = pipe_wm;
+
+ ilk_compute_wm_config(dev, &config);
+
+ ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
+
+ /* 5/6 split only in single pipe config on IVB+ */
+ if (INTEL_INFO(dev)->gen >= 7 &&
+ config.num_pipes_active == 1 && config.sprites_enabled) {
+ ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
+
+ best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
+ } else {
+ best_lp_wm = &lp_wm_1_2;
+ }
+
+ partitioning = (best_lp_wm == &lp_wm_1_2) ?
+ INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
+
+ ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+
+ ilk_write_wm_values(dev_priv, &results);
+}
+
+static void ilk_update_sprite_wm(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ uint32_t sprite_width, int pixel_size,
+ bool enabled, bool scaled)
+{
+ struct drm_device *dev = plane->dev;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+
+ intel_plane->wm.enabled = enabled;
+ intel_plane->wm.scaled = scaled;
+ intel_plane->wm.horiz_pixels = sprite_width;
+ intel_plane->wm.bytes_per_pixel = pixel_size;
+
+ /*
+ * IVB workaround: must disable low power watermarks for at least
+ * one frame before enabling scaling. LP watermarks can be re-enabled
+ * when scaling is disabled.
+ *
+ * WaCxSRDisabledForSpriteScaling:ivb
+ */
+ if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
+ intel_wait_for_vblank(dev, intel_plane->pipe);
+
+ ilk_update_wm(crtc);
+}
+
+static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct ilk_wm_values *hw = &dev_priv->wm.hw;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_pipe_wm *active = &intel_crtc->wm.active;
+ enum pipe pipe = intel_crtc->pipe;
+ static const unsigned int wm0_pipe_reg[] = {
+ [PIPE_A] = WM0_PIPEA_ILK,
+ [PIPE_B] = WM0_PIPEB_ILK,
+ [PIPE_C] = WM0_PIPEC_IVB,
+ };
+
+ hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+
+ active->pipe_enabled = intel_crtc_active(crtc);
+
+ if (active->pipe_enabled) {
+ u32 tmp = hw->wm_pipe[pipe];
+
+ /*
+ * For active pipes LP0 watermark is marked as
+		 * enabled, and LP1+ watermarks as disabled since
+ * we can't really reverse compute them in case
+ * multiple pipes are active.
+ */
+ active->wm[0].enable = true;
+ active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
+ active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
+ active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
+ active->linetime = hw->wm_linetime[pipe];
+ } else {
+ int level, max_level = ilk_wm_max_level(dev);
+
+ /*
+ * For inactive pipes, all watermark levels
+ * should be marked as enabled but zeroed,
+ * which is what we'd compute them to.
+ */
+ for (level = 0; level <= max_level; level++)
+ active->wm[level].enable = true;
+ }
+}
+
+void ilk_wm_get_hw_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct ilk_wm_values *hw = &dev_priv->wm.hw;
+ struct drm_crtc *crtc;
+
+ for_each_crtc(dev, crtc)
+ ilk_pipe_wm_get_hw_state(crtc);
+
+ hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
+ hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
+ hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
+
+ hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+ if (INTEL_INFO(dev)->gen >= 7) {
+ hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+ }
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+ else if (IS_IVYBRIDGE(dev))
+ hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+
+ hw->enable_fbc_wm =
+ !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+}
+
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ * lines), so need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
+ * and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+void intel_update_watermarks(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+
+ if (dev_priv->display.update_wm)
+ dev_priv->display.update_wm(crtc);
+}
+
+void intel_update_sprite_watermarks(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ uint32_t sprite_width, int pixel_size,
+ bool enabled, bool scaled)
+{
+ struct drm_i915_private *dev_priv = plane->dev->dev_private;
+
+ if (dev_priv->display.update_sprite_wm)
+ dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
+ pixel_size, enabled, scaled);
+}
+
+static struct drm_i915_gem_object *
+intel_alloc_context_page(struct drm_device *dev)
+{
+ struct drm_i915_gem_object *ctx;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ ctx = i915_gem_alloc_object(dev, 4096);
+ if (!ctx) {
+ DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+ return NULL;
+ }
+
+ ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
+ if (ret) {
+ DRM_ERROR("failed to pin power context: %d\n", ret);
+ goto err_unref;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
+ if (ret) {
+ DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+ goto err_unpin;
+ }
+
+ return ctx;
+
+err_unpin:
+ i915_gem_object_ggtt_unpin(ctx);
+err_unref:
+ drm_gem_object_unreference(&ctx->base);
+ return NULL;
+}
+
+/**
+ * Lock protecting IPS related data structures
+ */
+DEFINE_SPINLOCK(mchdev_lock);
+
+/* Global for IPS driver to get at the current i915 device. Protected by
+ * mchdev_lock. */
+static struct drm_i915_private *i915_mch_dev;
+
+bool ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl;
+
+ assert_spin_locked(&mchdev_lock);
+
+ rgvswctl = I915_READ16(MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_DEBUG("gpu busy, RCS change rejected\n");
+ return false; /* still busy with another command */
+ }
+
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+ POSTING_READ16(MEMSWCTL);
+
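+	/* Writing the command back with the status bit set appears to latch
+	 * it; hardware clears MEMCTL_CMD_STS once the frequency change
+	 * completes (the busy check at the top of this function). */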
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+
+ return true;
+}
+
+static void ironlake_enable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rgvmodectl = I915_READ(MEMMODECTL);
+ u8 fmax, fmin, fstart, vstart;
+
+ spin_lock_irq(&mchdev_lock);
+
+ /* Enable temp reporting */
+ I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+ I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+
+ /* 100ms RC evaluation intervals */
+ I915_WRITE(RCUPEI, 100000);
+ I915_WRITE(RCDNEI, 100000);
+
+ /* Set max/min thresholds to 90ms and 80ms respectively */
+ I915_WRITE(RCBMAXAVG, 90000);
+ I915_WRITE(RCBMINAVG, 80000);
+
+ I915_WRITE(MEMIHYST, 1);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+
+ vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+
+ dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
+ dev_priv->ips.fstart = fstart;
+
+ dev_priv->ips.max_delay = fstart;
+ dev_priv->ips.min_delay = fmin;
+ dev_priv->ips.cur_delay = fstart;
+
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
+
+ I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+ /*
+ * Interrupts will be enabled in ironlake_irq_postinstall
+ */
+
+ I915_WRITE(VIDSTART, vstart);
+ POSTING_READ(VIDSTART);
+
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ I915_WRITE(MEMMODECTL, rgvmodectl);
+
+ if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+ DRM_ERROR("stuck trying to change perf mode\n");
+ mdelay(1);
+
+ ironlake_set_drps(dev, fstart);
+
+ dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+ I915_READ(0x112e0);
+ dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
+ dev_priv->ips.last_count2 = I915_READ(0x112f4);
+ getrawmonotonic(&dev_priv->ips.last_time2);
+
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static void ironlake_disable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl;
+
+ spin_lock_irq(&mchdev_lock);
+
+ rgvswctl = I915_READ16(MEMSWCTL);
+
+ /* Ack interrupts, disable EFC interrupt */
+ I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+ I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+ I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+ I915_WRITE(DEIIR, DE_PCU_EVENT);
+ I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+
+ /* Go back to the starting frequency */
+ ironlake_set_drps(dev, dev_priv->ips.fstart);
+ mdelay(1);
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ mdelay(1);
+
+ spin_unlock_irq(&mchdev_lock);
+}
+
+/* There's a funny hw issue where the hw returns all 0 when reading from
+ * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
+ * ourselves, instead of doing an rmw cycle (which might result in us clearing
+ * all limits and the gpu stuck at whatever frequency it is at the moment).
+ */
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
+{
+ u32 limits;
+
+ /* Only set the down limit when we've reached the lowest level to avoid
+ * getting more interrupts, otherwise leave this clear. This prevents a
+ * race in the hw when coming out of rc6: There's a tiny window where
+ * the hw runs at the minimal clock before selecting the desired
+ * frequency, if the down threshold expires in that window we will not
+ * receive a down interrupt. */
+ limits = dev_priv->rps.max_freq_softlimit << 24;
+ if (val <= dev_priv->rps.min_freq_softlimit)
+ limits |= dev_priv->rps.min_freq_softlimit << 16;
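+	/* Hypothetical example: softlimits of max = 0x10 and min = 0x05 with
+	 * val == min encode as 0x10050000 (max in bits 31:24, min in 23:16). */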
+
+ return limits;
+}
+
+static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+{
+ int new_power;
+
+ new_power = dev_priv->rps.power;
+ switch (dev_priv->rps.power) {
+ case LOW_POWER:
+ if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
+ new_power = BETWEEN;
+ break;
+
+ case BETWEEN:
+ if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
+ new_power = LOW_POWER;
+ else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
+ new_power = HIGH_POWER;
+ break;
+
+ case HIGH_POWER:
+ if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
+ new_power = BETWEEN;
+ break;
+ }
+ /* Max/min bins are special */
+ if (val == dev_priv->rps.min_freq_softlimit)
+ new_power = LOW_POWER;
+ if (val == dev_priv->rps.max_freq_softlimit)
+ new_power = HIGH_POWER;
+ if (new_power == dev_priv->rps.power)
+ return;
+
+ /* Note the units here are not exactly 1us, but 1280ns. */
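+	/* E.g. in LOW_POWER below: UP_EI = 12500 * 1.28us ~= 16ms, and the
+	 * 11800 threshold is 11800/12500 ~= 94% of that interval. */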
+ switch (new_power) {
+ case LOW_POWER:
+ /* Upclock if more than 95% busy over 16ms */
+ I915_WRITE(GEN6_RP_UP_EI, 12500);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
+
+ /* Downclock if less than 85% busy over 32ms */
+ I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+ break;
+
+ case BETWEEN:
+ /* Upclock if more than 90% busy over 13ms */
+ I915_WRITE(GEN6_RP_UP_EI, 10250);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
+
+ /* Downclock if less than 75% busy over 32ms */
+ I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+ break;
+
+ case HIGH_POWER:
+ /* Upclock if more than 85% busy over 10ms */
+ I915_WRITE(GEN6_RP_UP_EI, 8000);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
+
+ /* Downclock if less than 60% busy over 32ms */
+ I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+ break;
+ }
+
+ dev_priv->rps.power = new_power;
+ dev_priv->rps.last_adj = 0;
+}
+
+static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
+{
+ u32 mask = 0;
+
+ if (val > dev_priv->rps.min_freq_softlimit)
+ mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+ if (val < dev_priv->rps.max_freq_softlimit)
+ mask |= GEN6_PM_RP_UP_THRESHOLD;
+
+	/* IVB and SNB hard hang on a looping batchbuffer
+ * if GEN6_PM_UP_EI_EXPIRED is masked.
+ */
+ if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+ if (IS_GEN8(dev_priv->dev))
+ mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
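+	/* GEN6_PMINTRMSK is a disable mask (writing 0xffffffff masks
+	 * everything elsewhere in this file), so invert: only the events
+	 * collected above remain unmasked. */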
+
+ return ~mask;
+}
+
+/* gen6_set_rps is called to update the frequency request, but should also be
+ * called when the range (min_freq_softlimit and max_freq_softlimit) is
+ * modified so that we can update the GEN6_RP_INTERRUPT_LIMITS register
+ * accordingly. */
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+ WARN_ON(val < dev_priv->rps.min_freq_softlimit);
+
+ /* min/max delay may still have been modified so be sure to
+ * write the limits value.
+ */
+ if (val != dev_priv->rps.cur_freq) {
+ gen6_set_rps_thresholds(dev_priv, val);
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ I915_WRITE(GEN6_RPNSWREQ,
+ HSW_FREQUENCY(val));
+ else
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(val) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ }
+
+ /* Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
+ I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+
+ POSTING_READ(GEN6_RPNSWREQ);
+
+ dev_priv->rps.cur_freq = val;
+ trace_intel_gpu_freq_change(val * 50);
+}
+
+/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
+ *
+ * If Gfx is Idle, then:
+ * 1. Mask Turbo interrupts
+ * 2. Bring up Gfx clock
+ * 3. Change the freq to Rpn and wait till P-Unit updates freq
+ * 4. Clear the Force GFX CLK ON bit so that Gfx can go down
+ * 5. Unmask Turbo interrupts
+ */
+static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ /* Latest VLV doesn't need to force the gfx clock */
+ if (dev->pdev->revision >= 0xd) {
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ return;
+ }
+
+	/*
+	 * When we are idle, drop to the minimum voltage state.
+	 */
+
+ if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
+ return;
+
+	/* Mask turbo interrupts so that they do not come in between */
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+
+ vlv_force_gfx_clock(dev_priv, true);
+
+ dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
+
+ vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
+ dev_priv->rps.min_freq_softlimit);
+
+ if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
+ & GENFREQSTATUS) == 0, 5))
+ DRM_ERROR("timed out waiting for Punit\n");
+
+ vlv_force_gfx_clock(dev_priv, false);
+
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+}
+
+void gen6_rps_idle(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (dev_priv->rps.enabled) {
+ if (IS_VALLEYVIEW(dev))
+ vlv_set_rps_idle(dev_priv);
+ else
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ dev_priv->rps.last_adj = 0;
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+void gen6_rps_boost(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (dev_priv->rps.enabled) {
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
+ else
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
+ dev_priv->rps.last_adj = 0;
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+void valleyview_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+ WARN_ON(val < dev_priv->rps.min_freq_softlimit);
+
+ DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ dev_priv->rps.cur_freq,
+ vlv_gpu_freq(dev_priv, val), val);
+
+ if (val != dev_priv->rps.cur_freq)
+ vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+
+ I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+
+ dev_priv->rps.cur_freq = val;
+ trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
+}
+
+static void gen8_disable_rps_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
+ I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
+ ~dev_priv->pm_rps_events);
+ /* Complete PM interrupt masking here doesn't race with the rps work
+ * item again unmasking PM interrupts because that is using a different
+ * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
+ * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
+ * gen8_enable_rps will clean up. */
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->rps.pm_iir = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
+}
+
+static void gen6_disable_rps_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
+ ~dev_priv->pm_rps_events);
+ /* Complete PM interrupt masking here doesn't race with the rps work
+ * item again unmasking PM interrupts because that is using a different
+ * register (PMIMR) to mask PM interrupts. The only risk is in leaving
+ * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->rps.pm_iir = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
+}
+
+static void gen6_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+ I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+
+ if (IS_BROADWELL(dev))
+ gen8_disable_rps_interrupts(dev);
+ else
+ gen6_disable_rps_interrupts(dev);
+}
+
+static void valleyview_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ gen6_disable_rps_interrupts(dev);
+}
+
+static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+{
+ if (IS_VALLEYVIEW(dev)) {
+ if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
+ mode = GEN6_RC_CTL_RC6_ENABLE;
+ else
+ mode = 0;
+ }
+ DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+ (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+}
+
+static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
+{
+ /* No RC6 before Ironlake */
+ if (INTEL_INFO(dev)->gen < 5)
+ return 0;
+
+	/* RC6 is only on Ironlake mobile, not on desktop */
+ if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
+ return 0;
+
+ /* Respect the kernel parameter if it is set */
+ if (enable_rc6 >= 0) {
+ int mask;
+
+ if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+ mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
+ INTEL_RC6pp_ENABLE;
+ else
+ mask = INTEL_RC6_ENABLE;
+
+ if ((enable_rc6 & mask) != enable_rc6)
+ DRM_INFO("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
+ enable_rc6 & mask, enable_rc6, mask);
+
+ return enable_rc6 & mask;
+ }
+
+ /* Disable RC6 on Ironlake */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
+
+ if (IS_IVYBRIDGE(dev))
+ return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+
+ return INTEL_RC6_ENABLE;
+}
+
+int intel_enable_rc6(const struct drm_device *dev)
+{
+ return i915.enable_rc6;
+}
+
+static void gen8_enable_rps_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ WARN_ON(dev_priv->rps.pm_iir);
+ bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+static void gen6_enable_rps_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ WARN_ON(dev_priv->rps.pm_iir);
+ snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
+{
+ /* All of these values are in units of 50MHz */
+ dev_priv->rps.cur_freq = 0;
+	/* static values from HW: RP0 (max) > RP1 > RPn (min_freq); note the
+	 * register packs them at bit offsets 0, 8 and 16 respectively */
+ dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+ dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
+ dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
+ /* XXX: only BYT has a special efficient freq */
+ dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+ /* hw_max = RP0 until we check for overclocking */
+ dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
+
+ /* Preserve min/max settings in case of re-init */
+ if (dev_priv->rps.max_freq_softlimit == 0)
+ dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+ if (dev_priv->rps.min_freq_softlimit == 0)
+ dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+}
+
+static void gen8_enable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ uint32_t rc6_mask = 0, rp_state_cap;
+ int unused;
+
+ /* 1a: Software RC state - RC0 */
+ I915_WRITE(GEN6_RC_STATE, 0);
+
+	/* 1c & 1d: Get forcewake during program sequence. Although the driver
+	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* 2a: Disable RC states. */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ parse_rp_state_cap(dev_priv, rp_state_cap);
+
+	/* 2b: Program RC6 thresholds. */
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 125000 * 1280ns = 160ms */
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_ring(ring, dev_priv, unused)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+ /* 3: Enable RC6 */
+ if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+ intel_print_rc6_info(dev, rc6_mask);
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1) |
+ rc6_mask);
+
+	/* 4: Program defaults and thresholds for RPS */
+ I915_WRITE(GEN6_RPNSWREQ,
+ HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+ /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
+
+ /* Docs recommend 900MHz, and 300 MHz respectively */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ dev_priv->rps.max_freq_softlimit << 24 |
+ dev_priv->rps.min_freq_softlimit << 16);
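+	/* All frequencies here are in 50MHz units, so e.g. softlimits of 18
+	 * and 6 would encode the recommended 900MHz and 300MHz. */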
+
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
+ I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
+ I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
+
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ /* 5: Enable RPS */
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+	/* 6: Ring frequency + overclocking (our driver does this later) */
+
+ gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
+
+ gen8_enable_rps_interrupts(dev);
+
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void gen6_enable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ u32 rp_state_cap;
+ u32 gt_perf_status;
+ u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
+ u32 gtfifodbg;
+ int rc6_mode;
+ int i, ret;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ /* Here begins a magic sequence of register writes to enable
+ * auto-downclocking.
+ *
+ * Perhaps there might be some value in exposing these to
+ * userspace...
+ */
+ I915_WRITE(GEN6_RC_STATE, 0);
+
+ /* Clear the DBG now so we don't confuse earlier errors */
+ if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+ DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+ I915_WRITE(GTFIFODBG, gtfifodbg);
+ }
+
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+ parse_rp_state_cap(dev_priv, rp_state_cap);
+
+ /* disable the counters and set deterministic thresholds */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_ring(ring, dev_priv, i)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+ if (IS_IVYBRIDGE(dev))
+ I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
+ else
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+ I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ /* Check if we are enabling RC6 */
+ rc6_mode = intel_enable_rc6(dev_priv->dev);
+ if (rc6_mode & INTEL_RC6_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+
+ /* We don't use those on Haswell */
+ if (!IS_HASWELL(dev)) {
+ if (rc6_mode & INTEL_RC6p_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+
+ if (rc6_mode & INTEL_RC6pp_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+ }
+
+ intel_print_rc6_info(dev, rc6_mask);
+
+ I915_WRITE(GEN6_RC_CONTROL,
+ rc6_mask |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE);
+
+ /* Power down if completely idle for over 50ms */
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
+ if (ret)
+ DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
+
+ ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
+ if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
+ DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
+ (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
+ (pcu_mbox & 0xff) * 50);
+ dev_priv->rps.max_freq = pcu_mbox & 0xff;
+ }
+
+ dev_priv->rps.power = HIGH_POWER; /* force a reset */
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+
+ gen6_enable_rps_interrupts(dev);
+
+ rc6vids = 0;
+ ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+ if (IS_GEN6(dev) && ret) {
+ DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+ } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+ GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+ rc6vids &= 0xffff00;
+ rc6vids |= GEN6_ENCODE_RC6_VID(450);
+ ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+ if (ret)
+ DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+ }
+
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void __gen6_update_ring_freq(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int min_freq = 15;
+ unsigned int gpu_freq;
+ unsigned int max_ia_freq, min_ring_freq;
+ int scaling_factor = 180;
+ struct cpufreq_policy *policy;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ policy = cpufreq_cpu_get(0);
+ if (policy) {
+ max_ia_freq = policy->cpuinfo.max_freq;
+ cpufreq_cpu_put(policy);
+ } else {
+ /*
+ * Default to measured freq if none found, PCU will ensure we
+ * don't go over
+ */
+ max_ia_freq = tsc_khz;
+ }
+
+ /* Convert from kHz to MHz */
+ max_ia_freq /= 1000;
+
+ min_ring_freq = I915_READ(DCLK) & 0xf;
+ /* convert DDR frequency from units of 266.6MHz to bandwidth */
+ min_ring_freq = mult_frac(min_ring_freq, 8, 3);
+
+ /*
+ * For each potential GPU frequency, load a ring frequency we'd like
+ * to use for memory access. We do this by specifying the IA frequency
+ * the PCU should use as a reference to determine the ring frequency.
+ */
+ for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
+ gpu_freq--) {
+ int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
+ unsigned int ia_freq = 0, ring_freq = 0;
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ /* max(2 * GT, DDR). NB: GT is 50MHz units */
+ ring_freq = max(min_ring_freq, gpu_freq);
+ } else if (IS_HASWELL(dev)) {
+ ring_freq = mult_frac(gpu_freq, 5, 4);
+ ring_freq = max(min_ring_freq, ring_freq);
+ /* leave ia_freq as the default, chosen by cpufreq */
+ } else {
+ /* On older processors, there is no separate ring
+ * clock domain, so in order to boost the bandwidth
+ * of the ring, we need to upclock the CPU (ia_freq).
+ *
+ * For GPU frequencies less than 750MHz,
+ * just use the lowest ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
+ ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+ }
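+		/* Worked example with hypothetical numbers: max_ia_freq = 3400
+		 * (MHz) and diff = 10 give ia_freq = 3400 - 10*180/2 = 2500,
+		 * then DIV_ROUND_CLOSEST(2500, 100) = 25 for the PCU mailbox. */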
+
+ sandybridge_pcode_write(dev_priv,
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+ ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
+ ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
+ gpu_freq);
+ }
+}
+
+void gen6_update_ring_freq(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
+ return;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ __gen6_update_ring_freq(dev);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rp0;
+
+ val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+ rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
+ /* Clamp to max */
+ rp0 = min_t(u32, rp0, 0xea);
+
+ return rp0;
+}
+
+static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rpe;
+
+ val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
+ rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
+ val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
+ rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
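+	/* The RPe fuse value is split across two registers: the low 5 bits
+	 * come from the LO fuse, the remaining bits from the HI fuse shifted
+	 * up past them. */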
+
+ return rpe;
+}
+
+int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
+{
+ return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
+}
+
+/* Check that the pctx buffer wasn't moved under us. */
+static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
+{
+ unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
+
+ WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
+ dev_priv->vlv_pctx->stolen->start);
+}
+
+static void valleyview_setup_pctx(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *pctx;
+ unsigned long pctx_paddr;
+ u32 pcbr;
+ int pctx_size = 24*1024;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ pcbr = I915_READ(VLV_PCBR);
+ if (pcbr) {
+ /* BIOS set it up already, grab the pre-alloc'd space */
+ int pcbr_offset;
+
+ pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
+ pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
+ pcbr_offset,
+ I915_GTT_OFFSET_NONE,
+ pctx_size);
+ goto out;
+ }
+
+ /*
+ * From the Gunit register HAS:
+ * The Gfx driver is expected to program this register and ensure
+ * proper allocation within Gfx stolen memory. For example, this
+ * register should be programmed such that the PCBR range does not
+ * overlap with other ranges, such as the frame buffer, protected
+ * memory, or any other relevant ranges.
+ */
+ pctx = i915_gem_object_create_stolen(dev, pctx_size);
+ if (!pctx) {
+ DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
+ return;
+ }
+
+ pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
+ I915_WRITE(VLV_PCBR, pctx_paddr);
+
+out:
+ dev_priv->vlv_pctx = pctx;
+}
+
+static void valleyview_cleanup_pctx(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (WARN_ON(!dev_priv->vlv_pctx))
+ return;
+
+ drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
+ dev_priv->vlv_pctx = NULL;
+}
+
+static void valleyview_init_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ valleyview_setup_pctx(dev);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
+ dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+ DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+ dev_priv->rps.max_freq);
+
+ dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
+ DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ dev_priv->rps.efficient_freq);
+
+ dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
+ DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+ dev_priv->rps.min_freq);
+
+ /* Preserve min/max settings in case of re-init */
+ if (dev_priv->rps.max_freq_softlimit == 0)
+ dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+ if (dev_priv->rps.min_freq_softlimit == 0)
+ dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
+{
+ valleyview_cleanup_pctx(dev);
+}
+
+static void valleyview_enable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ u32 gtfifodbg, val, rc6_mode = 0;
+ int i;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ valleyview_check_pctx(dev_priv);
+
+ if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+ DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+ gtfifodbg);
+ I915_WRITE(GTFIFODBG, gtfifodbg);
+ }
+
+ /* If VLV, Forcewake all wells, else re-direct to regular path */
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+ I915_WRITE(GEN6_RP_UP_EI, 66000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
+
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_ring(ring, dev_priv, i)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+ I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
+
+ /* allows RC6 residency counter to work */
+ I915_WRITE(VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+ if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
+
+ intel_print_rc6_info(dev, rc6_mode);
+
+ I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+ DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+ dev_priv->rps.cur_freq = (val >> 8) & 0xff;
+ DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ dev_priv->rps.cur_freq);
+
+ DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ dev_priv->rps.efficient_freq);
+
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+
+ gen6_enable_rps_interrupts(dev);
+
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+void ironlake_teardown_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->ips.renderctx) {
+ i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
+ drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
+ dev_priv->ips.renderctx = NULL;
+ }
+
+ if (dev_priv->ips.pwrctx) {
+ i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
+ drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
+ dev_priv->ips.pwrctx = NULL;
+ }
+}
+
+static void ironlake_disable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (I915_READ(PWRCTXA)) {
+ /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+ wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+ 50);
+
+ I915_WRITE(PWRCTXA, 0);
+ POSTING_READ(PWRCTXA);
+
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+ POSTING_READ(RSTDBYCTL);
+ }
+}
+
+static int ironlake_setup_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->ips.renderctx == NULL)
+ dev_priv->ips.renderctx = intel_alloc_context_page(dev);
+ if (!dev_priv->ips.renderctx)
+ return -ENOMEM;
+
+ if (dev_priv->ips.pwrctx == NULL)
+ dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
+ if (!dev_priv->ips.pwrctx) {
+ ironlake_teardown_rc6(dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ironlake_enable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ bool was_interruptible;
+ int ret;
+
+ /* rc6 disabled by default due to repeated reports of hanging during
+ * boot and resume.
+ */
+ if (!intel_enable_rc6(dev))
+ return;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ ret = ironlake_setup_rc6(dev);
+ if (ret)
+ return;
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ /*
+ * GPU can automatically power down the render unit if given a page
+ * to save state.
+ */
+ ret = intel_ring_begin(ring, 6);
+ if (ret) {
+ ironlake_teardown_rc6(dev);
+ dev_priv->mm.interruptible = was_interruptible;
+ return;
+ }
+
+ intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+ intel_ring_emit(ring, MI_SET_CONTEXT);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ MI_RESTORE_INHIBIT);
+ intel_ring_emit(ring, MI_SUSPEND_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_advance(ring);
+
+ /*
+	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
+	 * does an implicit flush; combined with the MI_FLUSH above, it should
+	 * be safe to assume that renderctx is valid.
+ */
+ ret = intel_ring_idle(ring);
+ dev_priv->mm.interruptible = was_interruptible;
+ if (ret) {
+ DRM_ERROR("failed to enable ironlake power savings\n");
+ ironlake_teardown_rc6(dev);
+ return;
+ }
+
+ I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+
+ intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
+}
+
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+ unsigned long freq;
+ int div = (vidfreq & 0x3f0000) >> 16;
+ int post = (vidfreq & 0x3000) >> 12;
+ int pre = (vidfreq & 0x7);
+
+ if (!pre)
+ return 0;
+
+ freq = ((div * 133333) / ((1<<post) * pre));
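+	/* E.g. a hypothetical encoding of div = 16, post = 1, pre = 2 gives
+	 * 16 * 133333 / (2 * 2) = 533332, i.e. ~533MHz with the 133.333MHz
+	 * reference expressed in kHz. */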
+
+ return freq;
+}
+
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
+ { 1, 1333, 301, 28664 },
+ { 1, 1066, 294, 24460 },
+ { 1, 800, 294, 25192 },
+ { 0, 1333, 276, 27605 },
+ { 0, 1066, 276, 27605 },
+ { 0, 800, 231, 23784 },
+};
+
+static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+ u64 total_count, diff, ret;
+ u32 count1, count2, count3, m = 0, c = 0;
+ unsigned long now = jiffies_to_msecs(jiffies), diff1;
+ int i;
+
+ assert_spin_locked(&mchdev_lock);
+
+ diff1 = now - dev_priv->ips.last_time1;
+
+ /* Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ if (diff1 <= 10)
+ return dev_priv->ips.chipset_power;
+
+ count1 = I915_READ(DMIEC);
+ count2 = I915_READ(DDREC);
+ count3 = I915_READ(CSIEC);
+
+ total_count = count1 + count2 + count3;
+
+ /* FIXME: handle per-counter overflow */
+ if (total_count < dev_priv->ips.last_count1) {
+ diff = ~0UL - dev_priv->ips.last_count1;
+ diff += total_count;
+ } else {
+ diff = total_count - dev_priv->ips.last_count1;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+ if (cparams[i].i == dev_priv->ips.c_m &&
+ cparams[i].t == dev_priv->ips.r_t) {
+ m = cparams[i].m;
+ c = cparams[i].c;
+ break;
+ }
+ }
+
+ diff = div_u64(diff, diff1);
+ ret = ((m * diff) + c);
+ ret = div_u64(ret, 10);
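+	/* I.e. a linear fit from the cparams table: power ~= (m * count-rate
+	 * + c) / 10, where the rate is counter ticks per millisecond. */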
+
+ dev_priv->ips.last_count1 = total_count;
+ dev_priv->ips.last_time1 = now;
+
+ dev_priv->ips.chipset_power = ret;
+
+ return ret;
+}
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ unsigned long val;
+
+ if (INTEL_INFO(dev)->gen != 5)
+ return 0;
+
+ spin_lock_irq(&mchdev_lock);
+
+ val = __i915_chipset_val(dev_priv);
+
+ spin_unlock_irq(&mchdev_lock);
+
+ return val;
+}
+
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long m, x, b;
+ u32 tsfs;
+
+ tsfs = I915_READ(TSFS);
+
+ m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+ x = I915_READ8(TR1);
+
+ b = tsfs & TSFS_INTR_MASK;
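+	/* Linear thermal estimate: slope m and offset b come from TSFS, the
+	 * raw sensor reading x from TR1; the result is m * x / 127 - b. */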
+
+ return ((m * x) / 127) - b;
+}
+
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+ struct drm_device *dev = dev_priv->dev;
+ static const struct v_table {
+ u16 vd; /* in .1 mil */
+ u16 vm; /* in .1 mil */
+ } v_table[] = {
+ { 0, 0, },
+ { 375, 0, },
+ { 500, 0, },
+ { 625, 0, },
+ { 750, 0, },
+ { 875, 0, },
+ { 1000, 0, },
+ { 1125, 0, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4250, 3125, },
+ { 4375, 3250, },
+ { 4500, 3375, },
+ { 4625, 3500, },
+ { 4750, 3625, },
+ { 4875, 3750, },
+ { 5000, 3875, },
+ { 5125, 4000, },
+ { 5250, 4125, },
+ { 5375, 4250, },
+ { 5500, 4375, },
+ { 5625, 4500, },
+ { 5750, 4625, },
+ { 5875, 4750, },
+ { 6000, 4875, },
+ { 6125, 5000, },
+ { 6250, 5125, },
+ { 6375, 5250, },
+ { 6500, 5375, },
+ { 6625, 5500, },
+ { 6750, 5625, },
+ { 6875, 5750, },
+ { 7000, 5875, },
+ { 7125, 6000, },
+ { 7250, 6125, },
+ { 7375, 6250, },
+ { 7500, 6375, },
+ { 7625, 6500, },
+ { 7750, 6625, },
+ { 7875, 6750, },
+ { 8000, 6875, },
+ { 8125, 7000, },
+ { 8250, 7125, },
+ { 8375, 7250, },
+ { 8500, 7375, },
+ { 8625, 7500, },
+ { 8750, 7625, },
+ { 8875, 7750, },
+ { 9000, 7875, },
+ { 9125, 8000, },
+ { 9250, 8125, },
+ { 9375, 8250, },
+ { 9500, 8375, },
+ { 9625, 8500, },
+ { 9750, 8625, },
+ { 9875, 8750, },
+ { 10000, 8875, },
+ { 10125, 9000, },
+ { 10250, 9125, },
+ { 10375, 9250, },
+ { 10500, 9375, },
+ { 10625, 9500, },
+ { 10750, 9625, },
+ { 10875, 9750, },
+ { 11000, 9875, },
+ { 11125, 10000, },
+ { 11250, 10125, },
+ { 11375, 10250, },
+ { 11500, 10375, },
+ { 11625, 10500, },
+ { 11750, 10625, },
+ { 11875, 10750, },
+ { 12000, 10875, },
+ { 12125, 11000, },
+ { 12250, 11125, },
+ { 12375, 11250, },
+ { 12500, 11375, },
+ { 12625, 11500, },
+ { 12750, 11625, },
+ { 12875, 11750, },
+ { 13000, 11875, },
+ { 13125, 12000, },
+ { 13250, 12125, },
+ { 13375, 12250, },
+ { 13500, 12375, },
+ { 13625, 12500, },
+ { 13750, 12625, },
+ { 13875, 12750, },
+ { 14000, 12875, },
+ { 14125, 13000, },
+ { 14250, 13125, },
+ { 14375, 13250, },
+ { 14500, 13375, },
+ { 14625, 13500, },
+ { 14750, 13625, },
+ { 14875, 13750, },
+ { 15000, 13875, },
+ { 15125, 14000, },
+ { 15250, 14125, },
+ { 15375, 14250, },
+ { 15500, 14375, },
+ { 15625, 14500, },
+ { 15750, 14625, },
+ { 15875, 14750, },
+ { 16000, 14875, },
+ { 16125, 15000, },
+ };
+ if (INTEL_INFO(dev)->is_mobile)
+ return v_table[pxvid].vm;
+ else
+ return v_table[pxvid].vd;
+}
+
+static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+ struct timespec now, diff1;
+ u64 diff;
+ unsigned long diffms;
+ u32 count;
+
+ assert_spin_locked(&mchdev_lock);
+
+ getrawmonotonic(&now);
+ diff1 = timespec_sub(now, dev_priv->ips.last_time2);
+
+ /* Don't divide by 0 */
+ diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+ if (!diffms)
+ return;
+
+ count = I915_READ(GFXEC);
+
+ if (count < dev_priv->ips.last_count2) {
+ diff = ~0UL - dev_priv->ips.last_count2;
+ diff += count;
+ } else {
+ diff = count - dev_priv->ips.last_count2;
+ }
+
+ dev_priv->ips.last_count2 = count;
+ dev_priv->ips.last_time2 = now;
+
+ /* More magic constants... */
+ diff = diff * 1181;
+ diff = div_u64(diff, diffms * 10);
+ dev_priv->ips.gfx_power = diff;
+}
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (INTEL_INFO(dev)->gen != 5)
+ return;
+
+ spin_lock_irq(&mchdev_lock);
+
+ __i915_update_gfx_val(dev_priv);
+
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long t, corr, state1, corr2, state2;
+ u32 pxvid, ext_v;
+
+ assert_spin_locked(&mchdev_lock);
+
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
+ pxvid = (pxvid >> 24) & 0x7f;
+ ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+ state1 = ext_v;
+
+ t = i915_mch_val(dev_priv);
+
+ /* Revel in the empirically derived constants */
+
+ /* Correction factor in 1/100000 units */
+ if (t > 80)
+ corr = ((t * 2349) + 135940);
+ else if (t >= 50)
+ corr = ((t * 964) + 29317);
+ else /* < 50 */
+ corr = ((t * 301) + 1004);
+
+ corr = corr * ((150142 * state1) / 10000 - 78642);
+ corr /= 100000;
+ corr2 = (corr * dev_priv->ips.corr);
+
+ state2 = (corr2 * state1) / 10000;
+ state2 /= 100; /* convert to mW */
+
+ __i915_update_gfx_val(dev_priv);
+
+ return dev_priv->ips.gfx_power + state2;
+}
+
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ unsigned long val;
+
+ if (INTEL_INFO(dev)->gen != 5)
+ return 0;
+
+ spin_lock_irq(&mchdev_lock);
+
+ val = __i915_gfx_val(dev_priv);
+
+ spin_unlock_irq(&mchdev_lock);
+
+ return val;
+}
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+ struct drm_i915_private *dev_priv;
+ unsigned long chipset_val, graphics_val, ret = 0;
+
+ spin_lock_irq(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ chipset_val = __i915_chipset_val(dev_priv);
+ graphics_val = __i915_gfx_val(dev_priv);
+
+ ret = chipset_val + graphics_val;
+
+out_unlock:
+ spin_unlock_irq(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock_irq(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
+ dev_priv->ips.max_delay--;
+
+out_unlock:
+ spin_unlock_irq(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock_irq(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
+ dev_priv->ips.max_delay++;
+
+out_unlock:
+ spin_unlock_irq(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU business to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+ struct drm_i915_private *dev_priv;
+ struct intel_engine_cs *ring;
+ bool ret = false;
+ int i;
+
+ spin_lock_irq(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ for_each_ring(ring, dev_priv, i)
+ ret |= !list_empty(&ring->request_list);
+
+out_unlock:
+ spin_unlock_irq(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock_irq(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ dev_priv->ips.max_delay = dev_priv->ips.fstart;
+
+ if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
+ ret = false;
+
+out_unlock:
+ spin_unlock_irq(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
+/**
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+ void (*link)(void);
+
+ link = symbol_get(ips_link_to_i915_driver);
+ if (link) {
+ link();
+ symbol_put(ips_link_to_i915_driver);
+ }
+}
+
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
+{
+ /* We only register the i915 ips part with intel-ips once everything is
+ * set up, to avoid intel-ips sneaking in and reading bogus values. */
+ spin_lock_irq(&mchdev_lock);
+ i915_mch_dev = dev_priv;
+ spin_unlock_irq(&mchdev_lock);
+
+ ips_ping_for_i915_load();
+}
+
+void intel_gpu_ips_teardown(void)
+{
+ spin_lock_irq(&mchdev_lock);
+ i915_mch_dev = NULL;
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static void intel_init_emon(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 lcfuse;
+ u8 pxw[16];
+ int i;
+
+ /* Disable to program */
+ I915_WRITE(ECR, 0);
+ POSTING_READ(ECR);
+
+ /* Program energy weights for various events */
+ I915_WRITE(SDEW, 0x15040d00);
+ I915_WRITE(CSIEW0, 0x007f0000);
+ I915_WRITE(CSIEW1, 0x1e220004);
+ I915_WRITE(CSIEW2, 0x04000004);
+
+ for (i = 0; i < 5; i++)
+ I915_WRITE(PEW + (i * 4), 0);
+ for (i = 0; i < 3; i++)
+ I915_WRITE(DEW + (i * 4), 0);
+
+ /* Program P-state weights to account for frequency power adjustment */
+ for (i = 0; i < 16; i++) {
+ u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+ unsigned long freq = intel_pxfreq(pxvidfreq);
+ unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+ unsigned long val;
+
+ val = vid * vid;
+ val *= (freq / 1000);
+ val *= 255;
+ val /= (127*127*900);
+ if (val > 0xff)
+ DRM_ERROR("bad pxval: %ld\n", val);
+ pxw[i] = val;
+ }
+ /* Render standby states get 0 weight */
+ pxw[14] = 0;
+ pxw[15] = 0;
+
+ for (i = 0; i < 4; i++) {
+ u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
+ (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
+ I915_WRITE(PXW + (i * 4), val);
+ }
+
+ /* Adjust magic regs to magic values (more experimental results) */
+ I915_WRITE(OGW0, 0);
+ I915_WRITE(OGW1, 0);
+ I915_WRITE(EG0, 0x00007f00);
+ I915_WRITE(EG1, 0x0000000e);
+ I915_WRITE(EG2, 0x000e0000);
+ I915_WRITE(EG3, 0x68000300);
+ I915_WRITE(EG4, 0x42000000);
+ I915_WRITE(EG5, 0x00140031);
+ I915_WRITE(EG6, 0);
+ I915_WRITE(EG7, 0);
+
+ for (i = 0; i < 8; i++)
+ I915_WRITE(PXWL + (i * 4), 0);
+
+ /* Enable PMON + select events */
+ I915_WRITE(ECR, 0x80000019);
+
+ lcfuse = I915_READ(LCFUSE02);
+
+ dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
+}
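+
+/*
+ * Worked example for the P-state weight formula above (illustrative
+ * numbers, not taken from real fuses): with vid = 64 and freq = 400000,
+ *
+ *	val = 64 * 64 * (400000 / 1000) * 255 / (127 * 127 * 900)
+ *	    = 417792000 / 14516100
+ *	    = 28,
+ *
+ * which fits comfortably in the 8-bit pxw[] entry checked above.
+ */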
+
+void intel_init_gt_powersave(struct drm_device *dev)
+{
+ i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
+
+ if (IS_VALLEYVIEW(dev))
+ valleyview_init_gt_powersave(dev);
+}
+
+void intel_cleanup_gt_powersave(struct drm_device *dev)
+{
+ if (IS_VALLEYVIEW(dev))
+ valleyview_cleanup_gt_powersave(dev);
+}
+
+void intel_disable_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Interrupts should be disabled already to avoid re-arming. */
+ WARN_ON(dev->irq_enabled);
+
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_disable_drps(dev);
+ ironlake_disable_rc6(dev);
+ } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
+ if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work))
+ intel_runtime_pm_put(dev_priv);
+
+ cancel_work_sync(&dev_priv->rps.work);
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (IS_VALLEYVIEW(dev))
+ valleyview_disable_rps(dev);
+ else
+ gen6_disable_rps(dev);
+ dev_priv->rps.enabled = false;
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ }
+}
+
+static void intel_gen6_powersave_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ rps.delayed_resume_work.work);
+ struct drm_device *dev = dev_priv->dev;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ if (IS_VALLEYVIEW(dev)) {
+ valleyview_enable_rps(dev);
+ } else if (IS_BROADWELL(dev)) {
+ gen8_enable_rps(dev);
+ __gen6_update_ring_freq(dev);
+ } else {
+ gen6_enable_rps(dev);
+ __gen6_update_ring_freq(dev);
+ }
+ dev_priv->rps.enabled = true;
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ intel_runtime_pm_put(dev_priv);
+}
+
+void intel_enable_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_IRONLAKE_M(dev)) {
+ mutex_lock(&dev->struct_mutex);
+ ironlake_enable_drps(dev);
+ ironlake_enable_rc6(dev);
+ intel_init_emon(dev);
+ mutex_unlock(&dev->struct_mutex);
+ } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
+ /*
+ * PCU communication is slow and this doesn't need to be
+ * done at any specific time, so do this out of our fast path
+ * to make resume and init faster.
+ *
+ * We depend on the HW RC6 power context save/restore
+ * mechanism when entering D3 through runtime PM suspend. So
+ * disable RPM until RPS/RC6 is properly setup. We can only
+ * get here via the driver load/system resume/runtime resume
+ * paths, so the _noresume version is enough (and in case of
+ * runtime resume it's necessary).
+ */
+ if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
+ round_jiffies_up_relative(HZ)))
+ intel_runtime_pm_get_noresume(dev_priv);
+ }
+}
+
+void intel_reset_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->rps.enabled = false;
+ intel_enable_gt_powersave(dev);
+}
+
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void g4x_disable_trickle_feed(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_primary_plane(dev_priv, pipe);
+ }
+}
+
+static void ilk_init_lp_watermarks(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
+ I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
+ I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+
+ /*
+ * Don't touch WM1S_LP_EN here.
+ * Doing so could cause underruns.
+ */
+}
+
+static void ironlake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+
+ /*
+ * Required for FBC
+ * WaFbcDisableDpfcClockGating:ilk
+ */
+ dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
+ ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
+ ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
+
+ I915_WRITE(PCH_3DCGDIS0,
+ MARIUNIT_CLOCK_GATE_DISABLE |
+ SVSMUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(PCH_3DCGDIS1,
+ VFMUNIT_CLOCK_GATE_DISABLE);
+
+ /*
+ * According to the spec the following bits should be set in
+ * order to enable memory self-refresh
+ * The bit 22/21 of 0x42004
+ * The bit 5 of 0x42020
+ * The bit 15 of 0x45000
+ */
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+ dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
+ I915_WRITE(DISP_ARB_CTL,
+ (I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS));
+
+ ilk_init_lp_watermarks(dev);
+
+ /*
+ * Based on the document from hardware guys the following bits
+ * should be set unconditionally in order to enable FBC.
+ * The bit 22 of 0x42000
+ * The bit 22 of 0x42004
+ * The bit 7,8,9 of 0x42020.
+ */
+ if (IS_IRONLAKE_M(dev)) {
+ /* WaFbcAsynchFlipDisableFbcQueue:ilk */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE);
+ }
+
+ I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+ I915_WRITE(_3D_CHICKEN2,
+ _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+ _3D_CHICKEN2_WM_READ_PIPELINED);
+
+ /* WaDisableRenderCachePipelinedFlush:ilk */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+
+ /* WaDisable_RenderCache_OperationalFlush:ilk */
+ I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
+ g4x_disable_trickle_feed(dev);
+
+ ibx_init_clock_gating(dev);
+}
+
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t val;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+ PCH_DPLUNIT_CLOCK_GATE_DISABLE |
+ PCH_CPUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ DPLS_EDP_PPS_FIX_DIS);
+	/* The following fixes display corruption (a few pixels shifted
+	 * downward) seen only on the LVDS panels of some HP laptops with
+	 * Ivy Bridge.
+ */
+ for_each_pipe(pipe) {
+ val = I915_READ(TRANS_CHICKEN2(pipe));
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+ if (dev_priv->vbt.fdi_rx_polarity_inverted)
+ val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+ val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
+ val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
+ I915_WRITE(TRANS_CHICKEN2(pipe), val);
+ }
+ /* WADP0ClockGatingDisable */
+ for_each_pipe(pipe) {
+ I915_WRITE(TRANS_CHICKEN1(pipe),
+ TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+ }
+}
+
+static void gen6_check_mch_setup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = I915_READ(MCH_SSKPD);
+ if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
+ DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
+ DRM_INFO("This can cause pipe underruns and display issues.\n");
+ DRM_INFO("Please upgrade your BIOS to fix this.\n");
+ }
+}
+
+static void gen6_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+
+ /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
+ I915_WRITE(_3D_CHICKEN,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
+
+ /* WaSetupGtModeTdRowDispatch:snb */
+ if (IS_SNB_GT1(dev))
+ I915_WRITE(GEN6_GT_MODE,
+ _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
+
+ /* WaDisable_RenderCache_OperationalFlush:snb */
+ I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
+ /*
+	 * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ I915_WRITE(GEN6_GT_MODE,
+ GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+
+ ilk_init_lp_watermarks(dev);
+
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+ I915_WRITE(GEN6_UCGCTL1,
+ I915_READ(GEN6_UCGCTL1) |
+ GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ *
+ * WaDisableRCCUnitClockGating:snb
+ * WaDisableRCPBUnitClockGating:snb
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaStripsFansDisableFastClipPerformanceFix:snb */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
+
+ /*
+ * Bspec says:
+ * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
+ * 3DSTATE_SF number of SF output attributes is more than 16."
+ */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
+
+ /*
+ * According to the spec the following bits should be
+ * set in order to enable memory self-refresh and fbc:
+ * The bit21 and bit22 of 0x42000
+ * The bit21 and bit22 of 0x42004
+ * The bit5 and bit7 of 0x42020
+ * The bit14 of 0x70180
+ * The bit14 of 0x71180
+ *
+ * WaFbcAsynchFlipDisableFbcQueue:snb
+ */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+ I915_WRITE(ILK_DSPCLK_GATE_D,
+ I915_READ(ILK_DSPCLK_GATE_D) |
+ ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
+ ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
+
+ g4x_disable_trickle_feed(dev);
+
+ cpt_init_clock_gating(dev);
+
+ gen6_check_mch_setup(dev);
+}
+
+static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
+{
+ uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
+
+ /*
+ * WaVSThreadDispatchOverride:ivb,vlv
+ *
+ * This actually overrides the dispatch
+ * mode for all thread types.
+ */
+ reg &= ~GEN7_FF_SCHED_MASK;
+ reg |= GEN7_FF_TS_SCHED_HW;
+ reg |= GEN7_FF_VS_SCHED_HW;
+ reg |= GEN7_FF_DS_SCHED_HW;
+
+ I915_WRITE(GEN7_FF_THREAD_MODE, reg);
+}
+
+static void lpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * TODO: this bit should only be enabled when really needed, then
+ * disabled when not needed anymore in order to save power.
+ */
+ if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+ I915_WRITE(SOUTH_DSPCLK_GATE_D,
+ I915_READ(SOUTH_DSPCLK_GATE_D) |
+ PCH_LP_PARTITION_LEVEL_DISABLE);
+
+ /* WADPOClockGatingDisable:hsw */
+ I915_WRITE(_TRANSA_CHICKEN1,
+ I915_READ(_TRANSA_CHICKEN1) |
+ TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+}
+
+static void lpt_suspend_hw(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+ uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
+
+ val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ }
+}
+
+static void gen8_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /* FIXME(BDW): Check all the w/a, some might only apply to
+ * pre-production hw. */
+
+ /* WaDisablePartialInstShootdown:bdw */
+ I915_WRITE(GEN8_ROW_CHICKEN,
+ _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
+
+ /* WaDisableThreadStallDopClockGating:bdw */
+ /* FIXME: Unclear whether we really need this on production bdw. */
+ I915_WRITE(GEN8_ROW_CHICKEN,
+ _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+
+ /*
+ * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
+	 * pre-production hardware.
+ */
+ I915_WRITE(HALF_SLICE_CHICKEN3,
+ _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
+ I915_WRITE(HALF_SLICE_CHICKEN3,
+ _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+ I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
+
+ I915_WRITE(_3D_CHICKEN3,
+ _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
+
+ I915_WRITE(COMMON_SLICE_CHICKEN2,
+ _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
+
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
+
+ /* WaDisableDopClockGating:bdw May not be needed for production */
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+ /* WaSwitchSolVfFArbitrationPriority:bdw */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+
+ /* WaPsrDPAMaskVBlankInSRD:bdw */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
+
+ /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
+ for_each_pipe(pipe) {
+ I915_WRITE(CHICKEN_PIPESL_1(pipe),
+ I915_READ(CHICKEN_PIPESL_1(pipe)) |
+ BDW_DPRS_MASK_VBLANK_SRD);
+ }
+
+ /* Use Force Non-Coherent whenever executing a 3D context. This is a
+	 * workaround for a possible hang in the unlikely event a TLB
+ * invalidation occurs during a PSD flush.
+ */
+ I915_WRITE(HDC_CHICKEN0,
+ I915_READ(HDC_CHICKEN0) |
+ _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
+
+ /* WaVSRefCountFullforceMissDisable:bdw */
+ /* WaDSRefCountFullforceMissDisable:bdw */
+ I915_WRITE(GEN7_FF_THREAD_MODE,
+ I915_READ(GEN7_FF_THREAD_MODE) &
+ ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ I915_WRITE(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+
+ I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+
+ /* WaDisableSDEUnitClockGating:bdw */
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+ /* Wa4x4STCOptimizationDisable:bdw */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
+}
+
+static void haswell_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ ilk_init_lp_watermarks(dev);
+
+ /* L3 caching of data atomics doesn't work -- disable it. */
+ I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
+ I915_WRITE(HSW_ROW_CHICKEN3,
+ _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
+
+ /* This is required by WaCatErrorRejectionIssue:hsw */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ /* WaVSRefCountFullforceMissDisable:hsw */
+ I915_WRITE(GEN7_FF_THREAD_MODE,
+ I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
+
+ /* WaDisable_RenderCache_OperationalFlush:hsw */
+ I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
+ /* enable HiZ Raw Stall Optimization */
+ I915_WRITE(CACHE_MODE_0_GEN7,
+ _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
+
+ /* WaDisable4x2SubspanOptimization:hsw */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ I915_WRITE(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+
+ /* WaSwitchSolVfFArbitrationPriority:hsw */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+
+ /* WaRsPkgCStateDisplayPMReq:hsw */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+
+ lpt_init_clock_gating(dev);
+}
+
+static void ivybridge_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t snpcr;
+
+ ilk_init_lp_watermarks(dev);
+
+ I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableEarlyCull:ivb */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+ /* WaDisableBackToBackFlipFix:ivb */
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ /* WaDisablePSDDualDispatchEnable:ivb */
+ if (IS_IVB_GT1(dev))
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
+ /* WaDisable_RenderCache_OperationalFlush:ivb */
+ I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode:ivb */
+ I915_WRITE(GEN7_L3CNTLREG1,
+ GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+ GEN7_WA_L3_CHICKEN_MODE);
+ if (IS_IVB_GT1(dev))
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ else {
+ /* must write both registers */
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ }
+
+ /* WaForceL3Serialization:ivb */
+ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+ ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+ /*
+ * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating:ivb workaround.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+ /* This is required by WaCatErrorRejectionIssue:ivb */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ g4x_disable_trickle_feed(dev);
+
+ gen7_setup_fixed_func_scheduler(dev_priv);
+
+ if (0) { /* causes HiZ corruption on ivb:gt1 */
+ /* enable HiZ Raw Stall Optimization */
+ I915_WRITE(CACHE_MODE_0_GEN7,
+ _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
+ }
+
+ /* WaDisable4x2SubspanOptimization:ivb */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ I915_WRITE(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr &= ~GEN6_MBC_SNPCR_MASK;
+ snpcr |= GEN6_MBC_SNPCR_MED;
+ I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+ if (!HAS_PCH_NOP(dev))
+ cpt_init_clock_gating(dev);
+
+ gen6_check_mch_setup(dev);
+}
+
+static void valleyview_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ switch ((val >> 6) & 3) {
+ case 0:
+ case 1:
+ dev_priv->mem_freq = 800;
+ break;
+ case 2:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 3:
+ dev_priv->mem_freq = 1333;
+ break;
+ }
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
+
+ dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
+	DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz\n",
+ dev_priv->vlv_cdclk_freq);
+
+ I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableEarlyCull:vlv */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+ /* WaDisableBackToBackFlipFix:vlv */
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ /* WaPsdDispatchEnable:vlv */
+ /* WaDisablePSDDualDispatchEnable:vlv */
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
+ /* WaDisable_RenderCache_OperationalFlush:vlv */
+ I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
+ /* WaForceL3Serialization:vlv */
+ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+ ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+ /* WaDisableDopClockGating:vlv */
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+ /* This is required by WaCatErrorRejectionIssue:vlv */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ gen7_setup_fixed_func_scheduler(dev_priv);
+
+ /*
+ * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating:vlv workaround.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaDisableL3Bank2xClockGate:vlv
+	 * Disabling L3 clock gating - MMIO 940c[25] = 1
+	 * Set bit 25 to disable L3_BANK_2x_CLK_GATING */
+ I915_WRITE(GEN7_UCGCTL4,
+ I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+
+ /*
+ * BSpec says this must be set, even though
+ * WaDisable4x2SubspanOptimization isn't listed for VLV.
+ */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ /*
+ * WaIncreaseL3CreditsForVLVB0:vlv
+ * This is the hardware default actually.
+ */
+ I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
+
+ /*
+ * WaDisableVLVClockGating_VBIIssue:vlv
+	 * Disable clock gating on the GCFG unit to prevent a delay
+ * in the reporting of vblank events.
+ */
+ I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
+}
+
+static void cherryview_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+
+ /* WaDisablePartialInstShootdown:chv */
+ I915_WRITE(GEN8_ROW_CHICKEN,
+ _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
+
+ /* WaDisableThreadStallDopClockGating:chv */
+ I915_WRITE(GEN8_ROW_CHICKEN,
+ _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+
+ /* WaVSRefCountFullforceMissDisable:chv */
+ /* WaDSRefCountFullforceMissDisable:chv */
+ I915_WRITE(GEN7_FF_THREAD_MODE,
+ I915_READ(GEN7_FF_THREAD_MODE) &
+ ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
+
+ /* WaDisableSemaphoreAndSyncFlipWait:chv */
+ I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+
+ /* WaDisableCSUnitClockGating:chv */
+ I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableSDEUnitClockGating:chv */
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableSamplerPowerBypass:chv (pre-production hw) */
+ I915_WRITE(HALF_SLICE_CHICKEN3,
+ _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+
+ /* WaDisableGunitClockGating:chv (pre-production hw) */
+ I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
+ GINT_DIS);
+
+ /* WaDisableFfDopClockGating:chv (pre-production hw) */
+ I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
+
+ /* WaDisableDopClockGating:chv (pre-production hw) */
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void g4x_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate;
+
+ I915_WRITE(RENCLK_GATE_D1, 0);
+ I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+ GS_UNIT_CLOCK_GATE_DISABLE |
+ CL_UNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
+ OVRUNIT_CLOCK_GATE_DISABLE |
+ OVCUNIT_CLOCK_GATE_DISABLE;
+ if (IS_GM45(dev))
+ dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+
+ /* WaDisableRenderCachePipelinedFlush */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+
+ /* WaDisable_RenderCache_OperationalFlush:g4x */
+ I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
+ g4x_disable_trickle_feed(dev);
+}
+
+static void crestline_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+ I915_WRITE(DSPCLK_GATE_D, 0);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ I915_WRITE16(DEUC, 0);
+ I915_WRITE(MI_ARB_STATE,
+ _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+
+ /* WaDisable_RenderCache_OperationalFlush:gen4 */
+ I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+}
+
+static void broadwater_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+ I965_RCC_CLOCK_GATE_DISABLE |
+ I965_RCPB_CLOCK_GATE_DISABLE |
+ I965_ISC_CLOCK_GATE_DISABLE |
+ I965_FBC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+ I915_WRITE(MI_ARB_STATE,
+ _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+
+ /* WaDisable_RenderCache_OperationalFlush:gen4 */
+ I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+}
+
+static void gen3_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dstate = I915_READ(D_STATE);
+
+ dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+ DSTATE_DOT_CLOCK_GATING;
+ I915_WRITE(D_STATE, dstate);
+
+ if (IS_PINEVIEW(dev))
+ I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+
+ /* IIR "flip pending" means done if this bit is set */
+ I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+
+ /* interrupts should cause a wake up from C3 */
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
+
+ /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
+ I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+}
+
+static void i85x_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+
+ /* interrupts should cause a wake up from C3 */
+ I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
+ _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
+}
+
+static void i830_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+}
+
+void intel_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->display.init_clock_gating(dev);
+}
+
+void intel_suspend_hw(struct drm_device *dev)
+{
+ if (HAS_PCH_LPT(dev))
+ lpt_suspend_hw(dev);
+}
+
+#define for_each_power_well(i, power_well, domain_mask, power_domains) \
+ for (i = 0; \
+ i < (power_domains)->power_well_count && \
+ ((power_well) = &(power_domains)->power_wells[i]); \
+ i++) \
+ if ((power_well)->domains & (domain_mask))
+
+#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
+ for (i = (power_domains)->power_well_count - 1; \
+ i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
+ i--) \
+ if ((power_well)->domains & (domain_mask))
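+
+/*
+ * Typical use of the iterators above (see intel_display_power_get() and
+ * intel_display_power_put() below): walk every power well feeding a given
+ * domain, in enable or disable order, e.g.:
+ *
+ *	for_each_power_well(i, power_well, BIT(domain), power_domains)
+ *		power_well->ops->enable(dev_priv, power_well);
+ */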
+
+/**
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return I915_READ(HSW_PWR_WELL_DRIVER) ==
+ (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+}
+
+bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ bool is_enabled;
+ int i;
+
+ if (dev_priv->pm.suspended)
+ return false;
+
+ power_domains = &dev_priv->power_domains;
+
+ is_enabled = true;
+
+ for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+ if (power_well->always_on)
+ continue;
+
+ if (!power_well->hw_enabled) {
+ is_enabled = false;
+ break;
+ }
+ }
+
+ return is_enabled;
+}
+
+bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ bool ret;
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ ret = intel_display_power_enabled_unlocked(dev_priv, domain);
+ mutex_unlock(&power_domains->lock);
+
+ return ret;
+}
+
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ unsigned long irqflags;
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+	 * console_unlock(). So here we touch the VGA MSR register, making
+ * sure vgacon can keep working normally without triggering interrupts
+ * and error messages.
+ */
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+ if (IS_BROADWELL(dev)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+ dev_priv->de_irq_mask[PIPE_B]);
+ I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+ ~dev_priv->de_irq_mask[PIPE_B] |
+ GEN8_PIPE_VBLANK);
+ I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+ dev_priv->de_irq_mask[PIPE_C]);
+ I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+ ~dev_priv->de_irq_mask[PIPE_C] |
+ GEN8_PIPE_VBLANK);
+ POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+}
+
+static void hsw_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ bool is_enabled, enable_requested;
+ uint32_t tmp;
+
+ tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+ is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
+ enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
+
+ if (enable) {
+ if (!enable_requested)
+ I915_WRITE(HSW_PWR_WELL_DRIVER,
+ HSW_PWR_WELL_ENABLE_REQUEST);
+
+ if (!is_enabled) {
+ DRM_DEBUG_KMS("Enabling power well\n");
+ if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+ HSW_PWR_WELL_STATE_ENABLED), 20))
+ DRM_ERROR("Timeout enabling power well\n");
+ }
+
+ hsw_power_well_post_enable(dev_priv);
+ } else {
+ if (enable_requested) {
+ I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+ POSTING_READ(HSW_PWR_WELL_DRIVER);
+ DRM_DEBUG_KMS("Requesting to disable the power well\n");
+ }
+ }
+}
+
+static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
+
+ /*
+ * We're taking over the BIOS, so clear any requests made by it since
+ * the driver is in charge now.
+ */
+ if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
+ I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+}
+
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ hsw_set_power_well(dev_priv, power_well, true);
+}
+
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ hsw_set_power_well(dev_priv, power_well, false);
+}
+
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return true;
+}
+
+void __vlv_set_power_well(struct drm_i915_private *dev_priv,
+ enum punit_power_well power_well_id, bool enable)
+{
+ struct drm_device *dev = dev_priv->dev;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+ enum pipe pipe;
+
+ if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (enable) {
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection.
+ */
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV |
+ DPLL_INTEGRATED_CRI_CLK_VLV);
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+ } else {
+ for_each_pipe(pipe)
+ assert_pll_disabled(dev_priv, pipe);
+ /* Assert common reset */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
+ ~DPIO_CMNRST);
+ }
+ }
+
+ mask = PUNIT_PWRGT_MASK(power_well_id);
+ state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
+ PUNIT_PWRGT_PWR_GATE(power_well_id);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+ ctrl &= ~mask;
+ ctrl |= state;
+ vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+
+ if (wait_for(COND, 100))
+		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+
+#undef COND
+
+out:
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all
+ * be set to 0.
+ *
+ * This should only be done on init and resume from S3 with
+ * both PLLs disabled, or we risk losing DPIO and PLL
+ * synchronization.
+ */
+ if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ enum punit_power_well power_well_id = power_well->data;
+
+ __vlv_set_power_well(dev_priv, power_well_id, enable);
+}
+
+static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, true);
+}
+
+static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int power_well_id = power_well->data;
+ bool enabled = false;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(power_well_id);
+ ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
+ state != PUNIT_PWRGT_PWR_GATE(power_well_id));
+ if (state == ctrl)
+ enabled = true;
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+ WARN_ON(ctrl != state);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return enabled;
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /*
+	 * During driver initialization/resume we can skip restoring the
+	 * parts of the HW/SW state that will be initialized explicitly anyway.
+ */
+ if (dev_priv->power_domains.initializing)
+ return;
+
+ intel_hpd_init(dev_priv->dev);
+
+ i915_redisable_vga_power_on(dev_priv->dev);
+}
+
+static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static void check_power_well_state(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
+
+ if (power_well->always_on || !i915.disable_power_well) {
+ if (!enabled)
+ goto mismatch;
+
+ return;
+ }
+
+ if (enabled != (power_well->count > 0))
+ goto mismatch;
+
+ return;
+
+mismatch:
+	WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d)\n",
+ power_well->name, power_well->always_on, enabled,
+ power_well->count, i915.disable_power_well);
+}
+
+void intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ intel_runtime_pm_get(dev_priv);
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+
+ for_each_power_well(i, power_well, BIT(domain), power_domains) {
+ if (!power_well->count++) {
+ DRM_DEBUG_KMS("enabling %s\n", power_well->name);
+ power_well->ops->enable(dev_priv, power_well);
+ power_well->hw_enabled = true;
+ }
+
+ check_power_well_state(dev_priv, power_well);
+ }
+
+ power_domains->domain_use_count[domain]++;
+
+ mutex_unlock(&power_domains->lock);
+}
+
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+
+ WARN_ON(!power_domains->domain_use_count[domain]);
+ power_domains->domain_use_count[domain]--;
+
+ for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+ WARN_ON(!power_well->count);
+
+ if (!--power_well->count && i915.disable_power_well) {
+ DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+ power_well->hw_enabled = false;
+ power_well->ops->disable(dev_priv, power_well);
+ }
+
+ check_power_well_state(dev_priv, power_well);
+ }
+
+ mutex_unlock(&power_domains->lock);
+
+ intel_runtime_pm_put(dev_priv);
+}
+
+static struct i915_power_domains *hsw_pwr;
+
+/* Display audio driver power well request */
+int i915_request_power_well(void)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!hsw_pwr)
+ return -ENODEV;
+
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i915_request_power_well);
+
+/* Display audio driver power well release */
+int i915_release_power_well(void)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!hsw_pwr)
+ return -ENODEV;
+
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i915_release_power_well);
+
+/*
+ * Private interface for the audio driver to get CDCLK in kHz.
+ *
+ * Caller must request power well using i915_request_power_well() prior to
+ * making the call.
+ */
+int i915_get_cdclk_freq(void)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!hsw_pwr)
+ return -ENODEV;
+
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+
+ return intel_ddi_get_cdclk_freq(dev_priv);
+}
+EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
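+
+/*
+ * Illustrative call sequence for the audio-driver interface above (a
+ * sketch, not part of this patch): request the power well before querying
+ * CDCLK, and release it afterwards:
+ *
+ *	int cdclk = -ENODEV;
+ *
+ *	if (i915_request_power_well() == 0) {
+ *		cdclk = i915_get_cdclk_freq();
+ *		i915_release_power_well();
+ *	}
+ */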
+
+
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
+#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_INIT))
+#define HSW_DISPLAY_POWER_DOMAINS ( \
+ (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
+ HSW_ALWAYS_ON_POWER_DOMAINS | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
+#define BDW_DISPLAY_POWER_DOMAINS ( \
+ (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
+#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
+
+#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+ .sync_hw = i9xx_always_on_power_well_noop,
+ .enable = i9xx_always_on_power_well_noop,
+ .disable = i9xx_always_on_power_well_noop,
+ .is_enabled = i9xx_always_on_power_well_enabled,
+};
+
+static struct i915_power_well i9xx_always_on_power_well[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_ops hsw_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = hsw_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+static struct i915_power_well hsw_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "display",
+ .domains = HSW_DISPLAY_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ },
+};
+
+static struct i915_power_well bdw_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "display",
+ .domains = BDW_DISPLAY_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_ops vlv_display_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_display_power_well_enable,
+ .disable = vlv_display_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_power_well_enable,
+ .disable = vlv_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static struct i915_power_well vlv_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "display",
+ .domains = VLV_DISPLAY_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DISP2D,
+ .ops = &vlv_display_power_well_ops,
+ },
+ {
+ .name = "dpio-tx-b-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+ },
+ {
+ .name = "dpio-tx-b-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+ },
+ {
+ .name = "dpio-tx-c-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+ },
+ {
+ .name = "dpio-tx-c-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+ },
+ {
+ .name = "dpio-common",
+ .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .ops = &vlv_dpio_power_well_ops,
+ },
+};
+
+#define set_power_wells(power_domains, __power_wells) ({ \
+ (power_domains)->power_wells = (__power_wells); \
+ (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
+})
+
+int intel_power_domains_init(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ mutex_init(&power_domains->lock);
+
+ /*
+	 * The enabling order will be from lower to higher indexed wells;
+	 * the disabling order is reversed.
+ */
+ if (IS_HASWELL(dev_priv->dev)) {
+ set_power_wells(power_domains, hsw_power_wells);
+ hsw_pwr = power_domains;
+ } else if (IS_BROADWELL(dev_priv->dev)) {
+ set_power_wells(power_domains, bdw_power_wells);
+ hsw_pwr = power_domains;
+ } else if (IS_VALLEYVIEW(dev_priv->dev)) {
+ set_power_wells(power_domains, vlv_power_wells);
+ } else {
+ set_power_wells(power_domains, i9xx_always_on_power_well);
+ }
+
+ return 0;
+}
+
+void intel_power_domains_remove(struct drm_i915_private *dev_priv)
+{
+ hsw_pwr = NULL;
+}
+
+static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ mutex_lock(&power_domains->lock);
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+ power_well->ops->sync_hw(dev_priv, power_well);
+ power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
+ power_well);
+ }
+ mutex_unlock(&power_domains->lock);
+}
+
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ power_domains->initializing = true;
+ /* For now, we need the power well to be always enabled. */
+ intel_display_set_init_power(dev_priv, true);
+ intel_power_domains_resume(dev_priv);
+ power_domains->initializing = false;
+}
+
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
+{
+ intel_runtime_pm_get(dev_priv);
+}
+
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
+{
+ intel_runtime_pm_put(dev_priv);
+}
+
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_get_sync(device);
+ WARN(dev_priv->pm.suspended, "Device still suspended.\n");
+}
+
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
+ pm_runtime_get_noresume(device);
+}
+
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_mark_last_busy(device);
+ pm_runtime_put_autosuspend(device);
+}
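+
+/*
+ * Illustrative pairing (a sketch): every intel_runtime_pm_get() must be
+ * balanced by an intel_runtime_pm_put() once the hardware access is done:
+ *
+ *	intel_runtime_pm_get(dev_priv);
+ *	... access registers ...
+ *	intel_runtime_pm_put(dev_priv);
+ */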
+
+void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_set_active(device);
+
+ /*
+	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
+ * requirement.
+ */
+ if (!intel_enable_rc6(dev)) {
+ DRM_INFO("RC6 disabled, disabling runtime PM support\n");
+ return;
+ }
+
+ pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
+ pm_runtime_mark_last_busy(device);
+ pm_runtime_use_autosuspend(device);
+
+ pm_runtime_put_autosuspend(device);
+}
+
+void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ if (!intel_enable_rc6(dev))
+ return;
+
+ /* Make sure we're not suspended first. */
+ pm_runtime_get_sync(device);
+ pm_runtime_disable(device);
+}
+
+/* Set up chip specific power management-related functions */
+void intel_init_pm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_FBC(dev)) {
+ if (INTEL_INFO(dev)->gen >= 7) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = gen7_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (INTEL_INFO(dev)->gen >= 5) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = ironlake_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (IS_GM45(dev)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_enable_fbc;
+ dev_priv->display.disable_fbc = g4x_disable_fbc;
+ } else {
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_enable_fbc;
+ dev_priv->display.disable_fbc = i8xx_disable_fbc;
+
+ /* This value was pulled out of someone's hat */
+ I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
+ }
+ }
+
+ /* For cxsr */
+ if (IS_PINEVIEW(dev))
+ i915_pineview_get_mem_freq(dev);
+ else if (IS_GEN5(dev))
+ i915_ironlake_get_mem_freq(dev);
+
+ /* For FIFO watermark updates */
+ if (HAS_PCH_SPLIT(dev)) {
+ ilk_setup_wm_latency(dev);
+
+ if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
+ dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
+ (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
+ dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
+ dev_priv->display.update_wm = ilk_update_wm;
+ dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+			      "Disabling CxSR\n");
+ }
+
+ if (IS_GEN5(dev))
+ dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+ else if (IS_GEN6(dev))
+ dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+ else if (IS_IVYBRIDGE(dev))
+ dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ else if (IS_HASWELL(dev))
+ dev_priv->display.init_clock_gating = haswell_init_clock_gating;
+ else if (INTEL_INFO(dev)->gen == 8)
+ dev_priv->display.init_clock_gating = gen8_init_clock_gating;
+ } else if (IS_CHERRYVIEW(dev)) {
+ dev_priv->display.update_wm = valleyview_update_wm;
+ dev_priv->display.init_clock_gating =
+ cherryview_init_clock_gating;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.update_wm = valleyview_update_wm;
+ dev_priv->display.init_clock_gating =
+ valleyview_init_clock_gating;
+ } else if (IS_PINEVIEW(dev)) {
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq)) {
+ DRM_INFO("failed to find known CxSR latency "
+ "(found ddr%s fsb freq %d, mem freq %d), "
+ "disabling CxSR\n",
+ (dev_priv->is_ddr3 == 1) ? "3" : "2",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ /* Disable CxSR and never update its watermark again */
+ pineview_disable_cxsr(dev);
+ dev_priv->display.update_wm = NULL;
+ } else
+ dev_priv->display.update_wm = pineview_update_wm;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_G4X(dev)) {
+ dev_priv->display.update_wm = g4x_update_wm;
+ dev_priv->display.init_clock_gating = g4x_init_clock_gating;
+ } else if (IS_GEN4(dev)) {
+ dev_priv->display.update_wm = i965_update_wm;
+ if (IS_CRESTLINE(dev))
+ dev_priv->display.init_clock_gating = crestline_init_clock_gating;
+ else if (IS_BROADWATER(dev))
+ dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
+ } else if (IS_GEN3(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_GEN2(dev)) {
+ if (INTEL_INFO(dev)->num_pipes == 1) {
+ dev_priv->display.update_wm = i845_update_wm;
+ dev_priv->display.get_fifo_size = i845_get_fifo_size;
+ } else {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ }
+
+ if (IS_I85X(dev) || IS_I865G(dev))
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ else
+ dev_priv->display.init_clock_gating = i830_init_clock_gating;
+ } else {
+ DRM_ERROR("unexpected fall-through in intel_init_pm\n");
+ }
+}
+
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
+{
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
+ return -EAGAIN;
+ }
+
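+	/* NB: the incoming *val is written out first; some pcode read
+	 * commands appear to take an argument in the data register. */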
+ I915_WRITE(GEN6_PCODE_DATA, *val);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500)) {
+ DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
+ return -ETIMEDOUT;
+ }
+
+ *val = I915_READ(GEN6_PCODE_DATA);
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+
+ return 0;
+}
+
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
+{
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
+ return -EAGAIN;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, val);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500)) {
+ DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
+ return -ETIMEDOUT;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+
+ return 0;
+}
+
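+/*
+ * Sketch of the expected calling pattern (an illustration, assuming the
+ * GEN6_PCODE_WRITE_MIN_FREQ_TABLE command id from i915_reg.h): the
+ * mailbox is serialized by rps.hw_lock, which both helpers assert.
+ *
+ *	mutex_lock(&dev_priv->rps.hw_lock);
+ *	sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
+ *	mutex_unlock(&dev_priv->rps.hw_lock);
+ */
+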
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+ int div;
+
+ /* 4 x czclk */
+ switch (dev_priv->mem_freq) {
+ case 800:
+ div = 10;
+ break;
+ case 1066:
+ div = 12;
+ break;
+ case 1333:
+ div = 16;
+ break;
+ default:
+ return -1;
+ }
+
+ return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
+}
+
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+ int mul;
+
+ /* 4 x czclk */
+ switch (dev_priv->mem_freq) {
+ case 800:
+ mul = 10;
+ break;
+ case 1066:
+ mul = 12;
+ break;
+ case 1333:
+ mul = 16;
+ break;
+ default:
+ return -1;
+ }
+
+ return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
+}
+
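+/*
+ * Worked example for the two conversions above (editor's illustration):
+ * with mem_freq == 800 (div/mul == 10), opcode 0xc8 (200) maps to
+ * DIV_ROUND_CLOSEST(800 * (200 + 6 - 0xbd), 40) == 340 MHz, and
+ * vlv_freq_opcode(dev_priv, 340) == DIV_ROUND_CLOSEST(40 * 340, 800)
+ * + 0xbd - 6 == 200 again, so the mapping round-trips.
+ */
+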
+void intel_pm_setup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_init(&dev_priv->rps.hw_lock);
+
+ INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
+ intel_gen6_powersave_work);
+
+ dev_priv->pm.suspended = false;
+ dev_priv->pm.irqs_disabled = false;
+}
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
new file mode 100644
index 00000000000..a5e783a9928
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_RENDERSTATE_H
+#define _INTEL_RENDERSTATE_H
+
+#include <linux/types.h>
+
+struct intel_renderstate_rodata {
+ const u32 *reloc;
+ const u32 reloc_items;
+ const u32 *batch;
+ const u32 batch_items;
+};
+
+extern const struct intel_renderstate_rodata gen6_null_state;
+extern const struct intel_renderstate_rodata gen7_null_state;
+extern const struct intel_renderstate_rodata gen8_null_state;
+
+#define RO_RENDERSTATE(_g) \
+ const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
+ .reloc = gen ## _g ## _null_state_relocs, \
+ .reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \
+ .batch = gen ## _g ## _null_state_batch, \
+ .batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
+ }
+
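+/*
+ * For reference, RO_RENDERSTATE(6) expands to:
+ *
+ *	const struct intel_renderstate_rodata gen6_null_state = {
+ *		.reloc = gen6_null_state_relocs,
+ *		.reloc_items = sizeof(gen6_null_state_relocs)/4,
+ *		.batch = gen6_null_state_batch,
+ *		.batch_items = sizeof(gen6_null_state_batch)/4,
+ *	};
+ */
+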
+#endif /* _INTEL_RENDERSTATE_H */
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
new file mode 100644
index 00000000000..740538ad097
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
@@ -0,0 +1,289 @@
+#include "intel_renderstate.h"
+
+static const u32 gen6_null_state_relocs[] = {
+ 0x00000020,
+ 0x00000024,
+ 0x0000002c,
+ 0x000001e0,
+ 0x000001e4,
+};
+
+static const u32 gen6_null_state_batch[] = {
+ 0x69040000,
+ 0x790d0001,
+ 0x00000000,
+ 0x00000000,
+ 0x78180000,
+ 0x00000001,
+ 0x61010008,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001,
+ 0x61020000,
+ 0x00000000,
+ 0x78050001,
+ 0x00000018,
+ 0x00000000,
+ 0x780d1002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000420,
+ 0x78150003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78100004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78160003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78110005,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78170003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79050005,
+ 0xe0040000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79100000,
+ 0x00000000,
+ 0x79000002,
+ 0xffffffff,
+ 0x00000000,
+ 0x00000000,
+ 0x780e0002,
+ 0x00000441,
+ 0x00000401,
+ 0x00000401,
+ 0x78021002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000400,
+ 0x78130012,
+ 0x00400810,
+ 0x00000000,
+ 0x20000000,
+ 0x04000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78140007,
+ 0x00000280,
+ 0x08080000,
+ 0x00000000,
+ 0x00060000,
+ 0x4e080002,
+ 0x00100400,
+ 0x00000000,
+ 0x00000000,
+ 0x78090005,
+ 0x02000000,
+ 0x22220000,
+ 0x02f60000,
+ 0x11330000,
+ 0x02850004,
+ 0x11220000,
+ 0x78011002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000200,
+ 0x78080003,
+ 0x00002000,
+ 0x00000448, /* reloc */
+ 0x00000448, /* reloc */
+ 0x00000000,
+ 0x05000000, /* cmds end */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000220, /* state start */
+ 0x00000240,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0060005a,
+ 0x204077be,
+ 0x000000c0,
+ 0x008d0040,
+ 0x0060005a,
+ 0x206077be,
+ 0x000000c0,
+ 0x008d0080,
+ 0x0060005a,
+ 0x208077be,
+ 0x000000d0,
+ 0x008d0040,
+ 0x0060005a,
+ 0x20a077be,
+ 0x000000d0,
+ 0x008d0080,
+ 0x00000201,
+ 0x20080061,
+ 0x00000000,
+ 0x00000000,
+ 0x00600001,
+ 0x20200022,
+ 0x008d0000,
+ 0x00000000,
+ 0x02800031,
+ 0x21c01cc9,
+ 0x00000020,
+ 0x0a8a0001,
+ 0x00600001,
+ 0x204003be,
+ 0x008d01c0,
+ 0x00000000,
+ 0x00600001,
+ 0x206003be,
+ 0x008d01e0,
+ 0x00000000,
+ 0x00600001,
+ 0x208003be,
+ 0x008d0200,
+ 0x00000000,
+ 0x00600001,
+ 0x20a003be,
+ 0x008d0220,
+ 0x00000000,
+ 0x00600001,
+ 0x20c003be,
+ 0x008d0240,
+ 0x00000000,
+ 0x00600001,
+ 0x20e003be,
+ 0x008d0260,
+ 0x00000000,
+ 0x00600001,
+ 0x210003be,
+ 0x008d0280,
+ 0x00000000,
+ 0x00600001,
+ 0x212003be,
+ 0x008d02a0,
+ 0x00000000,
+ 0x05800031,
+ 0x24001cc8,
+ 0x00000040,
+ 0x90019000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x30000000,
+ 0x00000124,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xf99a130c,
+ 0x799a130c,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x80000031,
+ 0x00000003,
+ 0x00000000, /* state end */
+};
+
+RO_RENDERSTATE(6);
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
new file mode 100644
index 00000000000..6fa7ff2a129
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
@@ -0,0 +1,253 @@
+#include "intel_renderstate.h"
+
+static const u32 gen7_null_state_relocs[] = {
+ 0x0000000c,
+ 0x00000010,
+ 0x00000018,
+ 0x000001ec,
+};
+
+static const u32 gen7_null_state_batch[] = {
+ 0x69040000,
+ 0x61010008,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001,
+ 0x790d0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78180000,
+ 0x00000001,
+ 0x79160000,
+ 0x00000008,
+ 0x78300000,
+ 0x02010040,
+ 0x78310000,
+ 0x04000000,
+ 0x78320000,
+ 0x04000000,
+ 0x78330000,
+ 0x02000000,
+ 0x78100004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781b0005,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781c0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781d0004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78110005,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78210000,
+ 0x00000000,
+ 0x78130005,
+ 0x00000000,
+ 0x20000000,
+ 0x04000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78140001,
+ 0x20000800,
+ 0x00000000,
+ 0x781e0001,
+ 0x00000000,
+ 0x00000000,
+ 0x78050005,
+ 0xe0040000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78040001,
+ 0x00000000,
+ 0x00000000,
+ 0x78240000,
+ 0x00000240,
+ 0x78230000,
+ 0x00000260,
+ 0x782f0000,
+ 0x00000280,
+ 0x781f000c,
+ 0x00400810,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78200006,
+ 0x000002c0,
+ 0x08080000,
+ 0x00000000,
+ 0x28000402,
+ 0x00060000,
+ 0x00000000,
+ 0x00000000,
+ 0x78090005,
+ 0x02000000,
+ 0x22220000,
+ 0x02f60000,
+ 0x11230000,
+ 0x02f60004,
+ 0x11230000,
+ 0x78080003,
+ 0x00006008,
+ 0x00000340, /* reloc */
+ 0xffffffff,
+ 0x00000000,
+ 0x782a0000,
+ 0x00000360,
+ 0x79000002,
+ 0xffffffff,
+ 0x00000000,
+ 0x00000000,
+ 0x7b000005,
+ 0x0000000f,
+ 0x00000003,
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000000,
+ 0x05000000, /* cmds end */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000031, /* state start */
+ 0x00000003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xf99a130c,
+ 0x799a130c,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000492,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0080005a,
+ 0x2e2077bd,
+ 0x000000c0,
+ 0x008d0040,
+ 0x0080005a,
+ 0x2e6077bd,
+ 0x000000d0,
+ 0x008d0040,
+ 0x02800031,
+ 0x21801fa9,
+ 0x008d0e20,
+ 0x08840001,
+ 0x00800001,
+ 0x2e2003bd,
+ 0x008d0180,
+ 0x00000000,
+ 0x00800001,
+ 0x2e6003bd,
+ 0x008d01c0,
+ 0x00000000,
+ 0x00800001,
+ 0x2ea003bd,
+ 0x008d0200,
+ 0x00000000,
+ 0x00800001,
+ 0x2ee003bd,
+ 0x008d0240,
+ 0x00000000,
+ 0x05800031,
+ 0x20001fa8,
+ 0x008d0e20,
+ 0x90031000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000380,
+ 0x000003a0,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state end */
+};
+
+RO_RENDERSTATE(7);
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
new file mode 100644
index 00000000000..5c875615d42
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -0,0 +1,479 @@
+#include "intel_renderstate.h"
+
+static const u32 gen8_null_state_relocs[] = {
+ 0x00000048,
+ 0x00000050,
+ 0x00000060,
+ 0x000003ec,
+};
+
+static const u32 gen8_null_state_batch[] = {
+ 0x69040000,
+ 0x61020001,
+ 0x00000000,
+ 0x00000000,
+ 0x79120000,
+ 0x00000000,
+ 0x79130000,
+ 0x00000000,
+ 0x79140000,
+ 0x00000000,
+ 0x79150000,
+ 0x00000000,
+ 0x79160000,
+ 0x00000000,
+ 0x6101000e,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0xfffff001,
+ 0x00001001,
+ 0xfffff001,
+ 0x00001001,
+ 0x78230000,
+ 0x000006e0,
+ 0x78210000,
+ 0x00000700,
+ 0x78300000,
+ 0x08010040,
+ 0x78330000,
+ 0x08000000,
+ 0x78310000,
+ 0x08000000,
+ 0x78320000,
+ 0x08000000,
+ 0x78240000,
+ 0x00000641,
+ 0x780e0000,
+ 0x00000601,
+ 0x780d0000,
+ 0x00000000,
+ 0x78180000,
+ 0x00000001,
+ 0x78520003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78190009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781b0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78270000,
+ 0x00000000,
+ 0x782c0000,
+ 0x00000000,
+ 0x781c0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78160009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78110008,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78290000,
+ 0x00000000,
+ 0x782e0000,
+ 0x00000000,
+ 0x781a0009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781d0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78280000,
+ 0x00000000,
+ 0x782d0000,
+ 0x00000000,
+ 0x78260000,
+ 0x00000000,
+ 0x782b0000,
+ 0x00000000,
+ 0x78150009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78100007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781e0003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781f0002,
+ 0x30400820,
+ 0x00000000,
+ 0x00000000,
+ 0x78510009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78500003,
+ 0x00210000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78130002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x782a0000,
+ 0x00000480,
+ 0x782f0000,
+ 0x00000540,
+ 0x78140000,
+ 0x00000800,
+ 0x78170009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x7820000a,
+ 0x00000580,
+ 0x00000000,
+ 0x08080000,
+ 0x00000000,
+ 0x00000000,
+ 0x1f000002,
+ 0x00060000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x784d0000,
+ 0x40000000,
+ 0x784f0000,
+ 0x80000100,
+ 0x780f0000,
+ 0x00000740,
+ 0x78050006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78070003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78060003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78040001,
+ 0x00000000,
+ 0x00000001,
+ 0x79000002,
+ 0xffffffff,
+ 0x00000000,
+ 0x00000000,
+ 0x78080003,
+ 0x00006000,
+ 0x000005e0, /* reloc */
+ 0x00000000,
+ 0x00000000,
+ 0x78090005,
+ 0x02000000,
+ 0x22220000,
+ 0x02f60000,
+ 0x11230000,
+ 0x02850004,
+ 0x11230000,
+ 0x784b0000,
+ 0x0000000f,
+ 0x78490001,
+ 0x00000000,
+ 0x00000000,
+ 0x7b000005,
+ 0x00000000,
+ 0x00000003,
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000000,
+ 0x05000000, /* cmds end */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x000004c0, /* state start */
+ 0x00000500,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000092,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0060005a,
+ 0x21403ae8,
+ 0x3a0000c0,
+ 0x008d0040,
+ 0x0060005a,
+ 0x21603ae8,
+ 0x3a0000c0,
+ 0x008d0080,
+ 0x0060005a,
+ 0x21803ae8,
+ 0x3a0000d0,
+ 0x008d0040,
+ 0x0060005a,
+ 0x21a03ae8,
+ 0x3a0000d0,
+ 0x008d0080,
+ 0x02800031,
+ 0x2e0022e8,
+ 0x0e000140,
+ 0x08840001,
+ 0x05800031,
+ 0x200022e0,
+ 0x0e000e00,
+ 0x90031000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xf99a130c,
+ 0x799a130c,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state end */
+};
+
+RO_RENDERSTATE(8);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
new file mode 100644
index 00000000000..279488addf3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -0,0 +1,2405 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Zou Nan hai <nanhai.zou@intel.com>
+ * Xiang Hai hao <haihao.xiang@intel.com>
+ *
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
+ * but keeps the logic simple. Indeed, the whole purpose of this macro is just
+ * to give some indication as to some of the magic values used in the various
+ * workarounds!
+ */
+#define CACHELINE_BYTES 64
+
+static inline int __ring_space(int head, int tail, int size)
+{
+ int space = head - (tail + I915_RING_FREE_SPACE);
+ if (space < 0)
+ space += size;
+ return space;
+}
+
+static inline int ring_space(struct intel_engine_cs *ring)
+{
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
+}
+
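+/*
+ * Worked example (assuming I915_RING_FREE_SPACE == 64 in this tree):
+ * with a 4096-byte ring, head == 512 and tail == 3840 give
+ * 512 - (3840 + 64) == -3392, so the size is added back and 704 bytes
+ * remain usable; the reserve keeps tail from ever catching up with head.
+ */
+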
+static bool intel_ring_stopped(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
+}
+
+void __intel_ring_advance(struct intel_engine_cs *ring)
+{
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ ringbuf->tail &= ringbuf->size - 1;
+ if (intel_ring_stopped(ring))
+ return;
+ ring->write_tail(ring, ringbuf->tail);
+}
+
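+/*
+ * Note: the tail mask above relies on the ring size being a power of two
+ * (32 * PAGE_SIZE in intel_init_ring_buffer()), so the AND is a cheap
+ * modulo.
+ */
+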
+static int
+gen2_render_ring_flush(struct intel_engine_cs *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ u32 cmd;
+ int ret;
+
+ cmd = MI_FLUSH;
+ if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
+ cmd |= MI_NO_WRITE_FLUSH;
+
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen4_render_ring_flush(struct intel_engine_cs *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ struct drm_device *dev = ring->dev;
+ u32 cmd;
+ int ret;
+
+ /*
+ * read/write caches:
+ *
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * I915_GEM_DOMAIN_COMMAND may not exist?
+ *
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+ * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
+ cmd &= ~MI_NO_WRITE_FLUSH;
+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+ cmd |= MI_EXE_FLUSH;
+
+ if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+ (IS_G4X(dev) || IS_GEN5(dev)))
+ cmd |= MI_INVALIDATE_ISP;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+/*
+ * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
+ * implementing two workarounds on gen6. From section 1.4.7.1
+ * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
+ *
+ * [DevSNB-C+{W/A}] Before any depth stall flush (including those
+ * produced by non-pipelined state commands), software needs to first
+ * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
+ * 0.
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
+ * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
+ *
+ * And the workaround for these two requires this workaround first:
+ *
+ * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
+ * BEFORE the pipe-control with a post-sync op and no write-cache
+ * flushes.
+ *
+ * And this last workaround is tricky because of the requirements on
+ * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
+ * volume 2 part 1:
+ *
+ * "1 of the following must also be set:
+ * - Render Target Cache Flush Enable ([12] of DW1)
+ * - Depth Cache Flush Enable ([0] of DW1)
+ * - Stall at Pixel Scoreboard ([1] of DW1)
+ * - Depth Stall ([13] of DW1)
+ * - Post-Sync Operation ([13] of DW1)
+ * - Notify Enable ([8] of DW1)"
+ *
+ * The cache flushes require the workaround flush that triggered this
+ * one, so we can't use it. Depth stall would trigger the same.
+ * Post-sync nonzero is what triggered this second workaround, so we
+ * can't use that one either. Notify enable is IRQs, which aren't
+ * really our business. That leaves only stall at scoreboard.
+ */
+static int
+intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
+{
+ u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ int ret;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(ring, 0); /* low dword */
+ intel_ring_emit(ring, 0); /* high dword */
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen6_render_ring_flush(struct intel_engine_cs *ring,
+ u32 invalidate_domains, u32 flush_domains)
+{
+ u32 flags = 0;
+ u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ int ret;
+
+ /* Force SNB workarounds for PIPE_CONTROL flushes */
+ ret = intel_emit_post_sync_nonzero_flush(ring);
+ if (ret)
+ return ret;
+
+ /* Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /*
+ * Ensure that any following seqno writes only happen
+ * when the render cache is indeed flushed.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
+ }
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
+{
+ int ret;
+
+ if (!ring->fbc_dirty)
+ return 0;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+ /* WaFbcNukeOn3DBlt:ivb/hsw */
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, MSG_FBC_REND_STATE);
+ intel_ring_emit(ring, value);
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
+ intel_ring_emit(ring, MSG_FBC_REND_STATE);
+ intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+ intel_ring_advance(ring);
+
+ ring->fbc_dirty = false;
+ return 0;
+}
+
+static int
+gen7_render_ring_flush(struct intel_engine_cs *ring,
+ u32 invalidate_domains, u32 flush_domains)
+{
+ u32 flags = 0;
+ u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ int ret;
+
+ /*
+ * Ensure that any following seqno writes only happen when the render
+ * cache is indeed flushed.
+ *
+ * Workaround: 4th PIPE_CONTROL command (except the ones with only
+ * read-cache invalidate bits set) must have the CS_STALL bit set. We
+ * don't try to be clever and just set it unconditionally.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ /* Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
+ /* Workaround: we must issue a pipe_control with CS-stall bit
+ * set before a pipe_control command that has the state cache
+ * invalidate bit set. */
+ gen7_render_ring_cs_stall_wa(ring);
+ }
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ if (!invalidate_domains && flush_domains)
+ return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
+
+ return 0;
+}
+
+static int
+gen8_render_ring_flush(struct intel_engine_cs *ring,
+ u32 invalidate_domains, u32 flush_domains)
+{
+ u32 flags = 0;
+ u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ int ret;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ }
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static void ring_write_tail(struct intel_engine_cs *ring,
+ u32 value)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ I915_WRITE_TAIL(ring, value);
+}
+
+u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u64 acthd;
+
+ if (INTEL_INFO(ring->dev)->gen >= 8)
+ acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
+ RING_ACTHD_UDW(ring->mmio_base));
+ else if (INTEL_INFO(ring->dev)->gen >= 4)
+ acthd = I915_READ(RING_ACTHD(ring->mmio_base));
+ else
+ acthd = I915_READ(ACTHD);
+
+ return acthd;
+}
+
+static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u32 addr;
+
+ addr = dev_priv->status_page_dmah->busaddr;
+ if (INTEL_INFO(ring->dev)->gen >= 4)
+ addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+ I915_WRITE(HWS_PGA, addr);
+}
+
+static bool stop_ring(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
+
+ if (!IS_GEN2(ring->dev)) {
+ I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
+ if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+			DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
+ return false;
+ }
+ }
+
+ I915_WRITE_CTL(ring, 0);
+ I915_WRITE_HEAD(ring, 0);
+ ring->write_tail(ring, 0);
+
+ if (!IS_GEN2(ring->dev)) {
+ (void)I915_READ_CTL(ring);
+ I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+ }
+
+ return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+}
+
+static int init_ring_common(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct drm_i915_gem_object *obj = ringbuf->obj;
+ int ret = 0;
+
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ if (!stop_ring(ring)) {
+ /* G45 ring initialization often fails to reset head to zero */
+ DRM_DEBUG_KMS("%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+
+ if (!stop_ring(ring)) {
+ DRM_ERROR("failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+ if (I915_NEED_GFX_HWS(dev))
+ intel_ring_setup_status_page(ring);
+ else
+ ring_setup_phys_status_page(ring);
+
+ /* Initialize the ring. This must happen _after_ we've cleared the ring
+ * registers with the above sequence (the readback of the HEAD registers
+ * also enforces ordering), otherwise the hw might lose the new ring
+ * register values. */
+ I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
+ I915_WRITE_CTL(ring,
+ ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
+ | RING_VALID);
+
+ /* If the head is still not zero, the ring is dead */
+ if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+ I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
+ (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
+ DRM_ERROR("%s initialization failed "
+ "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
+ ring->name,
+ I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
+ I915_READ_HEAD(ring), I915_READ_TAIL(ring),
+ I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+ i915_kernel_lost_context(ring->dev);
+ else {
+ ringbuf->head = I915_READ_HEAD(ring);
+ ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ringbuf->space = ring_space(ring);
+ ringbuf->last_retired_head = -1;
+ }
+
+ memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+
+out:
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+
+ return ret;
+}
+
+static int
+init_pipe_control(struct intel_engine_cs *ring)
+{
+ int ret;
+
+ if (ring->scratch.obj)
+ return 0;
+
+ ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
+ if (ring->scratch.obj == NULL) {
+ DRM_ERROR("Failed to allocate seqno page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
+ if (ret)
+ goto err_unref;
+
+ ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
+ if (ret)
+ goto err_unref;
+
+ ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
+ ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
+ if (ring->scratch.cpu_page == NULL) {
+ ret = -ENOMEM;
+ goto err_unpin;
+ }
+
+ DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
+ ring->name, ring->scratch.gtt_offset);
+ return 0;
+
+err_unpin:
+ i915_gem_object_ggtt_unpin(ring->scratch.obj);
+err_unref:
+ drm_gem_object_unreference(&ring->scratch.obj->base);
+err:
+ return ret;
+}
+
+static int init_render_ring(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = init_ring_common(ring);
+
+ /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
+ if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
+
+ /* We need to disable the AsyncFlip performance optimisations in order
+ * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+ * programmed to '1' on all products.
+ *
+ * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
+ */
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+ /* Required for the hardware to program scanline values for waiting */
+ /* WaEnableFlushTlbInvalidationMode:snb */
+ if (INTEL_INFO(dev)->gen == 6)
+ I915_WRITE(GFX_MODE,
+ _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
+
+ /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
+ if (IS_GEN7(dev))
+ I915_WRITE(GFX_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
+ _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ret = init_pipe_control(ring);
+ if (ret)
+ return ret;
+ }
+
+ if (IS_GEN6(dev)) {
+ /* From the Sandybridge PRM, volume 1 part 3, page 24:
+ * "If this bit is set, STCunit will have LRA as replacement
+ * policy. [...] This bit must be reset. LRA replacement
+ * policy is not supported."
+ */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+ }
+
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+
+ if (HAS_L3_DPF(dev))
+ I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
+
+ return ret;
+}
+
+static void render_ring_cleanup(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+
+ if (ring->scratch.obj == NULL)
+ return;
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ kunmap(sg_page(ring->scratch.obj->pages->sgl));
+ i915_gem_object_ggtt_unpin(ring->scratch.obj);
+ }
+
+ drm_gem_object_unreference(&ring->scratch.obj->base);
+ ring->scratch.obj = NULL;
+}
+
+static int gen6_signal(struct intel_engine_cs *signaller,
+ unsigned int num_dwords)
+{
+ struct drm_device *dev = signaller->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *useless;
+ int i, ret;
+
+ /* NB: In order to be able to do semaphore MBOX updates for varying
+ * number of rings, it's easiest if we round up each individual update
+ * to a multiple of 2 (since ring updates must always be a multiple of
+ * 2) even though the actual update only requires 3 dwords.
+ */
+#define MBOX_UPDATE_DWORDS 4
+ if (i915_semaphore_is_enabled(dev))
+ num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
+ else
+ return intel_ring_begin(signaller, num_dwords);
+
+ ret = intel_ring_begin(signaller, num_dwords);
+ if (ret)
+ return ret;
+#undef MBOX_UPDATE_DWORDS
+
+ for_each_ring(useless, dev_priv, i) {
+ u32 mbox_reg = signaller->semaphore.mbox.signal[i];
+ if (mbox_reg != GEN6_NOSYNC) {
+ intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(signaller, mbox_reg);
+ intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, MI_NOOP);
+ } else {
+ intel_ring_emit(signaller, MI_NOOP);
+ intel_ring_emit(signaller, MI_NOOP);
+ intel_ring_emit(signaller, MI_NOOP);
+ intel_ring_emit(signaller, MI_NOOP);
+ }
+ }
+
+ return 0;
+}
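+
+/*
+ * Reservation arithmetic (editor's note): gen6_add_request() below calls
+ * signal(ring, 4), so with semaphores enabled and I915_NUM_RINGS == 5 in
+ * this tree the ring reservation grows to 4 + (5 - 1) * 4 = 20 dwords.
+ */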
+
+/**
+ * gen6_add_request - Update the semaphore mailbox registers
+ *
+ * @ring: ring that is adding a request
+ *
+ * Update the mailbox registers in the *other* rings with the current seqno.
+ * This acts like a signal in the canonical semaphore.
+ */
+static int
+gen6_add_request(struct intel_engine_cs *ring)
+{
+ int ret;
+
+ ret = ring->semaphore.signal(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ __intel_ring_advance(ring);
+
+ return 0;
+}
+
+static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+ u32 seqno)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->last_seqno < seqno;
+}
+
+/**
+ * gen6_ring_sync - sync the waiter to the signaller on seqno
+ *
+ * @waiter: ring that is waiting
+ * @signaller: ring which has, or will signal
+ * @seqno: seqno which the waiter will block on
+ */
+static int
+gen6_ring_sync(struct intel_engine_cs *waiter,
+ struct intel_engine_cs *signaller,
+ u32 seqno)
+{
+ u32 dw1 = MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_COMPARE |
+ MI_SEMAPHORE_REGISTER;
+ u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
+ int ret;
+
+ /* Throughout all of the GEM code, seqno passed implies our current
+ * seqno is >= the last seqno executed. However for hardware the
+ * comparison is strictly greater than.
+ */
+ seqno -= 1;
+
+ WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
+
+ ret = intel_ring_begin(waiter, 4);
+ if (ret)
+ return ret;
+
+ /* If seqno wrap happened, omit the wait with no-ops */
+ if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+ intel_ring_emit(waiter, dw1 | wait_mbox);
+ intel_ring_emit(waiter, seqno);
+ intel_ring_emit(waiter, 0);
+ intel_ring_emit(waiter, MI_NOOP);
+ } else {
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_emit(waiter, MI_NOOP);
+ }
+ intel_ring_advance(waiter);
+
+ return 0;
+}
+
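+/*
+ * Worked example (editor's note): to proceed once seqno 100 has executed,
+ * gen6_ring_sync() arms the wait with 99; MI_SEMAPHORE_COMPARE completes
+ * only when the signaller's mailbox value is strictly greater, i.e. once
+ * it reaches 100.
+ */
+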
+#define PIPE_CONTROL_FLUSH(ring__, addr__) \
+do { \
+ intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
+ PIPE_CONTROL_DEPTH_STALL); \
+ intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
+ intel_ring_emit(ring__, 0); \
+ intel_ring_emit(ring__, 0); \
+} while (0)
+
+static int
+pc_render_add_request(struct intel_engine_cs *ring)
+{
+ u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ int ret;
+
+ /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
+ * incoherent with writes to memory, i.e. completely fubar,
+ * so we need to use PIPE_NOTIFY instead.
+ *
+ * However, we also need to workaround the qword write
+ * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
+ * memory before requesting an interrupt.
+ */
+ ret = intel_ring_begin(ring, 32);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+ intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring, 0);
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 2 * CACHELINE_BYTES;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 2 * CACHELINE_BYTES;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 2 * CACHELINE_BYTES;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 2 * CACHELINE_BYTES;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+ PIPE_CONTROL_NOTIFY);
+ intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring, 0);
+ __intel_ring_advance(ring);
+
+ return 0;
+}
+
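+/*
+ * Dword accounting for the above (editor's note): 4 dwords for the first
+ * PIPE_CONTROL, 6 * 4 for the PIPE_CONTROL_FLUSH sequence and 4 for the
+ * final notify add up to the 32 reserved by intel_ring_begin().
+ */
+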
+static u32
+gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+{
+ /* Workaround to force correct ordering between irq and seqno writes on
+ * ivb (and maybe also on snb) by reading from a CS register (like
+ * ACTHD) before reading the status page. */
+ if (!lazy_coherency) {
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ POSTING_READ(RING_ACTHD(ring->mmio_base));
+ }
+
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static u32
+ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+{
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void
+ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+{
+ intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+}
+
+static u32
+pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+{
+ return ring->scratch.cpu_page[0];
+}
+
+static void
+pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+{
+ ring->scratch.cpu_page[0] = seqno;
+}
+
+static bool
+gen5_ring_get_irq(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0)
+ ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
+}
+
+static void
+gen5_ring_put_irq(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0)
+ ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static bool
+i9xx_ring_get_irq(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
+ dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
+}
+
+static void
+i9xx_ring_put_irq(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
+ dev_priv->irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static bool
+i8xx_ring_get_irq(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
+ dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(IMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
+}
+
+static void
+i8xx_ring_put_irq(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
+ dev_priv->irq_mask |= ring->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(IMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+void intel_ring_setup_status_page(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u32 mmio = 0;
+
+ /* The ring status page addresses are no longer next to the rest of
+ * the ring registers as of gen7.
+ */
+ if (IS_GEN7(dev)) {
+ switch (ring->id) {
+ case RCS:
+ mmio = RENDER_HWS_PGA_GEN7;
+ break;
+ case BCS:
+ mmio = BLT_HWS_PGA_GEN7;
+ break;
+ /*
+		 * VCS2 doesn't exist on Gen7; it is listed here only to
+		 * silence gcc's switch-check warning.
+ */
+ case VCS2:
+ case VCS:
+ mmio = BSD_HWS_PGA_GEN7;
+ break;
+ case VECS:
+ mmio = VEBOX_HWS_PGA_GEN7;
+ break;
+ }
+ } else if (IS_GEN6(ring->dev)) {
+ mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+ } else {
+ /* XXX: gen8 returns to sanity */
+ mmio = RING_HWS_PGA(ring->mmio_base);
+ }
+
+ I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ POSTING_READ(mmio);
+
+ /*
+ * Flush the TLB for this page
+ *
+ * FIXME: These two bits have disappeared on gen8, so a question
+ * arises: do we still need this and if so how should we go about
+ * invalidating the TLB?
+ */
+ if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+ u32 reg = RING_INSTPM(ring->mmio_base);
+
+		/* ring should be idle before issuing a sync flush */
+ WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+
+ I915_WRITE(reg,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+ 1000))
+ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ ring->name);
+ }
+}
+
+static int
+bsd_ring_flush(struct intel_engine_cs *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ return 0;
+}
+
+static int
+i9xx_add_request(struct intel_engine_cs *ring)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ __intel_ring_advance(ring);
+
+ return 0;
+}
+
+static bool
+gen6_ring_get_irq(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
+ if (HAS_L3_DPF(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring,
+ ~(ring->irq_enable_mask |
+ GT_PARITY_ERROR(dev)));
+ else
+ I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
+}
+
+static void
+gen6_ring_put_irq(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
+ if (HAS_L3_DPF(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
+ else
+ I915_WRITE_IMR(ring, ~0);
+ ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static bool
+hsw_vebox_get_irq(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
+ I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
+}
+
+static void
+hsw_vebox_put_irq(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
+ I915_WRITE_IMR(ring, ~0);
+ snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static bool
+gen8_ring_get_irq(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
+ if (HAS_L3_DPF(dev) && ring->id == RCS) {
+ I915_WRITE_IMR(ring,
+ ~(ring->irq_enable_mask |
+ GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+ } else {
+ I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ }
+ POSTING_READ(RING_IMR(ring->mmio_base));
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
+}
+
+static void
+gen8_ring_put_irq(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
+ if (HAS_L3_DPF(dev) && ring->id == RCS) {
+ I915_WRITE_IMR(ring,
+ ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+ } else {
+ I915_WRITE_IMR(ring, ~0);
+ }
+ POSTING_READ(RING_IMR(ring->mmio_base));
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static int
+i965_dispatch_execbuffer(struct intel_engine_cs *ring,
+ u64 offset, u32 length,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START |
+ MI_BATCH_GTT |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
+#define I830_BATCH_LIMIT (256*1024)
+static int
+i830_dispatch_execbuffer(struct intel_engine_cs *ring,
+ u64 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ if (flags & I915_DISPATCH_PINNED) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ } else {
+ u32 cs_offset = ring->scratch.gtt_offset;
+
+ if (len > I830_BATCH_LIMIT)
+ return -ENOSPC;
+
+ ret = intel_ring_begin(ring, 9+3);
+ if (ret)
+ return ret;
+		/* Blit the batch (which now has all relocs applied) to the
+		 * stable batch scratch bo area (so that the CS never stumbles
+		 * over its tlb invalidation bug) ... */
+ intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+ XY_SRC_COPY_BLT_WRITE_ALPHA |
+ XY_SRC_COPY_BLT_WRITE_RGB);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+ intel_ring_emit(ring, cs_offset);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 4096);
+ intel_ring_emit(ring, offset);
+ intel_ring_emit(ring, MI_FLUSH);
+
+ /* ... and execute it. */
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, cs_offset + len - 8);
+ intel_ring_advance(ring);
+ }
+
+ return 0;
+}
+
+static int
+i915_dispatch_execbuffer(struct intel_engine_cs *ring,
+ u64 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static void cleanup_status_page(struct intel_engine_cs *ring)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = ring->status_page.obj;
+ if (obj == NULL)
+ return;
+
+ kunmap(sg_page(obj->pages->sgl));
+ i915_gem_object_ggtt_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ ring->status_page.obj = NULL;
+}
+
+static int init_status_page(struct intel_engine_cs *ring)
+{
+ struct drm_i915_gem_object *obj;
+
+	obj = ring->status_page.obj;
+	if (obj == NULL) {
+ int ret;
+
+ obj = i915_gem_alloc_object(ring->dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate status page\n");
+ return -ENOMEM;
+ }
+
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ if (ret)
+ goto err_unref;
+
+ ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
+ if (ret) {
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+ return ret;
+ }
+
+ ring->status_page.obj = obj;
+ }
+
+ ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
+ ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+ ring->name, ring->status_page.gfx_addr);
+
+ return 0;
+}
+
+static int init_phys_status_page(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (!dev_priv->status_page_dmah) {
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
+ if (!dev_priv->status_page_dmah)
+ return -ENOMEM;
+ }
+
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ return 0;
+}
+
+static int allocate_ring_buffer(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ if (intel_ring_initialized(ring))
+ return 0;
+
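+	/*
+	 * Prefer stolen memory on non-LLC platforms; fall back to a
+	 * normal GEM object otherwise.
+	 */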
+ obj = NULL;
+ if (!HAS_LLC(dev))
+ obj = i915_gem_object_create_stolen(dev, ringbuf->size);
+ if (obj == NULL)
+ obj = i915_gem_alloc_object(dev, ringbuf->size);
+ if (obj == NULL)
+ return -ENOMEM;
+
+ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+ if (ret)
+ goto err_unref;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto err_unpin;
+
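+	/* Map the ring through the mappable GTT aperture, write-combined. */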
+ ringbuf->virtual_start =
+ ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
+ ringbuf->size);
+ if (ringbuf->virtual_start == NULL) {
+ ret = -EINVAL;
+ goto err_unpin;
+ }
+
+ ringbuf->obj = obj;
+ return 0;
+
+err_unpin:
+ i915_gem_object_ggtt_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+ return ret;
+}
+
+static int intel_init_ring_buffer(struct drm_device *dev,
+ struct intel_engine_cs *ring)
+{
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ int ret;
+
+ if (ringbuf == NULL) {
+ ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+ if (!ringbuf)
+ return -ENOMEM;
+ ring->buffer = ringbuf;
+ }
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ ringbuf->size = 32 * PAGE_SIZE;
+ memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
+
+ init_waitqueue_head(&ring->irq_queue);
+
+ if (I915_NEED_GFX_HWS(dev)) {
+ ret = init_status_page(ring);
+ if (ret)
+ goto error;
+ } else {
+ BUG_ON(ring->id != RCS);
+ ret = init_phys_status_page(ring);
+ if (ret)
+ goto error;
+ }
+
+ ret = allocate_ring_buffer(ring);
+ if (ret) {
+ DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
+ goto error;
+ }
+
+ /* Workaround an erratum on the i830 which causes a hang if
+ * the TAIL pointer points to within the last 2 cachelines
+ * of the buffer.
+ */
+ ringbuf->effective_size = ringbuf->size;
+ if (IS_I830(dev) || IS_845G(dev))
+ ringbuf->effective_size -= 2 * CACHELINE_BYTES;
+
+ ret = i915_cmd_parser_init_ring(ring);
+ if (ret)
+ goto error;
+
+ ret = ring->init(ring);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ kfree(ringbuf);
+ ring->buffer = NULL;
+ return ret;
+}
+
+void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+
+ if (!intel_ring_initialized(ring))
+ return;
+
+ intel_stop_ring_buffer(ring);
+ WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
+
+ iounmap(ringbuf->virtual_start);
+
+ i915_gem_object_ggtt_unpin(ringbuf->obj);
+ drm_gem_object_unreference(&ringbuf->obj->base);
+ ringbuf->obj = NULL;
+ ring->preallocated_lazy_request = NULL;
+ ring->outstanding_lazy_seqno = 0;
+
+ if (ring->cleanup)
+ ring->cleanup(ring);
+
+ cleanup_status_page(ring);
+
+ i915_cmd_parser_fini_ring(ring);
+
+ kfree(ringbuf);
+ ring->buffer = NULL;
+}
+
+static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
+{
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct drm_i915_gem_request *request;
+ u32 seqno = 0;
+ int ret;
+
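+	/* First try to reuse space freed up by already retired requests. */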
+ if (ringbuf->last_retired_head != -1) {
+ ringbuf->head = ringbuf->last_retired_head;
+ ringbuf->last_retired_head = -1;
+
+ ringbuf->space = ring_space(ring);
+ if (ringbuf->space >= n)
+ return 0;
+ }
+
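+	/*
+	 * Otherwise find the oldest outstanding request whose retirement
+	 * frees at least n bytes of ring space, and wait for it below.
+	 */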
+ list_for_each_entry(request, &ring->request_list, list) {
+ if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
+ seqno = request->seqno;
+ break;
+ }
+ }
+
+ if (seqno == 0)
+ return -ENOSPC;
+
+ ret = i915_wait_seqno(ring, seqno);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests_ring(ring);
+ ringbuf->head = ringbuf->last_retired_head;
+ ringbuf->last_retired_head = -1;
+
+ ringbuf->space = ring_space(ring);
+ return 0;
+}
+
+static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ unsigned long end;
+ int ret;
+
+ ret = intel_ring_wait_request(ring, n);
+ if (ret != -ENOSPC)
+ return ret;
+
+ /* force the tail write in case we have been skipping them */
+ __intel_ring_advance(ring);
+
+	/* With GEM the hangcheck timer should kick us out of the loop;
+	 * leaving it early runs the risk of corrupting GEM state (due
+ * to running on almost untested codepaths). But on resume
+ * timers don't work yet, so prevent a complete hang in that
+ * case by choosing an insanely large timeout. */
+ end = jiffies + 60 * HZ;
+
+ trace_i915_ring_wait_begin(ring);
+ do {
+ ringbuf->head = I915_READ_HEAD(ring);
+ ringbuf->space = ring_space(ring);
+ if (ringbuf->space >= n) {
+ ret = 0;
+ break;
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
+ dev->primary->master) {
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ }
+
+ msleep(1);
+
+ if (dev_priv->mm.interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+ dev_priv->mm.interruptible);
+ if (ret)
+ break;
+
+ if (time_after(jiffies, end)) {
+ ret = -EBUSY;
+ break;
+ }
+ } while (1);
+ trace_i915_ring_wait_end(ring);
+ return ret;
+}
+
+static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
+{
+ uint32_t __iomem *virt;
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ int rem = ringbuf->size - ringbuf->tail;
+
+ if (ringbuf->space < rem) {
+ int ret = ring_wait_for_space(ring, rem);
+ if (ret)
+ return ret;
+ }
+
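+	/* Pad the remainder of the ring with MI_NOOPs and wrap the tail. */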
+ virt = ringbuf->virtual_start + ringbuf->tail;
+ rem /= 4;
+ while (rem--)
+ iowrite32(MI_NOOP, virt++);
+
+ ringbuf->tail = 0;
+ ringbuf->space = ring_space(ring);
+
+ return 0;
+}
+
+int intel_ring_idle(struct intel_engine_cs *ring)
+{
+ u32 seqno;
+ int ret;
+
+ /* We need to add any requests required to flush the objects and ring */
+ if (ring->outstanding_lazy_seqno) {
+ ret = i915_add_request(ring, NULL);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait upon the last request to be completed */
+ if (list_empty(&ring->request_list))
+ return 0;
+
+ seqno = list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request,
+ list)->seqno;
+
+ return i915_wait_seqno(ring, seqno);
+}
+
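+/*
+ * Lazily reserve a seqno and preallocate the request struct, so that
+ * completing the request later cannot fail on an allocation.
+ */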
+static int
+intel_ring_alloc_seqno(struct intel_engine_cs *ring)
+{
+ if (ring->outstanding_lazy_seqno)
+ return 0;
+
+ if (ring->preallocated_lazy_request == NULL) {
+ struct drm_i915_gem_request *request;
+
+ request = kmalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
+ ring->preallocated_lazy_request = request;
+ }
+
+ return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
+}
+
+static int __intel_ring_prepare(struct intel_engine_cs *ring,
+ int bytes)
+{
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ int ret;
+
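+	/*
+	 * The requested dwords must be emitted contiguously: if they
+	 * would cross the effective end of the ring, pad to the end and
+	 * wrap first.
+	 */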
+ if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
+ ret = intel_wrap_ring_buffer(ring);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (unlikely(ringbuf->space < bytes)) {
+ ret = ring_wait_for_space(ring, bytes);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_ring_begin(struct intel_engine_cs *ring,
+ int num_dwords)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ int ret;
+
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+ dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
+
+ ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
+ if (ret)
+ return ret;
+
+ /* Preallocate the olr before touching the ring */
+ ret = intel_ring_alloc_seqno(ring);
+ if (ret)
+ return ret;
+
+ ring->buffer->space -= num_dwords * sizeof(uint32_t);
+ return 0;
+}
+
+/* Align the ring tail to a cacheline boundary */
+int intel_ring_cacheline_align(struct intel_engine_cs *ring)
+{
+ int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+ int ret;
+
+ if (num_dwords == 0)
+ return 0;
+
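+	/* Emit MI_NOOPs up to the next cacheline boundary. */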
+ num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
+ ret = intel_ring_begin(ring, num_dwords);
+ if (ret)
+ return ret;
+
+ while (num_dwords--)
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ BUG_ON(ring->outstanding_lazy_seqno);
+
+ if (INTEL_INFO(ring->dev)->gen >= 6) {
+ I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
+ I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
+ if (HAS_VEBOX(ring->dev))
+ I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
+ }
+
+ ring->set_seqno(ring, seqno);
+ ring->hangcheck.seqno = seqno;
+}
+
+static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
+ u32 value)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ /* Every tail move must follow the sequence below */
+
+ /* Disable notification that the ring is IDLE. The GT
+ * will then assume that it is busy and bring it out of rc6.
+ */
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+ /* Clear the context id. Here be magic! */
+ I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+
+ /* Wait for the ring not to be idle, i.e. for it to wake up. */
+ if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+ GEN6_BSD_SLEEP_INDICATOR) == 0,
+ 50))
+ DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
+
+ /* Now that the ring is fully powered up, update the tail */
+ I915_WRITE_TAIL(ring, value);
+ POSTING_READ(RING_TAIL(ring->mmio_base));
+
+ /* Let the ring send IDLE messages to the GT again,
+ * and so let it sleep to conserve power when idle.
+ */
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+}
+
+static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
+ u32 invalidate, u32 flush)
+{
+ uint32_t cmd;
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ cmd = MI_FLUSH_DW;
+ if (INTEL_INFO(ring->dev)->gen >= 8)
+ cmd += 1;
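+	/* gen8+ takes a 64-bit post-sync address, hence the extra dword */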
+ /*
+ * Bspec vol 1c.5 - video engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
+ if (invalidate & I915_GEM_GPU_DOMAINS)
+ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+ MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ if (INTEL_INFO(ring->dev)->gen >= 8) {
+ intel_ring_emit(ring, 0); /* upper addr */
+ intel_ring_emit(ring, 0); /* value */
+ } else {
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ }
+ intel_ring_advance(ring);
+ return 0;
+}
+
+static int
+gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+ u64 offset, u32 len,
+ unsigned flags)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
+ !(flags & I915_DISPATCH_SECURE);
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ /* FIXME(BDW): Address space and security selectors. */
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+ intel_ring_emit(ring, lower_32_bits(offset));
+ intel_ring_emit(ring, upper_32_bits(offset));
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+ u64 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+	/* bits 0-7 are the length on GEN6+ */
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+ u64 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+	/* bits 0-7 are the length on GEN6+ */
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+/* Blitter support (SandyBridge+) */
+
+static int gen6_ring_flush(struct intel_engine_cs *ring,
+ u32 invalidate, u32 flush)
+{
+ struct drm_device *dev = ring->dev;
+ uint32_t cmd;
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ cmd = MI_FLUSH_DW;
+ if (INTEL_INFO(ring->dev)->gen >= 8)
+ cmd += 1;
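+	/* gen8+ takes a 64-bit post-sync address, hence the extra dword */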
+ /*
+ * Bspec vol 1c.3 - blitter engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
+ if (invalidate & I915_GEM_DOMAIN_RENDER)
+ cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+ MI_FLUSH_DW_OP_STOREDW;
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ if (INTEL_INFO(ring->dev)->gen >= 8) {
+ intel_ring_emit(ring, 0); /* upper addr */
+ intel_ring_emit(ring, 0); /* value */
+ } else {
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ }
+ intel_ring_advance(ring);
+
+ if (IS_GEN7(dev) && !invalidate && flush)
+ return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
+
+ return 0;
+}
+
+int intel_init_render_ring_buffer(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ring->add_request = gen6_add_request;
+ ring->flush = gen7_render_ring_flush;
+ if (INTEL_INFO(dev)->gen == 6)
+ ring->flush = gen6_render_ring_flush;
+ if (INTEL_INFO(dev)->gen >= 8) {
+ ring->flush = gen8_render_ring_flush;
+ ring->irq_get = gen8_ring_get_irq;
+ ring->irq_put = gen8_ring_put_irq;
+ } else {
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ }
+ ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+		/*
+		 * The current semaphore scheme is only used on pre-gen8
+		 * platforms, and there is no VCS2 ring there, so the
+		 * semaphore between RCS and VCS2 is initialized as INVALID.
+		 * Gen8 will initialize the semaphore between VCS2 and RCS
+		 * later.
+		 */
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ } else if (IS_GEN5(dev)) {
+ ring->add_request = pc_render_add_request;
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = pc_render_get_seqno;
+ ring->set_seqno = pc_render_set_seqno;
+ ring->irq_get = gen5_ring_get_irq;
+ ring->irq_put = gen5_ring_put_irq;
+ ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
+ GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
+ } else {
+ ring->add_request = i9xx_add_request;
+ if (INTEL_INFO(dev)->gen < 4)
+ ring->flush = gen2_render_ring_flush;
+ else
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ if (IS_GEN2(dev)) {
+ ring->irq_get = i8xx_ring_get_irq;
+ ring->irq_put = i8xx_ring_put_irq;
+ } else {
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->irq_enable_mask = I915_USER_INTERRUPT;
+ }
+ ring->write_tail = ring_write_tail;
+ if (IS_HASWELL(dev))
+ ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+ else if (IS_GEN8(dev))
+ ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ else if (INTEL_INFO(dev)->gen >= 6)
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ else if (INTEL_INFO(dev)->gen >= 4)
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev) || IS_845G(dev))
+ ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+ ring->init = init_render_ring;
+ ring->cleanup = render_ring_cleanup;
+
+	/* Allocate a workaround batchbuffer to combat the CS TLB bug. */
+ if (HAS_BROKEN_CS_TLB(dev)) {
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate batch bo\n");
+ return -ENOMEM;
+ }
+
+ ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
+ if (ret != 0) {
+ drm_gem_object_unreference(&obj->base);
+			DRM_ERROR("Failed to pin batch bo\n");
+ return ret;
+ }
+
+ ring->scratch.obj = obj;
+ ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
+ }
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ int ret;
+
+ if (ringbuf == NULL) {
+ ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+ if (!ringbuf)
+ return -ENOMEM;
+ ring->buffer = ringbuf;
+ }
+
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ /* non-kms not supported on gen6+ */
+ ret = -ENODEV;
+ goto err_ringbuf;
+ }
+
+ /* Note: gem is not supported on gen5/ilk without kms (the corresponding
+ * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
+ * the special gen5 functions. */
+ ring->add_request = i9xx_add_request;
+ if (INTEL_INFO(dev)->gen < 4)
+ ring->flush = gen2_render_ring_flush;
+ else
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ if (IS_GEN2(dev)) {
+ ring->irq_get = i8xx_ring_get_irq;
+ ring->irq_put = i8xx_ring_put_irq;
+ } else {
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->irq_enable_mask = I915_USER_INTERRUPT;
+ ring->write_tail = ring_write_tail;
+ if (INTEL_INFO(dev)->gen >= 4)
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev) || IS_845G(dev))
+ ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+ ring->init = init_render_ring;
+ ring->cleanup = render_ring_cleanup;
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+
+ ringbuf->size = size;
+ ringbuf->effective_size = ringbuf->size;
+ if (IS_I830(ring->dev) || IS_845G(ring->dev))
+ ringbuf->effective_size -= 2 * CACHELINE_BYTES;
+
+ ringbuf->virtual_start = ioremap_wc(start, size);
+ if (ringbuf->virtual_start == NULL) {
+		DRM_ERROR("cannot ioremap virtual address for ring buffer\n");
+ ret = -ENOMEM;
+ goto err_ringbuf;
+ }
+
+ if (!I915_NEED_GFX_HWS(dev)) {
+ ret = init_phys_status_page(ring);
+ if (ret)
+ goto err_vstart;
+ }
+
+ return 0;
+
+err_vstart:
+ iounmap(ringbuf->virtual_start);
+err_ringbuf:
+ kfree(ringbuf);
+ ring->buffer = NULL;
+ return ret;
+}
+
+int intel_init_bsd_ring_buffer(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+
+ ring->name = "bsd ring";
+ ring->id = VCS;
+
+ ring->write_tail = ring_write_tail;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ring->mmio_base = GEN6_BSD_RING_BASE;
+ /* gen6 bsd needs a special wa for tail updates */
+ if (IS_GEN6(dev))
+ ring->write_tail = gen6_bsd_ring_write_tail;
+ ring->flush = gen6_bsd_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ if (INTEL_INFO(dev)->gen >= 8) {
+ ring->irq_enable_mask =
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
+ ring->irq_get = gen8_ring_get_irq;
+ ring->irq_put = gen8_ring_put_irq;
+ ring->dispatch_execbuffer =
+ gen8_ring_dispatch_execbuffer;
+ } else {
+ ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer =
+ gen6_ring_dispatch_execbuffer;
+ }
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+		/*
+		 * The current semaphore scheme is only used on pre-gen8
+		 * platforms, and there is no VCS2 ring there, so the
+		 * semaphore between VCS and VCS2 is initialized as INVALID.
+		 * Gen8 will initialize the semaphore between VCS2 and VCS
+		 * later.
+		 */
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ } else {
+ ring->mmio_base = BSD_RING_BASE;
+ ring->flush = bsd_ring_flush;
+ ring->add_request = i9xx_add_request;
+ ring->get_seqno = ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ if (IS_GEN5(dev)) {
+ ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
+ ring->irq_get = gen5_ring_get_irq;
+ ring->irq_put = gen5_ring_put_irq;
+ } else {
+ ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ }
+ ring->init = init_ring_common;
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+/**
+ * Initialize the second BSD ring, which only exists on Broadwell GT3.
+ */
+int intel_init_bsd2_ring_buffer(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+
+	if (INTEL_INFO(dev)->gen != 8) {
+ DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
+ return -EINVAL;
+ }
+
+	ring->name = "bsd2 ring";
+ ring->id = VCS2;
+
+ ring->write_tail = ring_write_tail;
+ ring->mmio_base = GEN8_BSD2_RING_BASE;
+ ring->flush = gen6_bsd_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ ring->irq_enable_mask =
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
+ ring->irq_get = gen8_ring_get_irq;
+ ring->irq_put = gen8_ring_put_irq;
+ ring->dispatch_execbuffer =
+ gen8_ring_dispatch_execbuffer;
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+	/*
+	 * The current semaphore scheme is only used on pre-gen8
+	 * platforms, and there is no VCS2 ring there, so the semaphore
+	 * registers between VCS2 and the other rings are initialized as
+	 * INVALID. Gen8 will initialize the semaphores involving VCS2
+	 * later.
+	 */
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+
+ ring->init = init_ring_common;
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int intel_init_blt_ring_buffer(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[BCS];
+
+ ring->name = "blitter ring";
+ ring->id = BCS;
+
+ ring->mmio_base = BLT_RING_BASE;
+ ring->write_tail = ring_write_tail;
+ ring->flush = gen6_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ if (INTEL_INFO(dev)->gen >= 8) {
+ ring->irq_enable_mask =
+ GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+ ring->irq_get = gen8_ring_get_irq;
+ ring->irq_put = gen8_ring_put_irq;
+ ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ } else {
+ ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ }
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+	/*
+	 * The current semaphore scheme is only used on pre-gen8
+	 * platforms, and there is no VCS2 ring there, so the semaphore
+	 * between BCS and VCS2 is initialized as INVALID.
+	 * Gen8 will initialize the semaphore between BCS and VCS2 later.
+	 */
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ ring->init = init_ring_common;
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int intel_init_vebox_ring_buffer(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+
+ ring->name = "video enhancement ring";
+ ring->id = VECS;
+
+ ring->mmio_base = VEBOX_RING_BASE;
+ ring->write_tail = ring_write_tail;
+ ring->flush = gen6_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ ring->irq_enable_mask =
+ GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
+ ring->irq_get = gen8_ring_get_irq;
+ ring->irq_put = gen8_ring_put_irq;
+ ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ } else {
+ ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+ ring->irq_get = hsw_vebox_get_irq;
+ ring->irq_put = hsw_vebox_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ }
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ ring->init = init_ring_common;
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int
+intel_ring_flush_all_caches(struct intel_engine_cs *ring)
+{
+ int ret;
+
+ if (!ring->gpu_caches_dirty)
+ return 0;
+
+ ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+ trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
+{
+ uint32_t flush_domains;
+ int ret;
+
+ flush_domains = 0;
+ if (ring->gpu_caches_dirty)
+ flush_domains = I915_GEM_GPU_DOMAINS;
+
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+ if (ret)
+ return ret;
+
+ trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
+
+void
+intel_stop_ring_buffer(struct intel_engine_cs *ring)
+{
+ int ret;
+
+ if (!intel_ring_initialized(ring))
+ return;
+
+ ret = intel_ring_idle(ring);
+ if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+ DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+ ring->name, ret);
+
+ stop_ring(ring);
+}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
new file mode 100644
index 00000000000..e72017bdcd7
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -0,0 +1,341 @@
+#ifndef _INTEL_RINGBUFFER_H_
+#define _INTEL_RINGBUFFER_H_
+
+#include <linux/hashtable.h>
+
+#define I915_CMD_HASH_ORDER 9
+
+/*
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
+ * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
+ * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
+ *
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
+ * cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+#define I915_RING_FREE_SPACE 64
+
+struct intel_hw_status_page {
+ u32 *page_addr;
+ unsigned int gfx_addr;
+ struct drm_i915_gem_object *obj;
+};
+
+#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+
+#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+
+#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+
+#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+
+#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+
+#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
+#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
+
+enum intel_ring_hangcheck_action {
+ HANGCHECK_IDLE = 0,
+ HANGCHECK_WAIT,
+ HANGCHECK_ACTIVE,
+ HANGCHECK_KICK,
+ HANGCHECK_HUNG,
+};
+
+#define HANGCHECK_SCORE_RING_HUNG 31
+
+struct intel_ring_hangcheck {
+ u64 acthd;
+ u32 seqno;
+ int score;
+ enum intel_ring_hangcheck_action action;
+ int deadlock;
+};
+
+struct intel_ringbuffer {
+ struct drm_i915_gem_object *obj;
+ void __iomem *virtual_start;
+
+ u32 head;
+ u32 tail;
+ int space;
+ int size;
+ int effective_size;
+
+ /** We track the position of the requests in the ring buffer, and
+ * when each is retired we increment last_retired_head as the GPU
+ * must have finished processing the request and so we know we
+ * can advance the ringbuffer up to that position.
+ *
+ * last_retired_head is set to -1 after the value is consumed so
+ * we can detect new retirements.
+ */
+ u32 last_retired_head;
+};
+
+struct intel_engine_cs {
+ const char *name;
+ enum intel_ring_id {
+ RCS = 0x0,
+ VCS,
+ BCS,
+ VECS,
+ VCS2
+ } id;
+#define I915_NUM_RINGS 5
+#define LAST_USER_RING (VECS + 1)
+ u32 mmio_base;
+ struct drm_device *dev;
+ struct intel_ringbuffer *buffer;
+
+ struct intel_hw_status_page status_page;
+
+ unsigned irq_refcount; /* protected by dev_priv->irq_lock */
+ u32 irq_enable_mask; /* bitmask to enable ring interrupt */
+ u32 trace_irq_seqno;
+ bool __must_check (*irq_get)(struct intel_engine_cs *ring);
+ void (*irq_put)(struct intel_engine_cs *ring);
+
+ int (*init)(struct intel_engine_cs *ring);
+
+ void (*write_tail)(struct intel_engine_cs *ring,
+ u32 value);
+ int __must_check (*flush)(struct intel_engine_cs *ring,
+ u32 invalidate_domains,
+ u32 flush_domains);
+ int (*add_request)(struct intel_engine_cs *ring);
+ /* Some chipsets are not quite as coherent as advertised and need
+ * an expensive kick to force a true read of the up-to-date seqno.
+ * However, the up-to-date seqno is not always required and the last
+ * seen value is good enough. Note that the seqno will always be
+ * monotonic, even if not coherent.
+ */
+ u32 (*get_seqno)(struct intel_engine_cs *ring,
+ bool lazy_coherency);
+ void (*set_seqno)(struct intel_engine_cs *ring,
+ u32 seqno);
+ int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
+ u64 offset, u32 length,
+ unsigned flags);
+#define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
+ void (*cleanup)(struct intel_engine_cs *ring);
+
+ struct {
+ u32 sync_seqno[I915_NUM_RINGS-1];
+
+ struct {
+ /* our mbox written by others */
+ u32 wait[I915_NUM_RINGS];
+ /* mboxes this ring signals to */
+ u32 signal[I915_NUM_RINGS];
+ } mbox;
+
+ /* AKA wait() */
+ int (*sync_to)(struct intel_engine_cs *ring,
+ struct intel_engine_cs *to,
+ u32 seqno);
+ int (*signal)(struct intel_engine_cs *signaller,
+ /* num_dwords needed by caller */
+ unsigned int num_dwords);
+ } semaphore;
+
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head request_list;
+
+ /**
+ * Do we have some not yet emitted requests outstanding?
+ */
+ struct drm_i915_gem_request *preallocated_lazy_request;
+ u32 outstanding_lazy_seqno;
+ bool gpu_caches_dirty;
+ bool fbc_dirty;
+
+ wait_queue_head_t irq_queue;
+
+ struct intel_context *default_context;
+ struct intel_context *last_context;
+
+ struct intel_ring_hangcheck hangcheck;
+
+ struct {
+ struct drm_i915_gem_object *obj;
+ u32 gtt_offset;
+ volatile u32 *cpu_page;
+ } scratch;
+
+ bool needs_cmd_parser;
+
+ /*
+ * Table of commands the command parser needs to know about
+ * for this ring.
+ */
+ DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
+
+ /*
+ * Table of registers allowed in commands that read/write registers.
+ */
+ const u32 *reg_table;
+ int reg_count;
+
+ /*
+ * Table of registers allowed in commands that read/write registers, but
+ * only from the DRM master.
+ */
+ const u32 *master_reg_table;
+ int master_reg_count;
+
+ /*
+ * Returns the bitmask for the length field of the specified command.
+ * Return 0 for an unrecognized/invalid command.
+ *
+ * If the command parser finds an entry for a command in the ring's
+ * cmd_tables, it gets the command's length based on the table entry.
+ * If not, it calls this function to determine the per-ring length field
+ * encoding for the command (i.e. certain opcode ranges use certain bits
+ * to encode the command length in the header).
+ */
+ u32 (*get_cmd_length_mask)(u32 cmd_header);
+};
+
+static inline bool
+intel_ring_initialized(struct intel_engine_cs *ring)
+{
+ return ring->buffer && ring->buffer->obj;
+}
+
+static inline unsigned
+intel_ring_flag(struct intel_engine_cs *ring)
+{
+ return 1 << ring->id;
+}
+
+static inline u32
+intel_ring_sync_index(struct intel_engine_cs *ring,
+ struct intel_engine_cs *other)
+{
+ int idx;
+
+ /*
+	 * cs -> 0 = vcs, 1 = bcs
+	 * vcs -> 0 = bcs, 1 = cs
+	 * bcs -> 0 = cs, 1 = vcs
+ */
+
+ idx = (other - ring) - 1;
+ if (idx < 0)
+ idx += I915_NUM_RINGS;
+
+ return idx;
+}
+
+static inline u32
+intel_read_status_page(struct intel_engine_cs *ring,
+ int reg)
+{
+ /* Ensure that the compiler doesn't optimize away the load. */
+ barrier();
+ return ring->status_page.page_addr[reg];
+}
+
+static inline void
+intel_write_status_page(struct intel_engine_cs *ring,
+ int reg, u32 value)
+{
+ ring->status_page.page_addr[reg] = value;
+}
+
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 0x04: ring 0 head pointer
+ * 0x05: ring 1 head pointer (915-class)
+ * 0x06: ring 2 head pointer (915-class)
+ * 0x10-0x1b: Context status DWords (GM45)
+ * 0x1f: Last written status offset. (GM45)
+ *
+ * The area from dword 0x20 to 0x3ff is available for driver usage.
+ */
+#define I915_GEM_HWS_INDEX 0x20
+#define I915_GEM_HWS_SCRATCH_INDEX 0x30
+#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+
+void intel_stop_ring_buffer(struct intel_engine_cs *ring);
+void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
+
+int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
+int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
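+/* Write one dword at the current software tail and advance it. */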
+static inline void intel_ring_emit(struct intel_engine_cs *ring,
+ u32 data)
+{
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
+ ringbuf->tail += 4;
+}
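+/* Wrap the software tail; the hardware tail register is only updated
+ * when __intel_ring_advance() is called. */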
+static inline void intel_ring_advance(struct intel_engine_cs *ring)
+{
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ ringbuf->tail &= ringbuf->size - 1;
+}
+void __intel_ring_advance(struct intel_engine_cs *ring);
+
+int __must_check intel_ring_idle(struct intel_engine_cs *ring);
+void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
+int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
+int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
+
+int intel_init_render_ring_buffer(struct drm_device *dev);
+int intel_init_bsd_ring_buffer(struct drm_device *dev);
+int intel_init_bsd2_ring_buffer(struct drm_device *dev);
+int intel_init_blt_ring_buffer(struct drm_device *dev);
+int intel_init_vebox_ring_buffer(struct drm_device *dev);
+
+u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
+void intel_ring_setup_status_page(struct intel_engine_cs *ring);
+
+static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
+{
+ return ring->buffer->tail;
+}
+
+static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
+{
+ BUG_ON(ring->outstanding_lazy_seqno == 0);
+ return ring->outstanding_lazy_seqno;
+}
+
+static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
+{
+ if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
+ ring->trace_irq_seqno = seqno;
+}
+
+/* DRI warts */
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
+
+#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index de5144c8c15..20375cc7f82 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -26,17 +26,33 @@
* Eric Anholt <eric@anholt.net>
*/
#include <linux/i2c.h>
+#include <linux/slab.h>
#include <linux/delay.h>
-#include "drmP.h"
-#include "drm.h"
-#include "drm_crtc.h"
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
#include "intel_drv.h"
-#include "drm_edid.h"
-#include "i915_drm.h"
+#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
-static char *tv_format_names[] = {
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+ SDVO_TV_MASK)
+
+#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
+#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
+#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
+
+
+static const char *tv_format_names[] = {
"NTSC_M" , "NTSC_J" , "NTSC_443",
"PAL_B" , "PAL_D" , "PAL_G" ,
"PAL_H" , "PAL_I" , "PAL_M" ,
@@ -48,18 +64,23 @@ static char *tv_format_names[] = {
#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
-struct intel_sdvo_priv {
+struct intel_sdvo {
+ struct intel_encoder base;
+
+ struct i2c_adapter *i2c;
u8 slave_addr;
+ struct i2c_adapter ddc;
+
/* Register for the SDVO device: SDVOB or SDVOC */
- int output_device;
+ uint32_t sdvo_reg;
/* Active outputs controlled by this SDVO output */
uint16_t controlled_output;
/*
* Capabilities of the SDVO device returned by
- * i830_sdvo_get_capabilities()
+ * intel_sdvo_get_capabilities()
*/
struct intel_sdvo_caps caps;
@@ -72,6 +93,18 @@ struct intel_sdvo_priv {
*/
uint16_t attached_output;
+ /*
+ * Hotplug activation bits for this device
+ */
+ uint16_t hotplug_active;
+
+ /**
+	 * This is used to select the color range of RGB outputs in HDMI mode.
+ * It is only valid when using TMDS encoding and 8 bit per color mode.
+ */
+ uint32_t color_range;
+ bool color_range_auto;
+
/**
* This is set if we're going to treat the device as TV-out.
*
@@ -81,74 +114,77 @@ struct intel_sdvo_priv {
*/
bool is_tv;
- /* This is for current tv format name */
- char *tv_format_name;
+ /* On different gens SDVOB is at different places. */
+ bool is_sdvob;
- /* This contains all current supported TV format */
- char *tv_format_supported[TV_FORMAT_NUM];
- int format_supported_num;
- struct drm_property *tv_format_property;
- struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
+ /* This is for current tv format name */
+ int tv_format_index;
/**
* This is set if we treat the device as HDMI, instead of DVI.
*/
bool is_hdmi;
+ bool has_hdmi_monitor;
+ bool has_hdmi_audio;
+ bool rgb_quant_range_selectable;
/**
- * This is set if we detect output of sdvo device as LVDS.
+ * This is set if we detect output of sdvo device as LVDS and
+ * have a valid fixed mode to use with the panel.
*/
bool is_lvds;
/**
- * This is sdvo flags for input timing.
- */
- uint8_t sdvo_flags;
-
- /**
 	 * This is the sdvo fixed panel mode pointer
*/
struct drm_display_mode *sdvo_lvds_fixed_mode;
- /**
- * Returned SDTV resolutions allowed for the current format, if the
- * device reported it.
- */
- struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
+ /* DDC bus used by this SDVO encoder */
+ uint8_t ddc_bus;
/*
- * supported encoding mode, used to determine whether HDMI is
- * supported
+ * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd
*/
- struct intel_sdvo_encode encode;
+ uint8_t dtd_sdvo_flags;
+};
- /* DDC bus used by this SDVO output */
- uint8_t ddc_bus;
+struct intel_sdvo_connector {
+ struct intel_connector base;
+
+ /* Mark the type of connector */
+ uint16_t output_flag;
- /* Mac mini hack -- use the same DDC as the analog connector */
- struct i2c_adapter *analog_ddc_bus;
+ enum hdmi_force_audio force_audio;
+
+ /* This contains all current supported TV format */
+ u8 tv_format_supported[TV_FORMAT_NUM];
+ int format_supported_num;
+ struct drm_property *tv_format;
- int save_sdvo_mult;
- u16 save_active_outputs;
- struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
- struct intel_sdvo_dtd save_output_dtd[16];
- u32 save_SDVOX;
/* add the property for the SDVO-TV */
- struct drm_property *left_property;
- struct drm_property *right_property;
- struct drm_property *top_property;
- struct drm_property *bottom_property;
- struct drm_property *hpos_property;
- struct drm_property *vpos_property;
+ struct drm_property *left;
+ struct drm_property *right;
+ struct drm_property *top;
+ struct drm_property *bottom;
+ struct drm_property *hpos;
+ struct drm_property *vpos;
+ struct drm_property *contrast;
+ struct drm_property *saturation;
+ struct drm_property *hue;
+ struct drm_property *sharpness;
+ struct drm_property *flicker_filter;
+ struct drm_property *flicker_filter_adaptive;
+ struct drm_property *flicker_filter_2d;
+ struct drm_property *tv_chroma_filter;
+ struct drm_property *tv_luma_filter;
+ struct drm_property *dot_crawl;
/* add the property for the SDVO-TV/LVDS */
- struct drm_property *brightness_property;
- struct drm_property *contrast_property;
- struct drm_property *saturation_property;
- struct drm_property *hue_property;
+ struct drm_property *brightness;
/* Add variable to record current setting for the above property */
u32 left_margin, right_margin, top_margin, bottom_margin;
+
/* this is to get the range of margin.*/
u32 max_hscan, max_vscan;
u32 max_hpos, cur_hpos;
@@ -157,29 +193,63 @@ struct intel_sdvo_priv {
u32 cur_contrast, max_contrast;
u32 cur_saturation, max_saturation;
u32 cur_hue, max_hue;
+ u32 cur_sharpness, max_sharpness;
+ u32 cur_flicker_filter, max_flicker_filter;
+ u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive;
+ u32 cur_flicker_filter_2d, max_flicker_filter_2d;
+ u32 cur_tv_chroma_filter, max_tv_chroma_filter;
+ u32 cur_tv_luma_filter, max_tv_luma_filter;
+ u32 cur_dot_crawl, max_dot_crawl;
};
+static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
+{
+ return container_of(encoder, struct intel_sdvo, base);
+}
+
+static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+{
+ return to_sdvo(intel_attached_encoder(connector));
+}
+
+static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
+{
+ return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base);
+}
+
static bool
-intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags);
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
+static bool
+intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ int type);
+static bool
+intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector);
/**
* Writes the SDVOB or SDVOC with the given value, but always writes both
* SDVOB and SDVOC to work around apparent hardware issues (according to
* comments in the BIOS).
*/
-static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val)
+static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
{
- struct drm_device *dev = intel_output->base.dev;
+ struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
u32 bval = val, cval = val;
int i;
- if (sdvo_priv->output_device == SDVOB) {
- cval = I915_READ(SDVOC);
- } else {
- bval = I915_READ(SDVOB);
+ if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
+ I915_WRITE(intel_sdvo->sdvo_reg, val);
+ I915_READ(intel_sdvo->sdvo_reg);
+ return;
}
+
+ if (intel_sdvo->sdvo_reg == GEN3_SDVOB)
+ cval = I915_READ(GEN3_SDVOC);
+ else
+ bval = I915_READ(GEN3_SDVOB);
+
/*
* Write the registers twice for luck. Sometimes,
* writing them only once doesn't appear to 'stick'.
@@ -187,209 +257,190 @@ static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val)
*/
for (i = 0; i < 2; i++)
{
- I915_WRITE(SDVOB, bval);
- I915_READ(SDVOB);
- I915_WRITE(SDVOC, cval);
- I915_READ(SDVOC);
+ I915_WRITE(GEN3_SDVOB, bval);
+ I915_READ(GEN3_SDVOB);
+ I915_WRITE(GEN3_SDVOC, cval);
+ I915_READ(GEN3_SDVOC);
}
}
-static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
- u8 *ch)
+static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- u8 out_buf[2];
- u8 buf[2];
- int ret;
-
struct i2c_msg msgs[] = {
{
- .addr = sdvo_priv->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr,
.flags = 0,
.len = 1,
- .buf = out_buf,
+ .buf = &addr,
},
{
- .addr = sdvo_priv->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
- .buf = buf,
+ .buf = ch,
}
};
+ int ret;
- out_buf[0] = addr;
- out_buf[1] = 0;
-
- if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2)
- {
- *ch = buf[0];
+ if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2)
return true;
- }
DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
return false;
}
-static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
- u8 ch)
-{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- u8 out_buf[2];
- struct i2c_msg msgs[] = {
- {
- .addr = sdvo_priv->slave_addr >> 1,
- .flags = 0,
- .len = 2,
- .buf = out_buf,
- }
- };
-
- out_buf[0] = addr;
- out_buf[1] = ch;
-
- if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1)
- {
- return true;
- }
- return false;
-}
-
#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
/** Mapping of command numbers to names, for debug output */
static const struct _sdvo_cmd_name {
u8 cmd;
- char *name;
+ const char *name;
} sdvo_cmd_names[] = {
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
- /* Add the op code for SDVO enhancements */
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
- /* HDMI op code */
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
+ /* Add the op code for SDVO enhancements */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
+ /* HDMI op code */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
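The table relies on a stringifying helper so each opcode carries its own
name for the debug output below. The macro itself is defined earlier in the
file, outside this hunk; a sketch consistent with how the entries are used
here (treat the exact definition as an assumption):

    /* Sketch: pair each opcode value with its stringified name. */
    #define SDVO_CMD_NAME_ENTRY(cmd) { cmd, #cmd }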
-#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
-#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv)
+#define SDVO_NAME(sdvo) ((sdvo)->is_sdvob ? "SDVOB" : "SDVOC")
-static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
- void *args, int args_len)
+static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- int i;
+ int i, pos = 0;
+#define BUF_LEN 256
+ char buffer[BUF_LEN];
+
+#define BUF_PRINT(args...) \
+ pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+
- DRM_DEBUG_KMS("%s: W: %02X ",
- SDVO_NAME(sdvo_priv), cmd);
- for (i = 0; i < args_len; i++)
- DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
- for (; i < 8; i++)
- DRM_LOG_KMS(" ");
- for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) {
+ for (i = 0; i < args_len; i++) {
+ BUF_PRINT("%02X ", ((u8 *)args)[i]);
+ }
+ for (; i < 8; i++) {
+ BUF_PRINT(" ");
+ }
+ for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
if (cmd == sdvo_cmd_names[i].cmd) {
- DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
+ BUF_PRINT("(%s)", sdvo_cmd_names[i].name);
break;
}
}
- if (i == sizeof(sdvo_cmd_names)/ sizeof(sdvo_cmd_names[0]))
- DRM_LOG_KMS("(%02X)", cmd);
- DRM_LOG_KMS("\n");
-}
-
-static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
- void *args, int args_len)
-{
- int i;
-
- intel_sdvo_debug_write(intel_output, cmd, args, args_len);
-
- for (i = 0; i < args_len; i++) {
- intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i,
- ((u8*)args)[i]);
+ if (i == ARRAY_SIZE(sdvo_cmd_names)) {
+ BUF_PRINT("(%02X)", cmd);
}
+ BUG_ON(pos >= BUF_LEN - 1);
+#undef BUF_PRINT
+#undef BUF_LEN
- intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd);
+ DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
}
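A note on the BUF_PRINT() idiom above: snprintf() returns the number of
characters it would have written, so pos can legitimately run past BUF_LEN;
clamping the size argument with max_t() turns overflow into truncation,
which the final BUG_ON() then catches. A standalone sketch of the same
pattern (userspace headers and a hypothetical helper name):

    #include <stdio.h>

    /* Accumulate formatted output into a fixed buffer, clamping the
     * remaining space at zero so overflow degrades into truncation. */
    static int hex_line(char *buf, int buflen, const unsigned char *data, int n)
    {
    	int i, pos = 0;

    	for (i = 0; i < n; i++) {
    		int room = buflen - pos;

    		pos += snprintf(buf + pos, room > 0 ? room : 0,
    				"%02X ", data[i]);
    	}
    	return pos;	/* pos >= buflen - 1 signals truncation */
    }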
static const char *cmd_status_names[] = {
@@ -402,53 +453,145 @@ static const char *cmd_status_names[] = {
"Scaling not supported"
};
-static void intel_sdvo_debug_response(struct intel_output *intel_output,
- void *response, int response_len,
- u8 status)
+static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- int i;
+ u8 *buf, status;
+ struct i2c_msg *msgs;
+ int i, ret = true;
- DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv));
- for (i = 0; i < response_len; i++)
- DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
- for (; i < 8; i++)
- DRM_LOG_KMS(" ");
- if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
- DRM_LOG_KMS("(%s)", cmd_status_names[status]);
- else
- DRM_LOG_KMS("(??? %d)", status);
- DRM_LOG_KMS("\n");
+ /* Would be simpler to allocate both in one go? */
+ buf = kzalloc(args_len * 2 + 2, GFP_KERNEL);
+ if (!buf)
+ return false;
+
+ msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
+ if (!msgs) {
+ kfree(buf);
+ return false;
+ }
+
+ intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
+
+ for (i = 0; i < args_len; i++) {
+ msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2*i;
+ buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+ buf[2*i + 1] = ((u8*)args)[i];
+ }
+ msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2*i;
+ buf[2*i + 0] = SDVO_I2C_OPCODE;
+ buf[2*i + 1] = cmd;
+
+ /* the following two are to read the response */
+ status = SDVO_I2C_CMD_STATUS;
+ msgs[i+1].addr = intel_sdvo->slave_addr;
+ msgs[i+1].flags = 0;
+ msgs[i+1].len = 1;
+ msgs[i+1].buf = &status;
+
+ msgs[i+2].addr = intel_sdvo->slave_addr;
+ msgs[i+2].flags = I2C_M_RD;
+ msgs[i+2].len = 1;
+ msgs[i+2].buf = &status;
+
+ ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+ ret = false;
+ goto out;
+ }
+ if (ret != i+3) {
+ /* failure in I2C transfer */
+ DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
+ ret = false;
+ }
+
+out:
+ kfree(msgs);
+ kfree(buf);
+ return ret;
}
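Everything goes out in a single i2c_transfer() call so no other traffic can
interleave between the argument writes, the opcode write, and the status
read-back. For a two-argument command the message array looks roughly like
this (illustrative layout, not code from the patch):

    /*
     *   msgs[0]  write  { SDVO_I2C_ARG_0,     args[0] }
     *   msgs[1]  write  { SDVO_I2C_ARG_0 - 1, args[1] }
     *   msgs[2]  write  { SDVO_I2C_OPCODE,    cmd     }
     *   msgs[3]  write  { SDVO_I2C_CMD_STATUS }    set the register pointer
     *   msgs[4]  read   { &status }                read one status byte back
     */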
-static u8 intel_sdvo_read_response(struct intel_output *intel_output,
- void *response, int response_len)
+static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
+ void *response, int response_len)
{
- int i;
+ u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
- u8 retry = 50;
-
- while (retry--) {
- /* Read the command response */
- for (i = 0; i < response_len; i++) {
- intel_sdvo_read_byte(intel_output,
- SDVO_I2C_RETURN_0 + i,
- &((u8 *)response)[i]);
- }
+ int i, pos = 0;
+#define BUF_LEN 256
+ char buffer[BUF_LEN];
+
+ /*
+ * The documentation states that all commands will be
+ * processed within 15µs, and that we need only poll
+ * the status byte a maximum of 3 times in order for the
+ * command to be complete.
+ *
+ * Check 5 times in case the hardware failed to read the docs.
+ *
+ * Also beware that the first response by many devices is to
+ * reply PENDING and stall for time. TVs are notorious for
+ * requiring longer than specified to complete their replies.
+ * Originally (in the DDX long ago), the delay was only ever 15ms
+ * with an additional delay of 30ms applied for TVs added later after
+ * many experiments. To accommodate both sets of delays, we do a
+ * sequence of slow checks if the device is falling behind and fails
+ * to reply within 5*15µs.
+ */
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+
+ while ((status == SDVO_CMD_STATUS_PENDING ||
+ status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
+ if (retry < 10)
+ msleep(15);
+ else
+ udelay(15);
- /* read the return status */
- intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS,
- &status);
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+ }
- intel_sdvo_debug_response(intel_output, response, response_len,
- status);
- if (status != SDVO_CMD_STATUS_PENDING)
- return status;
+#define BUF_PRINT(args...) \
+ pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+
+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+ BUF_PRINT("(%s)", cmd_status_names[status]);
+ else
+ BUF_PRINT("(??? %d)", status);
- mdelay(50);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ goto log_fail;
+
+ /* Read the command response */
+ for (i = 0; i < response_len; i++) {
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]))
+ goto log_fail;
+ BUF_PRINT(" %02X", ((u8 *)response)[i]);
}
+ BUG_ON(pos >= BUF_LEN - 1);
+#undef BUF_PRINT
+#undef BUF_LEN
- return status;
+ DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
+ return true;
+
+log_fail:
+ DRM_DEBUG_KMS("%s: R: ... failed\n", SDVO_NAME(intel_sdvo));
+ return false;
}
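The retry ladder is easy to misread: retry counts down from 15, so the
re-checks at retry 14..10 take the udelay() branch and the remainder sleep.
A minimal sketch of the two-speed poll, assuming a hypothetical
read_status() helper in place of intel_sdvo_read_byte():

    /* Sketch: poll quickly first, then back off to 15 ms sleeps. */
    static bool wait_for_sdvo_status(u8 *status)
    {
    	u8 retry = 15;

    	if (!read_status(status))
    		return false;
    	while (*status == SDVO_CMD_STATUS_PENDING && --retry) {
    		if (retry < 10)
    			msleep(15);	/* slow path: 15 ms */
    		else
    			udelay(15);	/* fast path: 15 us */
    		if (!read_status(status))
    			return false;
    	}
    	return *status == SDVO_CMD_STATUS_SUCCESS;
    }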
static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
@@ -461,34 +604,38 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
return 4;
}
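Only the tail of intel_sdvo_get_pixel_multiplier() is visible in this hunk.
In the full source it buckets the mode clock so the SDVO link always runs
in its 100-200 MHz band; a sketch of that logic (cutoffs recalled from the
surrounding file, so treat them as assumptions):

    /* Pick the multiplier that lands clock * mult in roughly 100-200 MHz. */
    static int pixel_multiplier_sketch(int clock_khz)
    {
    	if (clock_khz >= 100000)
    		return 1;	/* e.g. 148500 kHz stays at 1x */
    	else if (clock_khz >= 50000)
    		return 2;	/* e.g. 74250 kHz -> 148.5 MHz */
    	else
    		return 4;	/* e.g. 31500 kHz -> 126 MHz */
    }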
-/**
- * Don't check status code from this as it switches the bus back to the
- * SDVO chips which defeats the purpose of doing a bus switch in the first
- * place.
- */
-static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
- u8 target)
+static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
+ u8 ddc_bus)
{
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1);
+ /* This must be the immediately preceding write before the i2c xfer */
+ return intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ &ddc_bus, 1);
}
-static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
+static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
{
- struct intel_sdvo_set_target_input_args targets = {0};
- u8 status;
-
- if (target_0 && target_1)
- return SDVO_CMD_STATUS_NOTSUPP;
+ if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+ return false;
- if (target_1)
- targets.target_1 = 1;
+ return intel_sdvo_read_response(intel_sdvo, NULL, 0);
+}
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets,
- sizeof(targets));
+static bool
+intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len)
+{
+ if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0))
+ return false;
- status = intel_sdvo_read_response(intel_output, NULL, 0);
+ return intel_sdvo_read_response(intel_sdvo, value, len);
+}
- return (status == SDVO_CMD_STATUS_SUCCESS);
+static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
+{
+ struct intel_sdvo_set_target_input_args targets = {0};
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TARGET_INPUT,
+ &targets, sizeof(targets));
}
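These two helpers are the payoff of the refactor: nearly every wrapper that
follows collapses into one intel_sdvo_set_value() or intel_sdvo_get_value()
call, as intel_sdvo_set_target_input() just did. A hypothetical wrapper in
the same shape (not part of the patch):

    static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
    				       u8 state)
    {
    	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_AUDIO_STAT,
    				    &state, sizeof(state));
    }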
/**
@@ -497,14 +644,13 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool
* This function is making an assumption about the layout of the response,
* which should be checked against the docs.
*/
-static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2)
+static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2)
{
struct intel_sdvo_get_trained_inputs_response response;
- u8 status;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &response, sizeof(response));
- if (status != SDVO_CMD_STATUS_SUCCESS)
+ BUILD_BUG_ON(sizeof(response) != 1);
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+ &response, sizeof(response)))
return false;
*input_1 = response.input0_trained;
@@ -512,32 +658,26 @@ static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, boo
return true;
}
-static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output,
- u16 *outputs)
+static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
+ u16 outputs)
{
- u8 status;
-
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
- status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs));
-
- return (status == SDVO_CMD_STATUS_SUCCESS);
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_ACTIVE_OUTPUTS,
+ &outputs, sizeof(outputs));
}
-static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output,
- u16 outputs)
+static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo,
+ u16 *outputs)
{
- u8 status;
-
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
- sizeof(outputs));
- status = intel_sdvo_read_response(intel_output, NULL, 0);
- return (status == SDVO_CMD_STATUS_SUCCESS);
+ return intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_ACTIVE_OUTPUTS,
+ outputs, sizeof(*outputs));
}
-static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output,
+static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
int mode)
{
- u8 status, state = SDVO_ENCODER_STATE_ON;
+ u8 state = SDVO_ENCODER_STATE_ON;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -554,122 +694,78 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output
break;
}
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
- sizeof(state));
- status = intel_sdvo_read_response(intel_output, NULL, 0);
-
- return (status == SDVO_CMD_STATUS_SUCCESS);
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
}
-static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output,
+static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo,
int *clock_min,
int *clock_max)
{
struct intel_sdvo_pixel_clock_range clocks;
- u8 status;
-
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
- NULL, 0);
- status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks));
-
- if (status != SDVO_CMD_STATUS_SUCCESS)
+ BUILD_BUG_ON(sizeof(clocks) != 4);
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+ &clocks, sizeof(clocks)))
return false;
/* Convert the values from units of 10 kHz to kHz. */
*clock_min = clocks.min * 10;
*clock_max = clocks.max * 10;
-
return true;
}
-static bool intel_sdvo_set_target_output(struct intel_output *intel_output,
+static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo,
u16 outputs)
{
- u8 status;
-
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
- sizeof(outputs));
-
- status = intel_sdvo_read_response(intel_output, NULL, 0);
- return (status == SDVO_CMD_STATUS_SUCCESS);
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TARGET_OUTPUT,
+ &outputs, sizeof(outputs));
}
-static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd,
+static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
struct intel_sdvo_dtd *dtd)
{
- u8 status;
-
- intel_sdvo_write_cmd(intel_output, cmd, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &dtd->part1,
- sizeof(dtd->part1));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &dtd->part2,
- sizeof(dtd->part2));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
-}
-
-static bool intel_sdvo_get_input_timing(struct intel_output *intel_output,
- struct intel_sdvo_dtd *dtd)
-{
- return intel_sdvo_get_timing(intel_output,
- SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
+ return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
}
-static bool intel_sdvo_get_output_timing(struct intel_output *intel_output,
- struct intel_sdvo_dtd *dtd)
-{
- return intel_sdvo_get_timing(intel_output,
- SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
-}
-
-static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd,
+static bool intel_sdvo_get_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
struct intel_sdvo_dtd *dtd)
{
- u8 status;
-
- intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1));
- status = intel_sdvo_read_response(intel_output, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2));
- status = intel_sdvo_read_response(intel_output, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
+ return intel_sdvo_get_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_get_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
}
-static bool intel_sdvo_set_input_timing(struct intel_output *intel_output,
+static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_set_timing(intel_output,
+ return intel_sdvo_set_timing(intel_sdvo,
SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
}
-static bool intel_sdvo_set_output_timing(struct intel_output *intel_output,
+static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_set_timing(intel_output,
+ return intel_sdvo_set_timing(intel_sdvo,
SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
}
+static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_get_timing(intel_sdvo,
+ SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
+}
+
static bool
-intel_sdvo_create_preferred_input_timing(struct intel_output *output,
+intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
uint16_t clock,
uint16_t width,
uint16_t height)
{
struct intel_sdvo_preferred_input_timing_args args;
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
- uint8_t status;
memset(&args, 0, sizeof(args));
args.clock = clock;
@@ -677,94 +773,59 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output,
args.height = height;
args.interlace = 0;
- if (sdvo_priv->is_lvds &&
- (sdvo_priv->sdvo_lvds_fixed_mode->hdisplay != width ||
- sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height))
+ if (intel_sdvo->is_lvds &&
+ (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
+ intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
args.scaled = 1;
- intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
- &args, sizeof(args));
- status = intel_sdvo_read_response(output, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+ &args, sizeof(args));
}
-static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output,
+static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
- bool status;
-
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
- NULL, 0);
-
- status = intel_sdvo_read_response(output, &dtd->part1,
- sizeof(dtd->part1));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
- NULL, 0);
-
- status = intel_sdvo_read_response(output, &dtd->part2,
- sizeof(dtd->part2));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return false;
+ BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+ BUILD_BUG_ON(sizeof(dtd->part2) != 8);
+ return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+ &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+ &dtd->part2, sizeof(dtd->part2));
}
-static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
+static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val)
{
- u8 response, status;
-
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &response, 1);
-
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
- return SDVO_CLOCK_RATE_MULT_1X;
- } else {
- DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response);
- }
-
- return response;
-}
-
-static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val)
-{
- u8 status;
-
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
- status = intel_sdvo_read_response(intel_output, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
}
static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
uint16_t width, height;
uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
uint16_t h_sync_offset, v_sync_offset;
+ int mode_clock;
+
+ memset(dtd, 0, sizeof(*dtd));
- width = mode->crtc_hdisplay;
- height = mode->crtc_vdisplay;
+ width = mode->hdisplay;
+ height = mode->vdisplay;
/* do some mode translations */
- h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
- h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ h_blank_len = mode->htotal - mode->hdisplay;
+ h_sync_len = mode->hsync_end - mode->hsync_start;
- v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
- v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ v_blank_len = mode->vtotal - mode->vdisplay;
+ v_sync_len = mode->vsync_end - mode->vsync_start;
- h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
- v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+ h_sync_offset = mode->hsync_start - mode->hdisplay;
+ v_sync_offset = mode->vsync_start - mode->vdisplay;
+
+ mode_clock = mode->clock;
+ mode_clock /= 10;
+ dtd->part1.clock = mode_clock;
- dtd->part1.clock = mode->clock / 10;
dtd->part1.h_active = width & 0xff;
dtd->part1.h_blank = h_blank_len & 0xff;
dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
@@ -783,87 +844,84 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
((v_sync_len & 0x30) >> 4);
dtd->part2.dtd_flags = 0x18;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- dtd->part2.dtd_flags |= 0x2;
+ dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- dtd->part2.dtd_flags |= 0x4;
+ dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
- dtd->part2.sdvo_flags = 0;
dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
- dtd->part2.reserved = 0;
}
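A worked example makes the nibble packing concrete. Take a mode 1280 pixels
wide (0x500) with a 408-pixel horizontal blank (0x198):

    /*
     *   part1.h_active = 1280 & 0xff                          = 0x00
     *   part1.h_blank  =  408 & 0xff                          = 0x98
     *   part1.h_high   = ((1280 >> 8) & 0xf) << 4
     *                  |  ((408 >> 8) & 0xf)                  = 0x51
     *
     * intel_sdvo_get_mode_from_dtd() below reverses this exactly:
     *   hdisplay = 0x00 + (0x5 << 8)        = 1280
     *   htotal   = 1280 + 0x98 + (0x1 << 8) = 1688
     */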
-static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
- struct intel_sdvo_dtd *dtd)
+static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *pmode,
+ const struct intel_sdvo_dtd *dtd)
{
- mode->hdisplay = dtd->part1.h_active;
- mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
- mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
- mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
- mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
- mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
- mode->htotal = mode->hdisplay + dtd->part1.h_blank;
- mode->htotal += (dtd->part1.h_high & 0xf) << 8;
-
- mode->vdisplay = dtd->part1.v_active;
- mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
- mode->vsync_start = mode->vdisplay;
- mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
- mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
- mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
- mode->vsync_end = mode->vsync_start +
- (dtd->part2.v_sync_off_width & 0xf);
- mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
- mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
- mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+ struct drm_display_mode mode = {};
- mode->clock = dtd->part1.clock * 10;
+ mode.hdisplay = dtd->part1.h_active;
+ mode.hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+ mode.hsync_start = mode.hdisplay + dtd->part2.h_sync_off;
+ mode.hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+ mode.hsync_end = mode.hsync_start + dtd->part2.h_sync_width;
+ mode.hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+ mode.htotal = mode.hdisplay + dtd->part1.h_blank;
+ mode.htotal += (dtd->part1.h_high & 0xf) << 8;
- mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
- if (dtd->part2.dtd_flags & 0x2)
- mode->flags |= DRM_MODE_FLAG_PHSYNC;
- if (dtd->part2.dtd_flags & 0x4)
- mode->flags |= DRM_MODE_FLAG_PVSYNC;
-}
+ mode.vdisplay = dtd->part1.v_active;
+ mode.vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+ mode.vsync_start = mode.vdisplay;
+ mode.vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+ mode.vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+ mode.vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+ mode.vsync_end = mode.vsync_start +
+ (dtd->part2.v_sync_off_width & 0xf);
+ mode.vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+ mode.vtotal = mode.vdisplay + dtd->part1.v_blank;
+ mode.vtotal += (dtd->part1.v_high & 0xf) << 8;
-static bool intel_sdvo_get_supp_encode(struct intel_output *output,
- struct intel_sdvo_encode *encode)
-{
- uint8_t status;
+ mode.clock = dtd->part1.clock * 10;
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
- status = intel_sdvo_read_response(output, encode, sizeof(*encode));
- if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */
- memset(encode, 0, sizeof(*encode));
- return false;
- }
+ if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+ mode.flags |= DRM_MODE_FLAG_INTERLACE;
+ if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
+ mode.flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ mode.flags |= DRM_MODE_FLAG_NHSYNC;
+ if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
+ mode.flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ mode.flags |= DRM_MODE_FLAG_NVSYNC;
- return true;
+ drm_mode_set_crtcinfo(&mode, 0);
+
+ drm_mode_copy(pmode, &mode);
}
-static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode)
+static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
{
- uint8_t status;
+ struct intel_sdvo_encode encode;
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1);
- status = intel_sdvo_read_response(output, NULL, 0);
+ BUILD_BUG_ON(sizeof(encode) != 2);
+ return intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPP_ENCODE,
+ &encode, sizeof(encode));
+}
- return (status == SDVO_CMD_STATUS_SUCCESS);
+static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
+ uint8_t mode)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
}
-static bool intel_sdvo_set_colorimetry(struct intel_output *output,
+static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
uint8_t mode)
{
- uint8_t status;
-
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
- status = intel_sdvo_read_response(output, NULL, 0);
-
- return (status == SDVO_CMD_STATUS_SUCCESS);
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
}
#if 0
-static void intel_sdvo_dump_hdmi_buf(struct intel_output *output)
+static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
{
int i, j;
uint8_t set_buf_index[2];
@@ -872,272 +930,264 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_output *output)
uint8_t buf[48];
uint8_t *pos;
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
- intel_sdvo_read_response(output, &av_split, 1);
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
for (i = 0; i <= av_split; i++) {
set_buf_index[0] = i; set_buf_index[1] = 0;
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX,
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
set_buf_index, 2);
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
- intel_sdvo_read_response(output, &buf_size, 1);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+ intel_sdvo_read_response(intel_sdvo, &buf_size, 1);
pos = buf;
for (j = 0; j <= buf_size; j += 8) {
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA,
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_DATA,
NULL, 0);
- intel_sdvo_read_response(output, pos, 8);
+ intel_sdvo_read_response(intel_sdvo, pos, 8);
pos += 8;
}
}
}
#endif
-static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index,
- uint8_t *data, int8_t size, uint8_t tx_rate)
+static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
+ unsigned if_index, uint8_t tx_rate,
+ const uint8_t *data, unsigned length)
{
- uint8_t set_buf_index[2];
+ uint8_t set_buf_index[2] = { if_index, 0 };
+ uint8_t hbuf_size, tmp[8];
+ int i;
- set_buf_index[0] = index;
- set_buf_index[1] = 0;
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2);
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
+ &hbuf_size, 1))
+ return false;
- for (; size > 0; size -= 8) {
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8);
- data += 8;
- }
+ /* Buffer size is 0 based, hooray! */
+ hbuf_size++;
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
-}
+ DRM_DEBUG_KMS("writing sdvo hbuf: %i, length %u, hbuf_size: %i\n",
+ if_index, length, hbuf_size);
-static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
-{
- uint8_t csum = 0;
- int i;
+ for (i = 0; i < hbuf_size; i += 8) {
+ memset(tmp, 0, 8);
+ if (i < length)
+ memcpy(tmp, data + i, min_t(unsigned, 8, length - i));
- for (i = 0; i < size; i++)
- csum += data[i];
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_DATA,
+ tmp, 8))
+ return false;
+ }
- return 0x100 - csum;
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_TXRATE,
+ &tx_rate, 1);
}
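Note that the loop always walks the device's full buffer, zero-padding past
the payload, so stale bytes from a previous, longer frame can never leak.
For example, a 17-byte payload into a 24-byte buffer (after the off-by-one
fixup) goes out as three 8-byte chunks:

    /*
     *   i = 0:   data[0..7]
     *   i = 8:   data[8..15]
     *   i = 16:  data[16], then 7 bytes of zero padding
     */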
-#define DIP_TYPE_AVI 0x82
-#define DIP_VERSION_AVI 0x2
-#define DIP_LEN_AVI 13
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ const struct drm_display_mode *adjusted_mode)
+{
+ uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+ struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ union hdmi_infoframe frame;
+ int ret;
+ ssize_t len;
-struct dip_infoframe {
- uint8_t type;
- uint8_t version;
- uint8_t len;
- uint8_t checksum;
- union {
- struct {
- /* Packet Byte #1 */
- uint8_t S:2;
- uint8_t B:2;
- uint8_t A:1;
- uint8_t Y:2;
- uint8_t rsvd1:1;
- /* Packet Byte #2 */
- uint8_t R:4;
- uint8_t M:2;
- uint8_t C:2;
- /* Packet Byte #3 */
- uint8_t SC:2;
- uint8_t Q:2;
- uint8_t EC:3;
- uint8_t ITC:1;
- /* Packet Byte #4 */
- uint8_t VIC:7;
- uint8_t rsvd2:1;
- /* Packet Byte #5 */
- uint8_t PR:4;
- uint8_t rsvd3:4;
- /* Packet Byte #6~13 */
- uint16_t top_bar_end;
- uint16_t bottom_bar_start;
- uint16_t left_bar_end;
- uint16_t right_bar_start;
- } avi;
- struct {
- /* Packet Byte #1 */
- uint8_t channel_count:3;
- uint8_t rsvd1:1;
- uint8_t coding_type:4;
- /* Packet Byte #2 */
- uint8_t sample_size:2; /* SS0, SS1 */
- uint8_t sample_frequency:3;
- uint8_t rsvd2:3;
- /* Packet Byte #3 */
- uint8_t coding_type_private:5;
- uint8_t rsvd3:3;
- /* Packet Byte #4 */
- uint8_t channel_allocation;
- /* Packet Byte #5 */
- uint8_t rsvd4:3;
- uint8_t level_shift:4;
- uint8_t downmix_inhibit:1;
- } audio;
- uint8_t payload[28];
- } __attribute__ ((packed)) u;
-} __attribute__((packed));
-
-static void intel_sdvo_set_avi_infoframe(struct intel_output *output,
- struct drm_display_mode * mode)
-{
- struct dip_infoframe avi_if = {
- .type = DIP_TYPE_AVI,
- .version = DIP_VERSION_AVI,
- .len = DIP_LEN_AVI,
- };
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ adjusted_mode);
+ if (ret < 0) {
+ DRM_ERROR("couldn't fill AVI infoframe\n");
+ return false;
+ }
+
+ if (intel_sdvo->rgb_quant_range_selectable) {
+ if (intel_crtc->config.limited_color_range)
+ frame.avi.quantization_range =
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+ else
+ frame.avi.quantization_range =
+ HDMI_QUANTIZATION_RANGE_FULL;
+ }
+
+ len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
+ if (len < 0)
+ return false;
- avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
- 4 + avi_if.len);
- intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len,
- SDVO_HBUF_TX_VSYNC);
+ return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
+ SDVO_HBUF_TX_VSYNC,
+ sdvo_data, sizeof(sdvo_data));
}
-static void intel_sdvo_set_tv_format(struct intel_output *output)
+static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
{
-
struct intel_sdvo_tv_format format;
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
- uint32_t format_map, i;
- uint8_t status;
-
- for (i = 0; i < TV_FORMAT_NUM; i++)
- if (tv_format_names[i] == sdvo_priv->tv_format_name)
- break;
+ uint32_t format_map;
- format_map = 1 << i;
+ format_map = 1 << intel_sdvo->tv_format_index;
memset(&format, 0, sizeof(format));
- memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
- sizeof(format) : sizeof(format_map));
+ memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format_map,
- sizeof(format));
-
- status = intel_sdvo_read_response(output, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- DRM_DEBUG_KMS("%s: Failed to set TV format\n",
- SDVO_NAME(sdvo_priv));
+ BUILD_BUG_ON(sizeof(format) != 6);
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TV_FORMAT,
+ &format, sizeof(format));
}
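format_map is a one-hot mask indexed by tv_format_index, and since the
format struct is 6 bytes while format_map is 4, min() copies only the low
4 bytes; the memset() keeps the top two zeroed. For example (byte layout
assumes a little-endian build):

    /*
     *   tv_format_index = 5
     *   format_map      = 1 << 5 = 0x00000020
     *   format          = { 0x20, 0x00, 0x00, 0x00, 0x00, 0x00 }
     */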
-static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool
+intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
+ const struct drm_display_mode *mode)
{
- struct intel_output *output = enc_to_intel_output(encoder);
- struct intel_sdvo_priv *dev_priv = output->dev_priv;
-
- if (dev_priv->is_tv) {
- struct intel_sdvo_dtd output_dtd;
- bool success;
-
- /* We need to construct preferred input timings based on our
- * output timings. To do that, we have to set the output
- * timings, even though this isn't really the right place in
- * the sequence to do it. Oh well.
- */
-
-
- /* Set output timings */
- intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
- intel_sdvo_set_target_output(output,
- dev_priv->controlled_output);
- intel_sdvo_set_output_timing(output, &output_dtd);
-
- /* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(output, true, false);
+ struct intel_sdvo_dtd output_dtd;
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return false;
- success = intel_sdvo_create_preferred_input_timing(output,
- mode->clock / 10,
- mode->hdisplay,
- mode->vdisplay);
- if (success) {
- struct intel_sdvo_dtd input_dtd;
+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+ return false;
- intel_sdvo_get_preferred_input_timing(output,
- &input_dtd);
- intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
- dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
+ return true;
+}
- drm_mode_set_crtcinfo(adjusted_mode, 0);
+/* Asks the sdvo controller for the preferred input mode given the output mode.
+ * Unfortunately we have to set up the full output mode to do that. */
+static bool
+intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_sdvo_dtd input_dtd;
- mode->clock = adjusted_mode->clock;
+ /* Reset the input timing to the screen. Assume always input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ return false;
- adjusted_mode->clock *=
- intel_sdvo_get_pixel_multiplier(mode);
- } else {
- return false;
- }
- } else if (dev_priv->is_lvds) {
- struct intel_sdvo_dtd output_dtd;
- bool success;
+ if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
+ mode->clock / 10,
+ mode->hdisplay,
+ mode->vdisplay))
+ return false;
- drm_mode_set_crtcinfo(dev_priv->sdvo_lvds_fixed_mode, 0);
- /* Set output timings */
- intel_sdvo_get_dtd_from_mode(&output_dtd,
- dev_priv->sdvo_lvds_fixed_mode);
+ if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
+ &input_dtd))
+ return false;
- intel_sdvo_set_target_output(output,
- dev_priv->controlled_output);
- intel_sdvo_set_output_timing(output, &output_dtd);
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+ intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags;
- /* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(output, true, false);
+ return true;
+}
+static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
+{
+ unsigned dotclock = pipe_config->port_clock;
+ struct dpll *clock = &pipe_config->dpll;
+
+ /* SDVO TV has fixed PLL values that depend on its clock range;
+ this mirrors the VBIOS setting. */
+ if (dotclock >= 100000 && dotclock < 140500) {
+ clock->p1 = 2;
+ clock->p2 = 10;
+ clock->n = 3;
+ clock->m1 = 16;
+ clock->m2 = 8;
+ } else if (dotclock >= 140500 && dotclock <= 200000) {
+ clock->p1 = 1;
+ clock->p2 = 10;
+ clock->n = 6;
+ clock->m1 = 12;
+ clock->m2 = 8;
+ } else {
+ WARN(1, "SDVO TV clock out of range: %i\n", dotclock);
+ }
- success = intel_sdvo_create_preferred_input_timing(
- output,
- mode->clock / 10,
- mode->hdisplay,
- mode->vdisplay);
+ pipe_config->clock_set = true;
+}
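For instance, a 148500 kHz port clock falls in the second band and pins the
PLL at fixed divider values; a clock outside 100000-200000 kHz only WARNs,
yet clock_set is still raised, so the warning marks a real configuration
error rather than a recoverable fallback:

    /* Example: port_clock = 148500 -> second band:
     *   p1 = 1, p2 = 10, n = 6, m1 = 12, m2 = 8
     */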
- if (success) {
- struct intel_sdvo_dtd input_dtd;
+static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *mode = &pipe_config->requested_mode;
- intel_sdvo_get_preferred_input_timing(output,
- &input_dtd);
- intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
- dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
+ DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
+ pipe_config->pipe_bpp = 8*3;
- drm_mode_set_crtcinfo(adjusted_mode, 0);
+ if (HAS_PCH_SPLIT(encoder->base.dev))
+ pipe_config->has_pch_encoder = true;
- mode->clock = adjusted_mode->clock;
+ /* We need to construct preferred input timings based on our
+ * output timings. To do that, we have to set the output
+ * timings, even though this isn't really the right place in
+ * the sequence to do it. Oh well.
+ */
+ if (intel_sdvo->is_tv) {
+ if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
+ return false;
- adjusted_mode->clock *=
- intel_sdvo_get_pixel_multiplier(mode);
- } else {
+ (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
+ pipe_config->sdvo_tv_clock = true;
+ } else if (intel_sdvo->is_lvds) {
+ if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
+ intel_sdvo->sdvo_lvds_fixed_mode))
return false;
- }
+ (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
+ }
+
+ /* Make the CRTC code factor in the SDVO pixel multiplier. The
+ * SDVO device will factor out the multiplier during mode_set.
+ */
+ pipe_config->pixel_multiplier =
+ intel_sdvo_get_pixel_multiplier(adjusted_mode);
+
+ pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor;
+
+ if (intel_sdvo->color_range_auto) {
+ /* See CEA-861-E - 5.1 Default Encoding Parameters */
+ /* FIXME: This bit is only valid when using TMDS encoding and 8
+ * bit per color mode. */
+ if (pipe_config->has_hdmi_sink &&
+ drm_match_cea_mode(adjusted_mode) > 1)
+ pipe_config->limited_color_range = true;
} else {
- /* Make the CRTC code factor in the SDVO pixel multiplier. The
- * SDVO device will be told of the multiplier during mode_set.
- */
- adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+ if (pipe_config->has_hdmi_sink &&
+ intel_sdvo->color_range == HDMI_COLOR_RANGE_16_235)
+ pipe_config->limited_color_range = true;
}
+
+ /* Clock computation needs to happen after pixel multiplier. */
+ if (intel_sdvo->is_tv)
+ i9xx_adjust_sdvo_tv_clock(pipe_config);
+
return true;
}
-static void intel_sdvo_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_output *output = enc_to_intel_output(encoder);
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
- u32 sdvox = 0;
- int sdvo_pixel_multiply;
+ struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
+ struct drm_display_mode *adjusted_mode =
+ &crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &crtc->config.requested_mode;
+ struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
+ u32 sdvox;
struct intel_sdvo_in_out_map in_out;
- struct intel_sdvo_dtd input_dtd;
- u8 status;
+ struct intel_sdvo_dtd input_dtd, output_dtd;
+ int rate;
if (!mode)
return;
@@ -1148,507 +1198,619 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* channel on the motherboard. In a two-input device, the first input
* will be SDVOB and the second SDVOC.
*/
- in_out.in0 = sdvo_priv->controlled_output;
+ in_out.in0 = intel_sdvo->attached_output;
in_out.in1 = 0;
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP,
+ intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_IN_OUT_MAP,
&in_out, sizeof(in_out));
- status = intel_sdvo_read_response(output, NULL, 0);
- if (sdvo_priv->is_hdmi) {
- intel_sdvo_set_avi_infoframe(output, mode);
- sdvox |= SDVO_AUDIO_ENABLE;
- }
-
- /* We have tried to get input timing in mode_fixup, and filled into
- adjusted_mode */
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
- intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- input_dtd.part2.sdvo_flags = sdvo_priv->sdvo_flags;
- } else
- intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
+ /* Set the output timings to the screen */
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return;
- /* If it's a TV, we already set the output timing in mode_fixup.
- * Otherwise, the output timing is equal to the input timing.
- */
- if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
- /* Set the output timing to the screen */
- intel_sdvo_set_target_output(output,
- sdvo_priv->controlled_output);
- intel_sdvo_set_output_timing(output, &input_dtd);
- }
+ /* lvds has a special fixed output timing. */
+ if (intel_sdvo->is_lvds)
+ intel_sdvo_get_dtd_from_mode(&output_dtd,
+ intel_sdvo->sdvo_lvds_fixed_mode);
+ else
+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+ DRM_INFO("Setting output timings on %s failed\n",
+ SDVO_NAME(intel_sdvo));
/* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(output, true, false);
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ return;
- if (sdvo_priv->is_tv)
- intel_sdvo_set_tv_format(output);
+ if (crtc->config.has_hdmi_sink) {
+ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_sdvo,
+ SDVO_COLORIMETRY_RGB256);
+ intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode);
+ } else
+ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
- /* We would like to use intel_sdvo_create_preferred_input_timing() to
- * provide the device with a timing it can support, if it supports that
- * feature. However, presumably we would need to adjust the CRTC to
- * output the preferred timing, and we don't support that currently.
- */
-#if 0
- success = intel_sdvo_create_preferred_input_timing(output, clock,
- width, height);
- if (success) {
- struct intel_sdvo_dtd *input_dtd;
+ if (intel_sdvo->is_tv &&
+ !intel_sdvo_set_tv_format(intel_sdvo))
+ return;
- intel_sdvo_get_preferred_input_timing(output, &input_dtd);
- intel_sdvo_set_input_timing(output, &input_dtd);
- }
-#else
- intel_sdvo_set_input_timing(output, &input_dtd);
-#endif
+ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- switch (intel_sdvo_get_pixel_multiplier(mode)) {
- case 1:
- intel_sdvo_set_clock_rate_mult(output,
- SDVO_CLOCK_RATE_MULT_1X);
- break;
- case 2:
- intel_sdvo_set_clock_rate_mult(output,
- SDVO_CLOCK_RATE_MULT_2X);
- break;
- case 4:
- intel_sdvo_set_clock_rate_mult(output,
- SDVO_CLOCK_RATE_MULT_4X);
- break;
+ if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
+ input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
+ if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
+ DRM_INFO("Setting input timings on %s failed\n",
+ SDVO_NAME(intel_sdvo));
+
+ switch (crtc->config.pixel_multiplier) {
+ default:
+ WARN(1, "unknown pixel multiplier specified\n");
+ case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+ case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+ case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
}
+ if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate))
+ return;
/* Set the SDVO control regs. */
- if (IS_I965G(dev)) {
- sdvox |= SDVO_BORDER_ENABLE |
- SDVO_VSYNC_ACTIVE_HIGH |
- SDVO_HSYNC_ACTIVE_HIGH;
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* The real mode polarity is set by the SDVO commands, using
+ * struct intel_sdvo_dtd. */
+ sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
+ if (!HAS_PCH_SPLIT(dev) && crtc->config.limited_color_range)
+ sdvox |= HDMI_COLOR_RANGE_16_235;
+ if (INTEL_INFO(dev)->gen < 5)
+ sdvox |= SDVO_BORDER_ENABLE;
} else {
- sdvox |= I915_READ(sdvo_priv->output_device);
- switch (sdvo_priv->output_device) {
- case SDVOB:
+ sdvox = I915_READ(intel_sdvo->sdvo_reg);
+ switch (intel_sdvo->sdvo_reg) {
+ case GEN3_SDVOB:
sdvox &= SDVOB_PRESERVE_MASK;
break;
- case SDVOC:
+ case GEN3_SDVOC:
sdvox &= SDVOC_PRESERVE_MASK;
break;
}
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (intel_crtc->pipe == 1)
- sdvox |= SDVO_PIPE_B_SELECT;
- sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
- if (IS_I965G(dev)) {
+ if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+ sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
+ else
+ sdvox |= SDVO_PIPE_SEL(crtc->pipe);
+
+ if (intel_sdvo->has_hdmi_audio)
+ sdvox |= SDVO_AUDIO_ENABLE;
+
+ if (INTEL_INFO(dev)->gen >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
- sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+ sdvox |= (crtc->config.pixel_multiplier - 1)
+ << SDVO_PORT_MULTIPLY_SHIFT;
}
- if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL)
+ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
+ INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_STALL_SELECT;
- intel_sdvo_write_sdvox(output, sdvox);
+ intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
-static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- u32 temp;
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(&connector->base);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+ u16 active_outputs = 0;
- if (mode != DRM_MODE_DPMS_ON) {
- intel_sdvo_set_active_outputs(intel_output, 0);
- if (0)
- intel_sdvo_set_encoder_power_state(intel_output, mode);
+ intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
- if (mode == DRM_MODE_DPMS_OFF) {
- temp = I915_READ(sdvo_priv->output_device);
- if ((temp & SDVO_ENABLE) != 0) {
- intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE);
- }
- }
- } else {
- bool input1, input2;
- int i;
- u8 status;
+ if (active_outputs & intel_sdvo_connector->output_flag)
+ return true;
+ else
+ return false;
+}
- temp = I915_READ(sdvo_priv->output_device);
- if ((temp & SDVO_ENABLE) == 0)
- intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE);
- for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev);
+static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+ u16 active_outputs = 0;
+ u32 tmp;
- status = intel_sdvo_get_trained_inputs(intel_output, &input1,
- &input2);
+ tmp = I915_READ(intel_sdvo->sdvo_reg);
+ intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+ if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
+ return false;
- /* Warn if the device reported failure to sync.
- * A lot of SDVO devices fail to notify of sync, but it's
- * a given it the status is a success, we succeeded.
- */
- if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
- DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(sdvo_priv));
- }
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
- if (0)
- intel_sdvo_set_encoder_power_state(intel_output, mode);
- intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output);
- }
- return;
+ return true;
}
-static void intel_sdvo_save(struct drm_connector *connector)
+static void intel_sdvo_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
- struct drm_device *dev = connector->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- int o;
-
- sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output);
- intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+ struct intel_sdvo_dtd dtd;
+ int encoder_pixel_multiplier = 0;
+ int dotclock;
+ u32 flags = 0, sdvox;
+ u8 val;
+ bool ret;
+
+ sdvox = I915_READ(intel_sdvo->sdvo_reg);
+
+ ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
+ if (!ret) {
+ /* Some sdvo encoders are not spec compliant and don't
+ * implement the mandatory get_timings function. */
+ DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n");
+ pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;
+ } else {
+ if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
- intel_sdvo_set_target_input(intel_output, true, false);
- intel_sdvo_get_input_timing(intel_output,
- &sdvo_priv->save_input_dtd_1);
+ if (dtd.part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
}
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
- intel_sdvo_set_target_input(intel_output, false, true);
- intel_sdvo_get_input_timing(intel_output,
- &sdvo_priv->save_input_dtd_2);
+ pipe_config->adjusted_mode.flags |= flags;
+
+ /*
+ * Pixel multiplier readout is tricky: only on i915g/gm is it stored in
+ * the sdvo port register; on all other platforms it is part of the dpll
+ * state. Since the general pipe state readout happens before the
+ * encoder->get_config hook, we already have a valid pixel multiplier on
+ * all other platforms.
+ */
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pipe_config->pixel_multiplier =
+ ((sdvox & SDVO_PORT_MULTIPLY_MASK)
+ >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
}
- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
- {
- u16 this_output = (1 << o);
- if (sdvo_priv->caps.output_flags & this_output)
- {
- intel_sdvo_set_target_output(intel_output, this_output);
- intel_sdvo_get_output_timing(intel_output,
- &sdvo_priv->save_output_dtd[o]);
+ dotclock = pipe_config->port_clock;
+ if (pipe_config->pixel_multiplier)
+ dotclock /= pipe_config->pixel_multiplier;
+
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+ pipe_config->adjusted_mode.crtc_clock = dotclock;
+
+ /* Cross check the port pixel multiplier with the sdvo encoder state. */
+ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
+ &val, 1)) {
+ switch (val) {
+ case SDVO_CLOCK_RATE_MULT_1X:
+ encoder_pixel_multiplier = 1;
+ break;
+ case SDVO_CLOCK_RATE_MULT_2X:
+ encoder_pixel_multiplier = 2;
+ break;
+ case SDVO_CLOCK_RATE_MULT_4X:
+ encoder_pixel_multiplier = 4;
+ break;
}
}
- if (sdvo_priv->is_tv) {
- /* XXX: Save TV format/enhancements. */
+
+ if (sdvox & HDMI_COLOR_RANGE_16_235)
+ pipe_config->limited_color_range = true;
+
+ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
+ &val, 1)) {
+ if (val == SDVO_ENCODE_HDMI)
+ pipe_config->has_hdmi_sink = true;
}
- sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device);
+ WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
+ "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
+ pipe_config->pixel_multiplier, encoder_pixel_multiplier);
}
-static void intel_sdvo_restore(struct drm_connector *connector)
+static void intel_disable_sdvo(struct intel_encoder *encoder)
{
- struct drm_device *dev = connector->dev;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- int o;
- int i;
- bool input1, input2;
- u8 status;
-
- intel_sdvo_set_active_outputs(intel_output, 0);
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+ u32 temp;
- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
- {
- u16 this_output = (1 << o);
- if (sdvo_priv->caps.output_flags & this_output) {
- intel_sdvo_set_target_output(intel_output, this_output);
- intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]);
+ intel_sdvo_set_active_outputs(intel_sdvo, 0);
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo,
+ DRM_MODE_DPMS_OFF);
+
+ temp = I915_READ(intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) != 0) {
+ /* HW workaround for IBX, we need to move the port to
+ * transcoder A before disabling it. */
+ if (HAS_PCH_IBX(encoder->base.dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ if (temp & SDVO_PIPE_B_SELECT) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ I915_WRITE(intel_sdvo->sdvo_reg, temp);
+ POSTING_READ(intel_sdvo->sdvo_reg);
+
+ /* Again we need to write this twice. */
+ I915_WRITE(intel_sdvo->sdvo_reg, temp);
+ POSTING_READ(intel_sdvo->sdvo_reg);
+
+ /* Transcoder selection bits only update
+ * effectively on vblank. */
+ if (crtc)
+ intel_wait_for_vblank(encoder->base.dev, pipe);
+ else
+ msleep(50);
+ }
}
+
+ intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
}
+}
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
- intel_sdvo_set_target_input(intel_output, true, false);
- intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1);
+static void intel_enable_sdvo(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ u32 temp;
+ bool input1, input2;
+ int i;
+ bool success;
+
+ temp = I915_READ(intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) == 0) {
+		/* HW workaround for IBX: the disable path parks the port on
+		 * transcoder A, so restore the pipe selection here. */
+ if (HAS_PCH_IBX(dev))
+ temp |= SDVO_PIPE_SEL(intel_crtc->pipe);
+
+ intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
}
+ for (i = 0; i < 2; i++)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
- intel_sdvo_set_target_input(intel_output, false, true);
- intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2);
+ success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
+ /* Warn if the device reported failure to sync.
+	 * A lot of SDVO devices fail to notify of sync, but it's
+	 * a given that if the status reports success, we succeeded.
+ */
+ if (success && !input1) {
+ DRM_DEBUG_KMS("First %s output reported failure to "
+ "sync\n", SDVO_NAME(intel_sdvo));
}
- intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult);
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo,
+ DRM_MODE_DPMS_ON);
+ intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+}
+
+/* Special dpms function to support cloning between dvo/sdvo/crt. */
+static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_crtc *crtc;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+
+ /* dvo supports only 2 dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
- if (sdvo_priv->is_tv) {
- /* XXX: Restore TV format/enhancements. */
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = intel_sdvo->base.base.crtc;
+ if (!crtc) {
+ intel_sdvo->base.connectors_active = false;
+ return;
}
- intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX);
+ /* We set active outputs manually below in case pipe dpms doesn't change
+ * due to cloning. */
+ if (mode != DRM_MODE_DPMS_ON) {
+ intel_sdvo_set_active_outputs(intel_sdvo, 0);
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
- if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
- {
- for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev);
- status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2);
- if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
- DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(sdvo_priv));
+ intel_sdvo->base.connectors_active = false;
+
+ intel_crtc_update_dpms(crtc);
+ } else {
+ intel_sdvo->base.connectors_active = true;
+
+ intel_crtc_update_dpms(crtc);
+
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
+ intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
}
- intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs);
+ intel_modeset_check_state(connector->dev);
}
-static int intel_sdvo_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_sdvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- if (sdvo_priv->pixel_clock_min > mode->clock)
+ if (intel_sdvo->pixel_clock_min > mode->clock)
return MODE_CLOCK_LOW;
- if (sdvo_priv->pixel_clock_max < mode->clock)
+ if (intel_sdvo->pixel_clock_max < mode->clock)
return MODE_CLOCK_HIGH;
- if (sdvo_priv->is_lvds == true) {
- if (sdvo_priv->sdvo_lvds_fixed_mode == NULL)
- return MODE_PANEL;
-
- if (mode->hdisplay > sdvo_priv->sdvo_lvds_fixed_mode->hdisplay)
+ if (intel_sdvo->is_lvds) {
+ if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > sdvo_priv->sdvo_lvds_fixed_mode->vdisplay)
+ if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
return MODE_PANEL;
}
return MODE_OK;
}
-static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps)
+static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
{
- u8 status;
-
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
- status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps));
- if (status != SDVO_CMD_STATUS_SUCCESS)
+ BUILD_BUG_ON(sizeof(*caps) != 8);
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_DEVICE_CAPS,
+ caps, sizeof(*caps)))
return false;
- return true;
-}
-
-struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
-{
- struct drm_connector *connector = NULL;
- struct intel_output *iout = NULL;
- struct intel_sdvo_priv *sdvo;
-
- /* find the sdvo connector */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- iout = to_intel_output(connector);
-
- if (iout->type != INTEL_OUTPUT_SDVO)
- continue;
+ DRM_DEBUG_KMS("SDVO capabilities:\n"
+ " vendor_id: %d\n"
+ " device_id: %d\n"
+ " device_rev_id: %d\n"
+ " sdvo_version_major: %d\n"
+ " sdvo_version_minor: %d\n"
+ " sdvo_inputs_mask: %d\n"
+ " smooth_scaling: %d\n"
+ " sharp_scaling: %d\n"
+ " up_scaling: %d\n"
+ " down_scaling: %d\n"
+ " stall_support: %d\n"
+ " output_flags: %d\n",
+ caps->vendor_id,
+ caps->device_id,
+ caps->device_rev_id,
+ caps->sdvo_version_major,
+ caps->sdvo_version_minor,
+ caps->sdvo_inputs_mask,
+ caps->smooth_scaling,
+ caps->sharp_scaling,
+ caps->up_scaling,
+ caps->down_scaling,
+ caps->stall_support,
+ caps->output_flags);
- sdvo = iout->dev_priv;
-
- if (sdvo->output_device == SDVOB && sdvoB)
- return connector;
-
- if (sdvo->output_device == SDVOC && !sdvoB)
- return connector;
-
- }
-
- return NULL;
+ return true;
}
-int intel_sdvo_supports_hotplug(struct drm_connector *connector)
+static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
{
- u8 response[2];
- u8 status;
- struct intel_output *intel_output;
- DRM_DEBUG_KMS("\n");
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ uint16_t hotplug;
- if (!connector)
+	/* HW Erratum: SDVO hotplug is broken on all i945G chips; there's
+	 * noise on the line. */
+ if (IS_I945G(dev) || IS_I945GM(dev))
return 0;
- intel_output = to_intel_output(connector);
-
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &response, 2);
-
- if (response[0] !=0)
- return 1;
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+ &hotplug, sizeof(hotplug)))
+ return 0;
- return 0;
+ return hotplug;
}
-void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
+static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
- u8 response[2];
- u8 status;
- struct intel_output *intel_output = to_intel_output(connector);
-
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_output, &response, 2);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- if (on) {
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &response, 2);
-
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
- } else {
- response[0] = 0;
- response[1] = 0;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
- }
-
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_output, &response, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+ &intel_sdvo->hotplug_active, 2);
}
static bool
-intel_sdvo_multifunc_encoder(struct intel_output *intel_output)
+intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- int caps = 0;
-
- if (sdvo_priv->caps.output_flags &
- (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
- caps++;
- if (sdvo_priv->caps.output_flags &
- (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
- caps++;
- if (sdvo_priv->caps.output_flags &
- (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
- caps++;
- if (sdvo_priv->caps.output_flags &
- (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
- caps++;
- if (sdvo_priv->caps.output_flags &
- (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1))
- caps++;
-
- if (sdvo_priv->caps.output_flags &
- (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1))
- caps++;
-
- if (sdvo_priv->caps.output_flags &
- (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1))
- caps++;
-
- return (caps > 1);
+ /* Is there more than one type of output? */
+ return hweight16(intel_sdvo->caps.output_flags) > 1;
}
-static struct drm_connector *
-intel_find_analog_connector(struct drm_device *dev)
+static struct edid *
+intel_sdvo_get_edid(struct drm_connector *connector)
{
- struct drm_connector *connector;
- struct intel_output *intel_output;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- intel_output = to_intel_output(connector);
- if (intel_output->type == INTEL_OUTPUT_ANALOG)
- return connector;
- }
- return NULL;
+ struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ return drm_get_edid(connector, &sdvo->ddc);
}
-static int
-intel_analog_is_connected(struct drm_device *dev)
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+intel_sdvo_get_analog_edid(struct drm_connector *connector)
{
- struct drm_connector *analog_connector;
- analog_connector = intel_find_analog_connector(dev);
-
- if (!analog_connector)
- return false;
-
- if (analog_connector->funcs->detect(analog_connector) ==
- connector_status_disconnected)
- return false;
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
- return true;
+ return drm_get_edid(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ dev_priv->vbt.crt_ddc_pin));
}
-enum drm_connector_status
-intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
+static enum drm_connector_status
+intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- enum drm_connector_status status = connector_status_connected;
- struct edid *edid = NULL;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ enum drm_connector_status status;
+ struct edid *edid;
- edid = drm_get_edid(&intel_output->base,
- intel_output->ddc_bus);
+ edid = intel_sdvo_get_edid(connector);
- /* when there is no edid and no monitor is connected with VGA
- * port, try to use the CRT ddc to read the EDID for DVI-connector
- */
- if (edid == NULL &&
- sdvo_priv->analog_ddc_bus &&
- !intel_analog_is_connected(intel_output->base.dev))
- edid = drm_get_edid(&intel_output->base,
- sdvo_priv->analog_ddc_bus);
- if (edid != NULL) {
- /* Don't report the output as connected if it's a DVI-I
- * connector with a non-digital EDID coming out.
+ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
+ u8 ddc, saved_ddc = intel_sdvo->ddc_bus;
+
+ /*
+		 * Don't use bus 1 as the argument of the DDC bus switch to
+		 * get the EDID; that bus is used for the SDVO SPD ROM.
*/
- if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
- sdvo_priv->is_hdmi =
- drm_detect_hdmi_monitor(edid);
- else
- status = connector_status_disconnected;
+ for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+ intel_sdvo->ddc_bus = ddc;
+ edid = intel_sdvo_get_edid(connector);
+ if (edid)
+ break;
}
+ /*
+ * If we found the EDID on the other bus,
+ * assume that is the correct DDC bus.
+ */
+ if (edid == NULL)
+ intel_sdvo->ddc_bus = saved_ddc;
+ }
+ /*
+	 * When there is no EDID and no monitor is connected to the VGA
+	 * port, try the CRT DDC to read the EDID for the DVI connector.
+ */
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+
+ status = connector_status_unknown;
+ if (edid != NULL) {
+ /* DDC bus is shared, match EDID to connector type */
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ if (intel_sdvo->is_hdmi) {
+ intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ intel_sdvo->rgb_quant_range_selectable =
+ drm_rgb_quant_range_selectable(edid);
+ }
+ } else
+ status = connector_status_disconnected;
kfree(edid);
- intel_output->base.display_info.raw_edid = NULL;
+ }
- } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
- status = connector_status_disconnected;
+ if (status == connector_status_connected) {
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO)
+ intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON);
+ }
return status;
}
-static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+ struct edid *edid)
{
- uint16_t response;
- u8 status;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_DIGITAL(sdvo);
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
- if (sdvo_priv->is_tv) {
- /* add 30ms delay when the output type is SDVO-TV */
- mdelay(30);
- }
- status = intel_sdvo_read_response(intel_output, &response, 2);
+ DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+ connector_is_digital, monitor_is_digital);
+ return connector_is_digital == monitor_is_digital;
+}
+
+static enum drm_connector_status
+intel_sdvo_detect(struct drm_connector *connector, bool force)
+{
+ uint16_t response;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ enum drm_connector_status ret;
- DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
- if (status != SDVO_CMD_STATUS_SUCCESS)
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS,
+ &response, 2))
return connector_status_unknown;
+ DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+ response & 0xff, response >> 8,
+ intel_sdvo_connector->output_flag);
+
if (response == 0)
return connector_status_disconnected;
- if (intel_sdvo_multifunc_encoder(intel_output) &&
- sdvo_priv->attached_output != response) {
- if (sdvo_priv->controlled_output != response &&
- intel_sdvo_output_setup(intel_output, response) != true)
- return connector_status_unknown;
- sdvo_priv->attached_output = response;
+ intel_sdvo->attached_output = response;
+
+ intel_sdvo->has_hdmi_monitor = false;
+ intel_sdvo->has_hdmi_audio = false;
+ intel_sdvo->rgb_quant_range_selectable = false;
+
+ if ((intel_sdvo_connector->output_flag & response) == 0)
+ ret = connector_status_disconnected;
+ else if (IS_TMDS(intel_sdvo_connector))
+ ret = intel_sdvo_tmds_sink_detect(connector);
+ else {
+ struct edid *edid;
+
+ /* if we have an edid check it matches the connection */
+ edid = intel_sdvo_get_edid(connector);
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+ if (edid != NULL) {
+ if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+ edid))
+ ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+
+ kfree(edid);
+ } else
+ ret = connector_status_connected;
}
- return intel_sdvo_hdmi_sink_detect(connector, response);
+
+	/* May update encoder flags, e.g. is_tv for the SDVO TV clock, etc. */
+ if (ret == connector_status_connected) {
+ intel_sdvo->is_tv = false;
+ intel_sdvo->is_lvds = false;
+
+ if (response & SDVO_TV_MASK)
+ intel_sdvo->is_tv = true;
+ if (response & SDVO_LVDS_MASK)
+ intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
+ }
+
+ return ret;
}
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- int num_modes;
+ struct edid *edid;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
/* set the bus switch and get the modes */
- num_modes = intel_ddc_get_modes(intel_output);
+ edid = intel_sdvo_get_edid(connector);
/*
* Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1656,19 +1818,17 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
* DDC fails, check to see if the analog output is disconnected, in
* which case we'll look there for the digital DDC data.
*/
- if (num_modes == 0 &&
- sdvo_priv->analog_ddc_bus &&
- !intel_analog_is_connected(intel_output->base.dev)) {
- struct i2c_adapter *digital_ddc_bus;
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
- /* Switch to the analog ddc bus and try that
- */
- digital_ddc_bus = intel_output->ddc_bus;
- intel_output->ddc_bus = sdvo_priv->analog_ddc_bus;
-
- (void) intel_ddc_get_modes(intel_output);
+ if (edid != NULL) {
+ if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+ edid)) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_add_edid_modes(connector, edid);
+ }
- intel_output->ddc_bus = digital_ddc_bus;
+ kfree(edid);
}
}
@@ -1677,7 +1837,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
* Note! This is in reply order (see loop in get_tv_modes).
* XXX: all 60Hz refresh?
*/
-struct drm_display_mode sdvo_tv_modes[] = {
+static const struct drm_display_mode sdvo_tv_modes[] = {
{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
416, 0, 200, 201, 232, 233, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
@@ -1739,66 +1899,58 @@ struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
- struct intel_output *output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_sdtv_resolution_request tv_res;
uint32_t reply = 0, format_map = 0;
int i;
- uint8_t status;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
/* Read the list of supported input resolutions for the selected TV
* format.
*/
- for (i = 0; i < TV_FORMAT_NUM; i++)
- if (tv_format_names[i] == sdvo_priv->tv_format_name)
- break;
-
- format_map = (1 << i);
+ format_map = 1 << intel_sdvo->tv_format_index;
memcpy(&tv_res, &format_map,
- sizeof(struct intel_sdvo_sdtv_resolution_request) >
- sizeof(format_map) ? sizeof(format_map) :
- sizeof(struct intel_sdvo_sdtv_resolution_request));
+ min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+ if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
+ return;
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
- &tv_res, sizeof(tv_res));
- status = intel_sdvo_read_response(output, &reply, 3);
- if (status != SDVO_CMD_STATUS_SUCCESS)
+ BUILD_BUG_ON(sizeof(tv_res) != 3);
+ if (!intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ &tv_res, sizeof(tv_res)))
+ return;
+ if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
return;
for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
if (reply & (1 << i)) {
struct drm_display_mode *nmode;
nmode = drm_mode_duplicate(connector->dev,
- &sdvo_tv_modes[i]);
+ &sdvo_tv_modes[i]);
if (nmode)
drm_mode_probed_add(connector, nmode);
}
-
}
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
struct drm_display_mode *newmode;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
/*
- * Attempt to get the mode list from DDC.
- * Assume that the preferred modes are
- * arranged in priority order.
+ * Fetch modes from VBT. For SDVO prefer the VBT mode since some
+ * SDVO->LVDS transcoders can't cope with the EDID mode.
*/
- intel_ddc_get_modes(intel_output);
- if (list_empty(&connector->probed_modes) == false)
- goto end;
-
- /* Fetch modes from VBT */
- if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
+ if (dev_priv->vbt.sdvo_lvds_vbt_mode != NULL) {
newmode = drm_mode_duplicate(connector->dev,
- dev_priv->sdvo_lvds_vbt_mode);
+ dev_priv->vbt.sdvo_lvds_vbt_mode);
if (newmode != NULL) {
/* Guarantee the mode is preferred */
newmode->type = (DRM_MODE_TYPE_PREFERRED |
@@ -1807,100 +1959,108 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
}
}
-end:
+ /*
+ * Attempt to get the mode list from DDC.
+ * Assume that the preferred modes are
+ * arranged in priority order.
+ */
+ intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+
list_for_each_entry(newmode, &connector->probed_modes, head) {
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
- sdvo_priv->sdvo_lvds_fixed_mode =
+ intel_sdvo->sdvo_lvds_fixed_mode =
drm_mode_duplicate(connector->dev, newmode);
+
+ intel_sdvo->is_lvds = true;
break;
}
}
-
}
static int intel_sdvo_get_modes(struct drm_connector *connector)
{
- struct intel_output *output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- if (sdvo_priv->is_tv)
+ if (IS_TV(intel_sdvo_connector))
intel_sdvo_get_tv_modes(connector);
- else if (sdvo_priv->is_lvds == true)
+ else if (IS_LVDS(intel_sdvo_connector))
intel_sdvo_get_lvds_modes(connector);
else
intel_sdvo_get_ddc_modes(connector);
- if (list_empty(&connector->probed_modes))
- return 0;
- return 1;
+ return !list_empty(&connector->probed_modes);
}
-static
-void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+static void
+intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
struct drm_device *dev = connector->dev;
- if (sdvo_priv->is_tv) {
- if (sdvo_priv->left_property)
- drm_property_destroy(dev, sdvo_priv->left_property);
- if (sdvo_priv->right_property)
- drm_property_destroy(dev, sdvo_priv->right_property);
- if (sdvo_priv->top_property)
- drm_property_destroy(dev, sdvo_priv->top_property);
- if (sdvo_priv->bottom_property)
- drm_property_destroy(dev, sdvo_priv->bottom_property);
- if (sdvo_priv->hpos_property)
- drm_property_destroy(dev, sdvo_priv->hpos_property);
- if (sdvo_priv->vpos_property)
- drm_property_destroy(dev, sdvo_priv->vpos_property);
- }
- if (sdvo_priv->is_tv) {
- if (sdvo_priv->saturation_property)
- drm_property_destroy(dev,
- sdvo_priv->saturation_property);
- if (sdvo_priv->contrast_property)
- drm_property_destroy(dev,
- sdvo_priv->contrast_property);
- if (sdvo_priv->hue_property)
- drm_property_destroy(dev, sdvo_priv->hue_property);
- }
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
- if (sdvo_priv->brightness_property)
- drm_property_destroy(dev,
- sdvo_priv->brightness_property);
- }
- return;
+ if (intel_sdvo_connector->left)
+ drm_property_destroy(dev, intel_sdvo_connector->left);
+ if (intel_sdvo_connector->right)
+ drm_property_destroy(dev, intel_sdvo_connector->right);
+ if (intel_sdvo_connector->top)
+ drm_property_destroy(dev, intel_sdvo_connector->top);
+ if (intel_sdvo_connector->bottom)
+ drm_property_destroy(dev, intel_sdvo_connector->bottom);
+ if (intel_sdvo_connector->hpos)
+ drm_property_destroy(dev, intel_sdvo_connector->hpos);
+ if (intel_sdvo_connector->vpos)
+ drm_property_destroy(dev, intel_sdvo_connector->vpos);
+ if (intel_sdvo_connector->saturation)
+ drm_property_destroy(dev, intel_sdvo_connector->saturation);
+ if (intel_sdvo_connector->contrast)
+ drm_property_destroy(dev, intel_sdvo_connector->contrast);
+ if (intel_sdvo_connector->hue)
+ drm_property_destroy(dev, intel_sdvo_connector->hue);
+ if (intel_sdvo_connector->sharpness)
+ drm_property_destroy(dev, intel_sdvo_connector->sharpness);
+ if (intel_sdvo_connector->flicker_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
+ if (intel_sdvo_connector->flicker_filter_2d)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
+ if (intel_sdvo_connector->flicker_filter_adaptive)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
+ if (intel_sdvo_connector->tv_luma_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
+ if (intel_sdvo_connector->tv_chroma_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
+ if (intel_sdvo_connector->dot_crawl)
+ drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
+ if (intel_sdvo_connector->brightness)
+ drm_property_destroy(dev, intel_sdvo_connector->brightness);
}
static void intel_sdvo_destroy(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- if (intel_output->i2c_bus)
- intel_i2c_destroy(intel_output->i2c_bus);
- if (intel_output->ddc_bus)
- intel_i2c_destroy(intel_output->ddc_bus);
- if (sdvo_priv->analog_ddc_bus)
- intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
+ if (intel_sdvo_connector->tv_format)
+ drm_property_destroy(connector->dev,
+ intel_sdvo_connector->tv_format);
- if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
- drm_mode_destroy(connector->dev,
- sdvo_priv->sdvo_lvds_fixed_mode);
+ intel_sdvo_destroy_enhance_property(connector);
+ drm_connector_cleanup(connector);
+ kfree(intel_sdvo_connector);
+}
- if (sdvo_priv->tv_format_property)
- drm_property_destroy(connector->dev,
- sdvo_priv->tv_format_property);
+static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct edid *edid;
+ bool has_audio = false;
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
- intel_sdvo_destroy_enhance_property(connector);
+ if (!intel_sdvo->is_hdmi)
+ return false;
- drm_sysfs_connector_remove(connector);
- drm_connector_cleanup(connector);
+ edid = intel_sdvo_get_edid(connector);
+ if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+ has_audio = drm_detect_monitor_audio(edid);
+ kfree(edid);
- kfree(intel_output);
+ return has_audio;
}
static int
@@ -1908,145 +2068,169 @@ intel_sdvo_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- struct drm_encoder *encoder = &intel_output->enc;
- struct drm_crtc *crtc = encoder->crtc;
- int ret = 0;
- bool changed = false;
- uint8_t cmd, status;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint16_t temp_value;
+ uint8_t cmd;
+ int ret;
- ret = drm_connector_property_set_value(connector, property, val);
- if (ret < 0)
- goto out;
+ ret = drm_object_property_set_value(&connector->base, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ int i = val;
+ bool has_audio;
+
+ if (i == intel_sdvo_connector->force_audio)
+ return 0;
- if (property == sdvo_priv->tv_format_property) {
- if (val >= TV_FORMAT_NUM) {
- ret = -EINVAL;
- goto out;
+ intel_sdvo_connector->force_audio = i;
+
+ if (i == HDMI_AUDIO_AUTO)
+ has_audio = intel_sdvo_detect_hdmi_audio(connector);
+ else
+ has_audio = (i == HDMI_AUDIO_ON);
+
+ if (has_audio == intel_sdvo->has_hdmi_audio)
+ return 0;
+
+ intel_sdvo->has_hdmi_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ bool old_auto = intel_sdvo->color_range_auto;
+ uint32_t old_range = intel_sdvo->color_range;
+
+ switch (val) {
+ case INTEL_BROADCAST_RGB_AUTO:
+ intel_sdvo->color_range_auto = true;
+ break;
+ case INTEL_BROADCAST_RGB_FULL:
+ intel_sdvo->color_range_auto = false;
+ intel_sdvo->color_range = 0;
+ break;
+ case INTEL_BROADCAST_RGB_LIMITED:
+ intel_sdvo->color_range_auto = false;
+ /* FIXME: this bit is only valid when using TMDS
+ * encoding and 8 bit per color mode. */
+ intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
+ break;
+ default:
+ return -EINVAL;
}
- if (sdvo_priv->tv_format_name ==
- sdvo_priv->tv_format_supported[val])
- goto out;
- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val];
- changed = true;
+ if (old_auto == intel_sdvo->color_range_auto &&
+ old_range == intel_sdvo->color_range)
+ return 0;
+
+ goto done;
+ }
+
+#define CHECK_PROPERTY(name, NAME) \
+ if (intel_sdvo_connector->name == property) { \
+ if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
+ if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
+ cmd = SDVO_CMD_SET_##NAME; \
+ intel_sdvo_connector->cur_##name = temp_value; \
+ goto set_value; \
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
- cmd = 0;
+ if (property == intel_sdvo_connector->tv_format) {
+ if (val >= TV_FORMAT_NUM)
+ return -EINVAL;
+
+ if (intel_sdvo->tv_format_index ==
+ intel_sdvo_connector->tv_format_supported[val])
+ return 0;
+
+ intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val];
+ goto done;
+ } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
temp_value = val;
- if (sdvo_priv->left_property == property) {
- drm_connector_property_set_value(connector,
- sdvo_priv->right_property, val);
- if (sdvo_priv->left_margin == temp_value)
- goto out;
-
- sdvo_priv->left_margin = temp_value;
- sdvo_priv->right_margin = temp_value;
- temp_value = sdvo_priv->max_hscan -
- sdvo_priv->left_margin;
+ if (intel_sdvo_connector->left == property) {
+ drm_object_property_set_value(&connector->base,
+ intel_sdvo_connector->right, val);
+ if (intel_sdvo_connector->left_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->left_margin = temp_value;
+ intel_sdvo_connector->right_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_hscan -
+ intel_sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- } else if (sdvo_priv->right_property == property) {
- drm_connector_property_set_value(connector,
- sdvo_priv->left_property, val);
- if (sdvo_priv->right_margin == temp_value)
- goto out;
-
- sdvo_priv->left_margin = temp_value;
- sdvo_priv->right_margin = temp_value;
- temp_value = sdvo_priv->max_hscan -
- sdvo_priv->left_margin;
+ goto set_value;
+ } else if (intel_sdvo_connector->right == property) {
+ drm_object_property_set_value(&connector->base,
+ intel_sdvo_connector->left, val);
+ if (intel_sdvo_connector->right_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->left_margin = temp_value;
+ intel_sdvo_connector->right_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_hscan -
+ intel_sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- } else if (sdvo_priv->top_property == property) {
- drm_connector_property_set_value(connector,
- sdvo_priv->bottom_property, val);
- if (sdvo_priv->top_margin == temp_value)
- goto out;
-
- sdvo_priv->top_margin = temp_value;
- sdvo_priv->bottom_margin = temp_value;
- temp_value = sdvo_priv->max_vscan -
- sdvo_priv->top_margin;
+ goto set_value;
+ } else if (intel_sdvo_connector->top == property) {
+ drm_object_property_set_value(&connector->base,
+ intel_sdvo_connector->bottom, val);
+ if (intel_sdvo_connector->top_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->top_margin = temp_value;
+ intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_vscan -
+ intel_sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- } else if (sdvo_priv->bottom_property == property) {
- drm_connector_property_set_value(connector,
- sdvo_priv->top_property, val);
- if (sdvo_priv->bottom_margin == temp_value)
- goto out;
- sdvo_priv->top_margin = temp_value;
- sdvo_priv->bottom_margin = temp_value;
- temp_value = sdvo_priv->max_vscan -
- sdvo_priv->top_margin;
+ goto set_value;
+ } else if (intel_sdvo_connector->bottom == property) {
+ drm_object_property_set_value(&connector->base,
+ intel_sdvo_connector->top, val);
+ if (intel_sdvo_connector->bottom_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->top_margin = temp_value;
+ intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_vscan -
+ intel_sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- } else if (sdvo_priv->hpos_property == property) {
- if (sdvo_priv->cur_hpos == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_POSITION_H;
- sdvo_priv->cur_hpos = temp_value;
- } else if (sdvo_priv->vpos_property == property) {
- if (sdvo_priv->cur_vpos == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_POSITION_V;
- sdvo_priv->cur_vpos = temp_value;
- } else if (sdvo_priv->saturation_property == property) {
- if (sdvo_priv->cur_saturation == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_SATURATION;
- sdvo_priv->cur_saturation = temp_value;
- } else if (sdvo_priv->contrast_property == property) {
- if (sdvo_priv->cur_contrast == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_CONTRAST;
- sdvo_priv->cur_contrast = temp_value;
- } else if (sdvo_priv->hue_property == property) {
- if (sdvo_priv->cur_hue == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_HUE;
- sdvo_priv->cur_hue = temp_value;
- } else if (sdvo_priv->brightness_property == property) {
- if (sdvo_priv->cur_brightness == temp_value)
- goto out;
-
- cmd = SDVO_CMD_SET_BRIGHTNESS;
- sdvo_priv->cur_brightness = temp_value;
- }
- if (cmd) {
- intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2);
- status = intel_sdvo_read_response(intel_output,
- NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO command \n");
- return -EINVAL;
- }
- changed = true;
+ goto set_value;
}
+ CHECK_PROPERTY(hpos, HPOS)
+ CHECK_PROPERTY(vpos, VPOS)
+ CHECK_PROPERTY(saturation, SATURATION)
+ CHECK_PROPERTY(contrast, CONTRAST)
+ CHECK_PROPERTY(hue, HUE)
+ CHECK_PROPERTY(brightness, BRIGHTNESS)
+ CHECK_PROPERTY(sharpness, SHARPNESS)
+ CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
+ CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
+ CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
+ CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
+ CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
+ CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
}
- if (changed && crtc)
- drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
- crtc->y, crtc->fb);
-out:
- return ret;
-}
-static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
- .dpms = intel_sdvo_dpms,
- .mode_fixup = intel_sdvo_mode_fixup,
- .prepare = intel_encoder_prepare,
- .mode_set = intel_sdvo_mode_set,
- .commit = intel_encoder_commit,
-};
+ return -EINVAL; /* unknown property */
+
+set_value:
+ if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2))
+ return -EIO;
+
+done:
+ if (intel_sdvo->base.base.crtc)
+ intel_crtc_restore_mode(intel_sdvo->base.base.crtc);
+
+ return 0;
+#undef CHECK_PROPERTY
+}
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .save = intel_sdvo_save,
- .restore = intel_sdvo_restore,
+ .dpms = intel_sdvo_dpms,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
@@ -2061,23 +2245,22 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
- drm_encoder_cleanup(encoder);
+ struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
+
+ if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+ drm_mode_destroy(encoder->dev,
+ intel_sdvo->sdvo_lvds_fixed_mode);
+
+ i2c_del_adapter(&intel_sdvo->ddc);
+ intel_encoder_destroy(encoder);
}
static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
.destroy = intel_sdvo_enc_destroy,
};
-
-/**
- * Choose the appropriate DDC bus for control bus switch command for this
- * SDVO output based on the controlled output.
- *
- * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
- * outputs, then LVDS outputs.
- */
static void
-intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
+intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
{
uint16_t mask = 0;
unsigned int num_bits;
@@ -2085,7 +2268,7 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
/* Make a mask of outputs less than or equal to our own priority in the
* list.
*/
- switch (dev_priv->controlled_output) {
+ switch (sdvo->controlled_output) {
case SDVO_OUTPUT_LVDS1:
mask |= SDVO_OUTPUT_LVDS1;
case SDVO_OUTPUT_LVDS0:
@@ -2102,82 +2285,85 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
}
/* Count bits to find what number we are in the priority list. */
- mask &= dev_priv->caps.output_flags;
+ mask &= sdvo->caps.output_flags;
num_bits = hweight16(mask);
- if (num_bits > 3) {
- /* if more than 3 outputs, default to DDC bus 3 for now */
+ /* If more than 3 outputs, default to DDC bus 3 for now. */
+ if (num_bits > 3)
num_bits = 3;
- }
/* Corresponds to SDVO_CONTROL_BUS_DDCx */
- dev_priv->ddc_bus = 1 << num_bits;
+ sdvo->ddc_bus = 1 << num_bits;
}
-static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
+/**
+ * Choose the appropriate DDC bus for control bus switch command for this
+ * SDVO output based on the controlled output.
+ *
+ * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
+ * outputs, then LVDS outputs.
+ */
+static void
+intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+ struct intel_sdvo *sdvo, u32 reg)
{
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
- uint8_t status;
+ struct sdvo_device_mapping *mapping;
- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+ if (sdvo->is_sdvob)
+ mapping = &(dev_priv->sdvo_mappings[0]);
+ else
+ mapping = &(dev_priv->sdvo_mappings[1]);
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
- status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
- return true;
+ if (mapping->initialized)
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+ else
+ intel_sdvo_guess_ddc_bus(sdvo);
}
-static struct intel_output *
-intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan)
+static void
+intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
+ struct intel_sdvo *sdvo, u32 reg)
{
- struct drm_device *dev = chan->drm_dev;
- struct drm_connector *connector;
- struct intel_output *intel_output = NULL;
+ struct sdvo_device_mapping *mapping;
+ u8 pin;
- list_for_each_entry(connector,
- &dev->mode_config.connector_list, head) {
- if (to_intel_output(connector)->ddc_bus == &chan->adapter) {
- intel_output = to_intel_output(connector);
- break;
- }
- }
- return intel_output;
-}
+ if (sdvo->is_sdvob)
+ mapping = &dev_priv->sdvo_mappings[0];
+ else
+ mapping = &dev_priv->sdvo_mappings[1];
-static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg msgs[], int num)
-{
- struct intel_output *intel_output;
- struct intel_sdvo_priv *sdvo_priv;
- struct i2c_algo_bit_data *algo_data;
- const struct i2c_algorithm *algo;
+ if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
+ pin = mapping->i2c_pin;
+ else
+ pin = GMBUS_PORT_DPB;
- algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
- intel_output =
- intel_sdvo_chan_to_intel_output(
- (struct intel_i2c_chan *)(algo_data->data));
- if (intel_output == NULL)
- return -EINVAL;
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
- sdvo_priv = intel_output->dev_priv;
- algo = intel_output->i2c_bus->algo;
+ /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
+ * our code totally fails once we start using gmbus. Hence fall back to
+ * bit banging for now. */
+ intel_gmbus_force_bit(sdvo->i2c, true);
+}
- intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
- return algo->master_xfer(i2c_adap, msgs, num);
+/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
+static void
+intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
+{
+ intel_gmbus_force_bit(sdvo->i2c, false);
}
-static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
- .master_xfer = intel_sdvo_master_xfer,
-};
+static bool
+intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
+{
+ return intel_sdvo_check_supp_encode(intel_sdvo);
+}
static u8
-intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
+intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct sdvo_device_mapping *my_mapping, *other_mapping;
- if (output_device == SDVOB) {
+ if (sdvo->is_sdvob) {
my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1];
} else {
@@ -2202,599 +2388,703 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
/* No SDVO device info is found for another DVO port,
* so use mapping assumption we had before BIOS parsing.
*/
- if (output_device == SDVOB)
+ if (sdvo->is_sdvob)
return 0x70;
else
return 0x72;
}
+static void
+intel_sdvo_connector_unregister(struct intel_connector *intel_connector)
+{
+ struct drm_connector *drm_connector;
+ struct intel_sdvo *sdvo_encoder;
+
+ drm_connector = &intel_connector->base;
+ sdvo_encoder = intel_attached_sdvo(&intel_connector->base);
+
+ sysfs_remove_link(&drm_connector->kdev->kobj,
+ sdvo_encoder->ddc.dev.kobj.name);
+ intel_connector_unregister(intel_connector);
+}
+
+static int
+intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
+ struct intel_sdvo *encoder)
+{
+ struct drm_connector *drm_connector;
+ int ret;
+
+ drm_connector = &connector->base.base;
+ ret = drm_connector_init(encoder->base.base.dev,
+ drm_connector,
+ &intel_sdvo_connector_funcs,
+ connector->base.base.connector_type);
+ if (ret < 0)
+ return ret;
+
+ drm_connector_helper_add(drm_connector,
+ &intel_sdvo_connector_helper_funcs);
+
+ connector->base.base.interlace_allowed = 1;
+ connector->base.base.doublescan_allowed = 0;
+ connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
+ connector->base.unregister = intel_sdvo_connector_unregister;
+
+ intel_connector_attach_encoder(&connector->base, &encoder->base);
+ ret = drm_sysfs_connector_add(drm_connector);
+ if (ret < 0)
+ goto err1;
+
+ ret = sysfs_create_link(&drm_connector->kdev->kobj,
+ &encoder->ddc.dev.kobj,
+ encoder->ddc.dev.kobj.name);
+ if (ret < 0)
+ goto err2;
+
+ return 0;
+
+err2:
+ drm_sysfs_connector_remove(drm_connector);
+err1:
+ drm_connector_cleanup(drm_connector);
+
+ return ret;
+}
+
+static void
+intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *connector)
+{
+ struct drm_device *dev = connector->base.base.dev;
+
+ intel_attach_force_audio_property(&connector->base.base);
+ if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
+ intel_attach_broadcast_rgb_property(&connector->base.base);
+ intel_sdvo->color_range_auto = true;
+ }
+}
+
static bool
-intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
+intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
- struct drm_connector *connector = &intel_output->base;
- struct drm_encoder *encoder = &intel_output->enc;
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- bool ret = true, registered = false;
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ DRM_DEBUG_KMS("initialising DVI device %d\n", device);
- sdvo_priv->is_tv = false;
- intel_output->needs_tv_clock = false;
- sdvo_priv->is_lvds = false;
+ intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+ return false;
- if (device_is_registered(&connector->kdev)) {
- drm_sysfs_connector_remove(connector);
- registered = true;
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
}
- if (flags &
- (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
- if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0;
- else
- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
-
- encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
- connector->connector_type = DRM_MODE_CONNECTOR_DVID;
-
- if (intel_sdvo_get_supp_encode(intel_output,
- &sdvo_priv->encode) &&
- intel_sdvo_get_digital_encoding_mode(intel_output) &&
- sdvo_priv->is_hdmi) {
- /* enable hdmi encoding mode if supported */
- intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI);
- intel_sdvo_set_colorimetry(intel_output,
- SDVO_COLORIMETRY_RGB256);
- connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
- intel_output->clone_mask =
- (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- }
- } else if (flags & SDVO_OUTPUT_SVID0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- sdvo_priv->is_tv = true;
- intel_output->needs_tv_clock = true;
- intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- } else if (flags & SDVO_OUTPUT_RGB0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
- encoder->encoder_type = DRM_MODE_ENCODER_DAC;
- connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_RGB1) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
- encoder->encoder_type = DRM_MODE_ENCODER_DAC;
- connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_LVDS0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
- encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
- connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- sdvo_priv->is_lvds = true;
- intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_LVDS1) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
- encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
- connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- sdvo_priv->is_lvds = true;
- intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ if (intel_sdvo_get_hotplug_support(intel_sdvo) &
+ intel_sdvo_connector->output_flag) {
+ intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
+ /* Some SDVO devices have one-shot hotplug interrupts.
+ * Ensure that they get re-enabled when an interrupt happens.
+ */
+ intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
+ intel_sdvo_enable_hotplug(intel_encoder);
} else {
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ }
+ encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+ if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
+ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ intel_sdvo->is_hdmi = true;
+ }
+
+ if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+ kfree(intel_sdvo_connector);
+ return false;
+ }
+
+ if (intel_sdvo->is_hdmi)
+ intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
+
+ return true;
+}
+
+static bool
+intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ DRM_DEBUG_KMS("initialising TV type %d\n", type);
+
+ intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+ return false;
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+
+ intel_sdvo->controlled_output |= type;
+ intel_sdvo_connector->output_flag = type;
+
+ intel_sdvo->is_tv = true;
+
+ if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+ kfree(intel_sdvo_connector);
+ return false;
+ }
+
+ if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
+ goto err;
+
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ drm_sysfs_connector_remove(connector);
+ intel_sdvo_destroy(connector);
+ return false;
+}
+
+static bool
+intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ DRM_DEBUG_KMS("initialising analog device %d\n", device);
+
+ intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+ return false;
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ }
+
+ if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+ kfree(intel_sdvo_connector);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
+
+ intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+ return false;
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ }
+
+ if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+ kfree(intel_sdvo_connector);
+ return false;
+ }
+
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
+
+ return true;
+err:
+ drm_sysfs_connector_remove(connector);
+ intel_sdvo_destroy(connector);
+ return false;
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
+{
+ intel_sdvo->is_tv = false;
+ intel_sdvo->is_lvds = false;
+
+	/* SDVO requires that an XXX1 function block not exist unless the XXX0 block does. */
+
+ if (flags & SDVO_OUTPUT_TMDS0)
+ if (!intel_sdvo_dvi_init(intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+ if (!intel_sdvo_dvi_init(intel_sdvo, 1))
+ return false;
+
+ /* TV has no XXX1 function block */
+ if (flags & SDVO_OUTPUT_SVID0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_CVBS0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_YPRPB0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_RGB0)
+ if (!intel_sdvo_analog_init(intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+ if (!intel_sdvo_analog_init(intel_sdvo, 1))
+ return false;
+
+ if (flags & SDVO_OUTPUT_LVDS0)
+ if (!intel_sdvo_lvds_init(intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+ if (!intel_sdvo_lvds_init(intel_sdvo, 1))
+ return false;
+
+ if ((flags & SDVO_OUTPUT_MASK) == 0) {
unsigned char bytes[2];
- sdvo_priv->controlled_output = 0;
- memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
+ intel_sdvo->controlled_output = 0;
+ memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
- SDVO_NAME(sdvo_priv),
+ SDVO_NAME(intel_sdvo),
bytes[0], bytes[1]);
- ret = false;
+ return false;
}
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
-
- if (ret && registered)
- ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
+ intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ return true;
+}
- return ret;
+static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector, *tmp;
+ list_for_each_entry_safe(connector, tmp,
+ &dev->mode_config.connector_list, head) {
+ if (intel_attached_encoder(connector) == &intel_sdvo->base) {
+ drm_sysfs_connector_remove(connector);
+ intel_sdvo_destroy(connector);
+ }
+ }
}
-static void intel_sdvo_tv_create_property(struct drm_connector *connector)
+static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ int type)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct drm_device *dev = intel_sdvo->base.base.dev;
struct intel_sdvo_tv_format format;
uint32_t format_map, i;
- uint8_t status;
- intel_sdvo_set_target_output(intel_output,
- sdvo_priv->controlled_output);
+ if (!intel_sdvo_set_target_output(intel_sdvo, type))
+ return false;
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &format, sizeof(format));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return;
+ BUILD_BUG_ON(sizeof(format) != 6);
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+ &format, sizeof(format)))
+ return false;
- memcpy(&format_map, &format, sizeof(format) > sizeof(format_map) ?
- sizeof(format_map) : sizeof(format));
+ memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
if (format_map == 0)
- return;
+ return false;
- sdvo_priv->format_supported_num = 0;
+ intel_sdvo_connector->format_supported_num = 0;
for (i = 0 ; i < TV_FORMAT_NUM; i++)
- if (format_map & (1 << i)) {
- sdvo_priv->tv_format_supported
- [sdvo_priv->format_supported_num++] =
- tv_format_names[i];
- }
+ if (format_map & (1 << i))
+ intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i;
- sdvo_priv->tv_format_property =
- drm_property_create(
- connector->dev, DRM_MODE_PROP_ENUM,
- "mode", sdvo_priv->format_supported_num);
+ intel_sdvo_connector->tv_format =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", intel_sdvo_connector->format_supported_num);
+ if (!intel_sdvo_connector->tv_format)
+ return false;
- for (i = 0; i < sdvo_priv->format_supported_num; i++)
+ for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
drm_property_add_enum(
- sdvo_priv->tv_format_property, i,
- i, sdvo_priv->tv_format_supported[i]);
+ intel_sdvo_connector->tv_format, i,
+ i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0];
- drm_connector_attach_property(
- connector, sdvo_priv->tv_format_property, 0);
+ intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
+ drm_object_attach_property(&intel_sdvo_connector->base.base.base,
+ intel_sdvo_connector->tv_format, 0);
+ return true;
}
-static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
+#define ENHANCEMENT(name, NAME) do { \
+ if (enhancements.name) { \
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+ !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+ return false; \
+ intel_sdvo_connector->max_##name = data_value[0]; \
+ intel_sdvo_connector->cur_##name = response; \
+ intel_sdvo_connector->name = \
+ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
+ if (!intel_sdvo_connector->name) return false; \
+ drm_object_attach_property(&connector->base, \
+ intel_sdvo_connector->name, \
+ intel_sdvo_connector->cur_##name); \
+ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+ data_value[0], data_value[1], response); \
+ } \
+} while (0)
+
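The ENHANCEMENT() macro above folds the query-max/query-current/create-property/attach sequence, previously written out longhand for every picture control, into one expansion. As a rough sketch (not part of the patch), ENHANCEMENT(brightness, BRIGHTNESS) expands to approximately the following, with dev, connector, data_value[] and response supplied by the enclosing function:

	if (enhancements.brightness) {
		if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_BRIGHTNESS, &data_value, 4) ||
		    !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_BRIGHTNESS, &response, 2))
			return false;
		intel_sdvo_connector->max_brightness = data_value[0];
		intel_sdvo_connector->cur_brightness = response;
		intel_sdvo_connector->brightness =
			drm_property_create_range(dev, 0, "brightness", 0, data_value[0]);
		if (!intel_sdvo_connector->brightness)
			return false;
		drm_object_attach_property(&connector->base,
					   intel_sdvo_connector->brightness,
					   intel_sdvo_connector->cur_brightness);
		DRM_DEBUG_KMS("brightness: max %d, default %d, current %d\n",
			      data_value[0], data_value[1], response);
	}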
+static bool
+intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ struct intel_sdvo_enhancements_reply enhancements)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- struct intel_sdvo_enhancements_reply sdvo_data;
- struct drm_device *dev = connector->dev;
- uint8_t status;
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &intel_sdvo_connector->base.base;
uint16_t response, data_value[2];
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
- NULL, 0);
- status = intel_sdvo_read_response(intel_output, &sdvo_data,
- sizeof(sdvo_data));
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS(" incorrect response is returned\n");
- return;
- }
- response = *((uint16_t *)&sdvo_data);
- if (!response) {
- DRM_DEBUG_KMS("No enhancement is supported\n");
- return;
- }
- if (sdvo_priv->is_tv) {
- /* when horizontal overscan is supported, Add the left/right
- * property
- */
- if (sdvo_data.overscan_h) {
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO max "
- "h_overscan\n");
- return;
- }
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_OVERSCAN_H, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n");
- return;
- }
- sdvo_priv->max_hscan = data_value[0];
- sdvo_priv->left_margin = data_value[0] - response;
- sdvo_priv->right_margin = sdvo_priv->left_margin;
- sdvo_priv->left_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "left_margin", 2);
- sdvo_priv->left_property->values[0] = 0;
- sdvo_priv->left_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->left_property,
- sdvo_priv->left_margin);
- sdvo_priv->right_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "right_margin", 2);
- sdvo_priv->right_property->values[0] = 0;
- sdvo_priv->right_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->right_property,
- sdvo_priv->right_margin);
- DRM_DEBUG_KMS("h_overscan: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.overscan_v) {
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO max "
- "v_overscan\n");
- return;
- }
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
- return;
- }
- sdvo_priv->max_vscan = data_value[0];
- sdvo_priv->top_margin = data_value[0] - response;
- sdvo_priv->bottom_margin = sdvo_priv->top_margin;
- sdvo_priv->top_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "top_margin", 2);
- sdvo_priv->top_property->values[0] = 0;
- sdvo_priv->top_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->top_property,
- sdvo_priv->top_margin);
- sdvo_priv->bottom_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "bottom_margin", 2);
- sdvo_priv->bottom_property->values[0] = 0;
- sdvo_priv->bottom_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->bottom_property,
- sdvo_priv->bottom_margin);
- DRM_DEBUG_KMS("v_overscan: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.position_h) {
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
- return;
- }
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_POSITION_H, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n");
- return;
- }
- sdvo_priv->max_hpos = data_value[0];
- sdvo_priv->cur_hpos = response;
- sdvo_priv->hpos_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "hpos", 2);
- sdvo_priv->hpos_property->values[0] = 0;
- sdvo_priv->hpos_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->hpos_property,
- sdvo_priv->cur_hpos);
- DRM_DEBUG_KMS("h_position: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.position_v) {
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
- return;
- }
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_POSITION_V, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n");
- return;
- }
- sdvo_priv->max_vpos = data_value[0];
- sdvo_priv->cur_vpos = response;
- sdvo_priv->vpos_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "vpos", 2);
- sdvo_priv->vpos_property->values[0] = 0;
- sdvo_priv->vpos_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->vpos_property,
- sdvo_priv->cur_vpos);
- DRM_DEBUG_KMS("v_position: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
+	/* when horizontal overscan is supported, add the left/right property */
+ if (enhancements.overscan_h) {
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_H,
+ &data_value, 4))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_H,
+ &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_hscan = data_value[0];
+ intel_sdvo_connector->left_margin = data_value[0] - response;
+ intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
+ intel_sdvo_connector->left =
+ drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->left)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->left,
+ intel_sdvo_connector->left_margin);
+
+ intel_sdvo_connector->right =
+ drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->right)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->right,
+ intel_sdvo_connector->right_margin);
+ DRM_DEBUG_KMS("h_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
}
- if (sdvo_priv->is_tv) {
- if (sdvo_data.saturation) {
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
- return;
- }
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_SATURATION, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
- return;
- }
- sdvo_priv->max_saturation = data_value[0];
- sdvo_priv->cur_saturation = response;
- sdvo_priv->saturation_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "saturation", 2);
- sdvo_priv->saturation_property->values[0] = 0;
- sdvo_priv->saturation_property->values[1] =
- data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->saturation_property,
- sdvo_priv->cur_saturation);
- DRM_DEBUG_KMS("saturation: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.contrast) {
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
- return;
- }
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_CONTRAST, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
- return;
- }
- sdvo_priv->max_contrast = data_value[0];
- sdvo_priv->cur_contrast = response;
- sdvo_priv->contrast_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "contrast", 2);
- sdvo_priv->contrast_property->values[0] = 0;
- sdvo_priv->contrast_property->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->contrast_property,
- sdvo_priv->cur_contrast);
- DRM_DEBUG_KMS("contrast: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
- if (sdvo_data.hue) {
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_MAX_HUE, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
- return;
- }
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_HUE, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
- return;
- }
- sdvo_priv->max_hue = data_value[0];
- sdvo_priv->cur_hue = response;
- sdvo_priv->hue_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "hue", 2);
- sdvo_priv->hue_property->values[0] = 0;
- sdvo_priv->hue_property->values[1] =
- data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->hue_property,
- sdvo_priv->cur_hue);
- DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
+
+ if (enhancements.overscan_v) {
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_V,
+ &data_value, 4))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_V,
+ &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_vscan = data_value[0];
+ intel_sdvo_connector->top_margin = data_value[0] - response;
+ intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
+ intel_sdvo_connector->top =
+ drm_property_create_range(dev, 0,
+ "top_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->top)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->top,
+ intel_sdvo_connector->top_margin);
+
+ intel_sdvo_connector->bottom =
+ drm_property_create_range(dev, 0,
+ "bottom_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->bottom)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->bottom,
+ intel_sdvo_connector->bottom_margin);
+ DRM_DEBUG_KMS("v_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
- if (sdvo_data.brightness) {
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &data_value, 4);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO Max bright\n");
- return;
- }
- intel_sdvo_write_cmd(intel_output,
- SDVO_CMD_GET_BRIGHTNESS, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
- &response, 2);
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Incorrect SDVO get brigh\n");
- return;
- }
- sdvo_priv->max_brightness = data_value[0];
- sdvo_priv->cur_brightness = response;
- sdvo_priv->brightness_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "brightness", 2);
- sdvo_priv->brightness_property->values[0] = 0;
- sdvo_priv->brightness_property->values[1] =
- data_value[0];
- drm_connector_attach_property(connector,
- sdvo_priv->brightness_property,
- sdvo_priv->cur_brightness);
- DRM_DEBUG_KMS("brightness: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
- }
+
+ ENHANCEMENT(hpos, HPOS);
+ ENHANCEMENT(vpos, VPOS);
+ ENHANCEMENT(saturation, SATURATION);
+ ENHANCEMENT(contrast, CONTRAST);
+ ENHANCEMENT(hue, HUE);
+ ENHANCEMENT(sharpness, SHARPNESS);
+ ENHANCEMENT(brightness, BRIGHTNESS);
+ ENHANCEMENT(flicker_filter, FLICKER_FILTER);
+ ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+ ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
+ ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
+ ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
+
+ if (enhancements.dot_crawl) {
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_dot_crawl = 1;
+ intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+ intel_sdvo_connector->dot_crawl =
+ drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
+ if (!intel_sdvo_connector->dot_crawl)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->dot_crawl,
+ intel_sdvo_connector->cur_dot_crawl);
+ DRM_DEBUG_KMS("dot crawl: current %d\n", response);
}
- return;
+
+ return true;
}
-bool intel_sdvo_init(struct drm_device *dev, int output_device)
+static bool
+intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ struct intel_sdvo_enhancements_reply enhancements)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
- struct intel_output *intel_output;
- struct intel_sdvo_priv *sdvo_priv;
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
- u8 ch[0x40];
- int i;
+ ENHANCEMENT(brightness, BRIGHTNESS);
- intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
- if (!intel_output) {
- return false;
- }
+ return true;
+}
+#undef ENHANCEMENT
- sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
- sdvo_priv->output_device = output_device;
+static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector)
+{
+ union {
+ struct intel_sdvo_enhancements_reply reply;
+ uint16_t response;
+ } enhancements;
- intel_output->dev_priv = sdvo_priv;
- intel_output->type = INTEL_OUTPUT_SDVO;
+ BUILD_BUG_ON(sizeof(enhancements) != 2);
- /* setup the DDC bus. */
- if (output_device == SDVOB)
- intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
- else
- intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+ enhancements.response = 0;
+ intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ &enhancements, sizeof(enhancements));
+ if (enhancements.response == 0) {
+ DRM_DEBUG_KMS("No enhancement is supported\n");
+ return true;
+ }
- if (!intel_output->i2c_bus)
- goto err_inteloutput;
+ if (IS_TV(intel_sdvo_connector))
+ return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+ else if (IS_LVDS(intel_sdvo_connector))
+ return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+ else
+ return true;
+}
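The union-plus-BUILD_BUG_ON pattern above gives two views of the same two wire bytes: the named bitfields for per-feature tests, and a raw uint16_t for the cheap "anything supported at all?" check. A minimal standalone sketch of the idea, with hypothetical field names:

	#include <stdint.h>

	struct flags_reply {
		unsigned int feature_a:1;	/* hypothetical capability bits */
		unsigned int feature_b:1;
		unsigned int pad:14;
	} __attribute__((packed));

	union flags_view {
		struct flags_reply reply;	/* named per-bit view */
		uint16_t response;		/* raw view: response == 0 means nothing supported */
	};

	_Static_assert(sizeof(union flags_view) == 2, "must match the 2-byte wire reply");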
- sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
+static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_sdvo *sdvo = adapter->algo_data;
- /* Save the bit-banging i2c functionality for use by the DDC wrapper */
- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
+ if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+ return -EIO;
- /* Read the regs to test if we can talk to the device */
- for (i = 0; i < 0x40; i++) {
- if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) {
- DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
- output_device == SDVOB ? 'B' : 'C');
- goto err_i2c;
- }
- }
+ return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
- /* setup the DDC bus. */
- if (output_device == SDVOB) {
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
- "SDVOB/VGA DDC BUS");
- dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
- } else {
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
- "SDVOC/VGA DDC BUS");
- dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
- }
+static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+ struct intel_sdvo *sdvo = adapter->algo_data;
+ return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
- if (intel_output->ddc_bus == NULL)
- goto err_i2c;
+static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
+ .master_xfer = intel_sdvo_ddc_proxy_xfer,
+ .functionality = intel_sdvo_ddc_proxy_func
+};
- /* Wrap with our custom algo which switches to DDC mode */
- intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
+static bool
+intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
+ struct drm_device *dev)
+{
+ sdvo->ddc.owner = THIS_MODULE;
+ sdvo->ddc.class = I2C_CLASS_DDC;
+ snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+ sdvo->ddc.dev.parent = &dev->pdev->dev;
+ sdvo->ddc.algo_data = sdvo;
+ sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
- /* In default case sdvo lvds is false */
- intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
+ return i2c_add_adapter(&sdvo->ddc) == 0;
+}
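With the proxy registered, every i2c transfer routed through &sdvo->ddc first performs the control-bus switch and then forwards to the real adapter, so generic helpers need no SDVO-specific plumbing. A hedged usage sketch (connector and intel_sdvo assumed in scope):

	/* EDID probe over the proxied DDC bus; the control-bus switch in
	 * intel_sdvo_ddc_proxy_xfer() happens transparently underneath. */
	struct edid *edid = drm_get_edid(connector, &intel_sdvo->ddc);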
- if (intel_sdvo_output_setup(intel_output,
- sdvo_priv->caps.output_flags) != true) {
- DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
- output_device == SDVOB ? 'B' : 'C');
- goto err_i2c;
- }
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ struct intel_sdvo *intel_sdvo;
+ int i;
+ intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
+ if (!intel_sdvo)
+ return false;
+ intel_sdvo->sdvo_reg = sdvo_reg;
+ intel_sdvo->is_sdvob = is_sdvob;
+ intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
+ intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
+ if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
+ goto err_i2c_bus;
- connector = &intel_output->base;
- drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
- connector->connector_type);
+ /* encoder type will be decided later */
+ intel_encoder = &intel_sdvo->base;
+ intel_encoder->type = INTEL_OUTPUT_SDVO;
+ drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
- drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ /* Read the regs to test if we can talk to the device */
+ for (i = 0; i < 0x40; i++) {
+ u8 byte;
- drm_encoder_init(dev, &intel_output->enc,
- &intel_sdvo_enc_funcs, intel_output->enc.encoder_type);
+ if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
+ DRM_DEBUG_KMS("No SDVO device found on %s\n",
+ SDVO_NAME(intel_sdvo));
+ goto err;
+ }
+ }
- drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs);
+ intel_encoder->compute_config = intel_sdvo_compute_config;
+ intel_encoder->disable = intel_disable_sdvo;
+ intel_encoder->pre_enable = intel_sdvo_pre_enable;
+ intel_encoder->enable = intel_enable_sdvo;
+ intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
+ intel_encoder->get_config = intel_sdvo_get_config;
- drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
- if (sdvo_priv->is_tv)
- intel_sdvo_tv_create_property(connector);
+ /* In default case sdvo lvds is false */
+ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
+ goto err;
+
+ if (intel_sdvo_output_setup(intel_sdvo,
+ intel_sdvo->caps.output_flags) != true) {
+ DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
+ SDVO_NAME(intel_sdvo));
+ /* Output_setup can leave behind connectors! */
+ goto err_output;
+ }
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
- intel_sdvo_create_enhance_property(connector);
+ /* Only enable the hotplug irq if we need it, to work around noisy
+ * hotplug lines.
+ */
+ if (intel_sdvo->hotplug_active) {
+ intel_encoder->hpd_pin =
+ intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C;
+ }
- drm_sysfs_connector_add(connector);
+ /*
+ * Cloning SDVO with anything is often impossible, since the SDVO
+ * encoder can request a special input timing mode. And even if that's
+ * not the case we have evidence that cloning a plain unscaled mode with
+ * VGA doesn't really work. Furthermore the cloning flags are way too
+ * simplistic anyway to express such constraints, so just give up on
+ * cloning for SDVO encoders.
+ */
+ intel_sdvo->base.cloneable = 0;
- intel_sdvo_select_ddc_bus(sdvo_priv);
+ intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
/* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(intel_output, true, false);
-
- intel_sdvo_get_input_pixel_clock_range(intel_output,
- &sdvo_priv->pixel_clock_min,
- &sdvo_priv->pixel_clock_max);
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ goto err_output;
+ if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
+ &intel_sdvo->pixel_clock_min,
+ &intel_sdvo->pixel_clock_max))
+ goto err_output;
DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
"input 1: %c, input 2: %c, "
"output 1: %c, output 2: %c\n",
- SDVO_NAME(sdvo_priv),
- sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
- sdvo_priv->caps.device_rev_id,
- sdvo_priv->pixel_clock_min / 1000,
- sdvo_priv->pixel_clock_max / 1000,
- (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
- (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+ SDVO_NAME(intel_sdvo),
+ intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
+ intel_sdvo->caps.device_rev_id,
+ intel_sdvo->pixel_clock_min / 1000,
+ intel_sdvo->pixel_clock_max / 1000,
+ (intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+ (intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
/* check currently supported outputs */
- sdvo_priv->caps.output_flags &
+ intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
- sdvo_priv->caps.output_flags &
+ intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
-
return true;
-err_i2c:
- if (sdvo_priv->analog_ddc_bus != NULL)
- intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
- if (intel_output->ddc_bus != NULL)
- intel_i2c_destroy(intel_output->ddc_bus);
- if (intel_output->i2c_bus != NULL)
- intel_i2c_destroy(intel_output->i2c_bus);
-err_inteloutput:
- kfree(intel_output);
+err_output:
+ intel_sdvo_output_cleanup(intel_sdvo);
+
+err:
+ drm_encoder_cleanup(&intel_encoder->base);
+ i2c_del_adapter(&intel_sdvo->ddc);
+err_i2c_bus:
+ intel_sdvo_unselect_i2c_bus(intel_sdvo);
+ kfree(intel_sdvo);
return false;
}
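The error ladder at the end of intel_sdvo_init() unwinds in reverse order of setup: err_output destroys any connectors left behind, err releases the encoder and the DDC proxy, and err_i2c_bus undoes the i2c bus selection. A minimal self-contained sketch of the pattern (generic names, not from the patch):

	#include <stdbool.h>

	static bool acquire_a(void) { return true; }	/* placeholders for real setup steps */
	static bool acquire_b(void) { return true; }
	static bool acquire_c(void) { return false; }
	static void release_a(void) { }
	static void release_b(void) { }

	static int setup(void)
	{
		if (!acquire_a())
			return -1;
		if (!acquire_b())
			goto err_a;	/* only a is held here */
		if (!acquire_c())
			goto err_b;	/* a and b are held here */
		return 0;

	err_b:
		release_b();
	err_a:
		release_a();
		return -1;
	}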
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index ba5cdf8ae40..2e2d4eb4a00 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -1,5 +1,5 @@
/*
- * Copyright Â© 2006-2007 Intel Corporation
+ * Copyright © 2006-2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -46,64 +46,69 @@
#define SDVO_OUTPUT_LAST (14)
struct intel_sdvo_caps {
- u8 vendor_id;
- u8 device_id;
- u8 device_rev_id;
- u8 sdvo_version_major;
- u8 sdvo_version_minor;
- unsigned int sdvo_inputs_mask:2;
- unsigned int smooth_scaling:1;
- unsigned int sharp_scaling:1;
- unsigned int up_scaling:1;
- unsigned int down_scaling:1;
- unsigned int stall_support:1;
- unsigned int pad:1;
- u16 output_flags;
-} __attribute__((packed));
+ u8 vendor_id;
+ u8 device_id;
+ u8 device_rev_id;
+ u8 sdvo_version_major;
+ u8 sdvo_version_minor;
+ unsigned int sdvo_inputs_mask:2;
+ unsigned int smooth_scaling:1;
+ unsigned int sharp_scaling:1;
+ unsigned int up_scaling:1;
+ unsigned int down_scaling:1;
+ unsigned int stall_support:1;
+ unsigned int pad:1;
+ u16 output_flags;
+} __packed;
+
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE (1 << 7)
/** This matches the EDID DTD structure, more or less */
struct intel_sdvo_dtd {
- struct {
- u16 clock; /**< pixel clock, in 10kHz units */
- u8 h_active; /**< lower 8 bits (pixels) */
- u8 h_blank; /**< lower 8 bits (pixels) */
- u8 h_high; /**< upper 4 bits each h_active, h_blank */
- u8 v_active; /**< lower 8 bits (lines) */
- u8 v_blank; /**< lower 8 bits (lines) */
- u8 v_high; /**< upper 4 bits each v_active, v_blank */
- } part1;
-
- struct {
- u8 h_sync_off; /**< lower 8 bits, from hblank start */
- u8 h_sync_width; /**< lower 8 bits (pixels) */
- /** lower 4 bits each vsync offset, vsync width */
- u8 v_sync_off_width;
- /**
- * 2 high bits of hsync offset, 2 high bits of hsync width,
- * bits 4-5 of vsync offset, and 2 high bits of vsync width.
- */
- u8 sync_off_width_high;
- u8 dtd_flags;
- u8 sdvo_flags;
- /** bits 6-7 of vsync offset at bits 6-7 */
- u8 v_sync_off_high;
- u8 reserved;
- } part2;
-} __attribute__((packed));
+ struct {
+ u16 clock; /**< pixel clock, in 10kHz units */
+ u8 h_active; /**< lower 8 bits (pixels) */
+ u8 h_blank; /**< lower 8 bits (pixels) */
+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
+ u8 v_active; /**< lower 8 bits (lines) */
+ u8 v_blank; /**< lower 8 bits (lines) */
+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
+ } part1;
+
+ struct {
+ u8 h_sync_off; /**< lower 8 bits, from hblank start */
+ u8 h_sync_width; /**< lower 8 bits (pixels) */
+ /** lower 4 bits each vsync offset, vsync width */
+ u8 v_sync_off_width;
+ /**
+ * 2 high bits of hsync offset, 2 high bits of hsync width,
+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+ */
+ u8 sync_off_width_high;
+ u8 dtd_flags;
+ u8 sdvo_flags;
+ /** bits 6-7 of vsync offset at bits 6-7 */
+ u8 v_sync_off_high;
+ u8 reserved;
+ } part2;
+} __packed;
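part1 carries 12-bit active/blanking values split across a low byte and a shared high-nibble byte, mirroring the EDID DTD layout described in the comments. A sketch of the horizontal packing (the helper name is illustrative, not from the patch):

	/* Pack 12-bit horizontal active/blank values into part1; the upper
	 * nibbles share h_high, exactly as in an EDID detailed timing. */
	static void sdvo_dtd_pack_h(struct intel_sdvo_dtd *dtd,
				    u16 h_active, u16 h_blank)
	{
		dtd->part1.h_active = h_active & 0xff;
		dtd->part1.h_blank = h_blank & 0xff;
		dtd->part1.h_high = (((h_active >> 8) & 0xf) << 4) |
				    ((h_blank >> 8) & 0xf);
	}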
struct intel_sdvo_pixel_clock_range {
- u16 min; /**< pixel clock, in 10kHz units */
- u16 max; /**< pixel clock, in 10kHz units */
-} __attribute__((packed));
+ u16 min; /**< pixel clock, in 10kHz units */
+ u16 max; /**< pixel clock, in 10kHz units */
+} __packed;
struct intel_sdvo_preferred_input_timing_args {
- u16 clock;
- u16 width;
- u16 height;
- u8 interlace:1;
- u8 scaled:1;
- u8 pad:6;
-} __attribute__((packed));
+ u16 clock;
+ u16 width;
+ u16 height;
+ u8 interlace:1;
+ u8 scaled:1;
+ u8 pad:6;
+} __packed;
/* I2C registers for SDVO */
#define SDVO_I2C_ARG_0 0x07
@@ -154,10 +159,10 @@ struct intel_sdvo_preferred_input_timing_args {
*/
#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
struct intel_sdvo_get_trained_inputs_response {
- unsigned int input0_trained:1;
- unsigned int input1_trained:1;
- unsigned int pad:6;
-} __attribute__((packed));
+ unsigned int input0_trained:1;
+ unsigned int input1_trained:1;
+ unsigned int pad:6;
+} __packed;
/** Returns a struct intel_sdvo_output_flags of active outputs. */
#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
@@ -177,7 +182,7 @@ struct intel_sdvo_get_trained_inputs_response {
*/
#define SDVO_CMD_GET_IN_OUT_MAP 0x06
struct intel_sdvo_in_out_map {
- u16 in0, in1;
+ u16 in0, in1;
};
/**
@@ -210,11 +215,11 @@ struct intel_sdvo_in_out_map {
#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
struct intel_sdvo_get_interrupt_event_source_response {
- u16 interrupt_status;
- unsigned int ambient_light_interrupt:1;
- unsigned int hdmi_audio_encrypt_change:1;
- unsigned int pad:6;
-} __attribute__((packed));
+ u16 interrupt_status;
+ unsigned int ambient_light_interrupt:1;
+ unsigned int hdmi_audio_encrypt_change:1;
+ unsigned int pad:6;
+} __packed;
/**
* Selects which input is affected by future input commands.
@@ -225,12 +230,12 @@ struct intel_sdvo_get_interrupt_event_source_response {
*/
#define SDVO_CMD_SET_TARGET_INPUT 0x10
struct intel_sdvo_set_target_input_args {
- unsigned int target_1:1;
- unsigned int pad:7;
-} __attribute__((packed));
+ unsigned int target_1:1;
+ unsigned int pad:7;
+} __packed;
/**
- * Takes a struct intel_sdvo_output_flags of which outputs are targetted by
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
* future output commands.
*
 * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
@@ -312,60 +317,60 @@ struct intel_sdvo_set_target_input_args {
# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
-/** 5 bytes of bit flags for TV formats shared by all TV format functions */
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
struct intel_sdvo_tv_format {
- unsigned int ntsc_m:1;
- unsigned int ntsc_j:1;
- unsigned int ntsc_443:1;
- unsigned int pal_b:1;
- unsigned int pal_d:1;
- unsigned int pal_g:1;
- unsigned int pal_h:1;
- unsigned int pal_i:1;
-
- unsigned int pal_m:1;
- unsigned int pal_n:1;
- unsigned int pal_nc:1;
- unsigned int pal_60:1;
- unsigned int secam_b:1;
- unsigned int secam_d:1;
- unsigned int secam_g:1;
- unsigned int secam_k:1;
-
- unsigned int secam_k1:1;
- unsigned int secam_l:1;
- unsigned int secam_60:1;
- unsigned int hdtv_std_smpte_240m_1080i_59:1;
- unsigned int hdtv_std_smpte_240m_1080i_60:1;
- unsigned int hdtv_std_smpte_260m_1080i_59:1;
- unsigned int hdtv_std_smpte_260m_1080i_60:1;
- unsigned int hdtv_std_smpte_274m_1080i_50:1;
-
- unsigned int hdtv_std_smpte_274m_1080i_59:1;
- unsigned int hdtv_std_smpte_274m_1080i_60:1;
- unsigned int hdtv_std_smpte_274m_1080p_23:1;
- unsigned int hdtv_std_smpte_274m_1080p_24:1;
- unsigned int hdtv_std_smpte_274m_1080p_25:1;
- unsigned int hdtv_std_smpte_274m_1080p_29:1;
- unsigned int hdtv_std_smpte_274m_1080p_30:1;
- unsigned int hdtv_std_smpte_274m_1080p_50:1;
-
- unsigned int hdtv_std_smpte_274m_1080p_59:1;
- unsigned int hdtv_std_smpte_274m_1080p_60:1;
- unsigned int hdtv_std_smpte_295m_1080i_50:1;
- unsigned int hdtv_std_smpte_295m_1080p_50:1;
- unsigned int hdtv_std_smpte_296m_720p_59:1;
- unsigned int hdtv_std_smpte_296m_720p_60:1;
- unsigned int hdtv_std_smpte_296m_720p_50:1;
- unsigned int hdtv_std_smpte_293m_480p_59:1;
-
- unsigned int hdtv_std_smpte_170m_480i_59:1;
- unsigned int hdtv_std_iturbt601_576i_50:1;
- unsigned int hdtv_std_iturbt601_576p_50:1;
- unsigned int hdtv_std_eia_7702a_480i_60:1;
- unsigned int hdtv_std_eia_7702a_480p_60:1;
- unsigned int pad:3;
-} __attribute__((packed));
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:3;
+} __packed;
#define SDVO_CMD_GET_TV_FORMAT 0x28
@@ -374,145 +379,145 @@ struct intel_sdvo_tv_format {
/** Returns the resolutions that can be used with the given TV format */
#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
struct intel_sdvo_sdtv_resolution_request {
- unsigned int ntsc_m:1;
- unsigned int ntsc_j:1;
- unsigned int ntsc_443:1;
- unsigned int pal_b:1;
- unsigned int pal_d:1;
- unsigned int pal_g:1;
- unsigned int pal_h:1;
- unsigned int pal_i:1;
-
- unsigned int pal_m:1;
- unsigned int pal_n:1;
- unsigned int pal_nc:1;
- unsigned int pal_60:1;
- unsigned int secam_b:1;
- unsigned int secam_d:1;
- unsigned int secam_g:1;
- unsigned int secam_k:1;
-
- unsigned int secam_k1:1;
- unsigned int secam_l:1;
- unsigned int secam_60:1;
- unsigned int pad:5;
-} __attribute__((packed));
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int pad:5;
+} __packed;
struct intel_sdvo_sdtv_resolution_reply {
- unsigned int res_320x200:1;
- unsigned int res_320x240:1;
- unsigned int res_400x300:1;
- unsigned int res_640x350:1;
- unsigned int res_640x400:1;
- unsigned int res_640x480:1;
- unsigned int res_704x480:1;
- unsigned int res_704x576:1;
-
- unsigned int res_720x350:1;
- unsigned int res_720x400:1;
- unsigned int res_720x480:1;
- unsigned int res_720x540:1;
- unsigned int res_720x576:1;
- unsigned int res_768x576:1;
- unsigned int res_800x600:1;
- unsigned int res_832x624:1;
-
- unsigned int res_920x766:1;
- unsigned int res_1024x768:1;
- unsigned int res_1280x1024:1;
- unsigned int pad:5;
-} __attribute__((packed));
+ unsigned int res_320x200:1;
+ unsigned int res_320x240:1;
+ unsigned int res_400x300:1;
+ unsigned int res_640x350:1;
+ unsigned int res_640x400:1;
+ unsigned int res_640x480:1;
+ unsigned int res_704x480:1;
+ unsigned int res_704x576:1;
+
+ unsigned int res_720x350:1;
+ unsigned int res_720x400:1;
+ unsigned int res_720x480:1;
+ unsigned int res_720x540:1;
+ unsigned int res_720x576:1;
+ unsigned int res_768x576:1;
+ unsigned int res_800x600:1;
+ unsigned int res_832x624:1;
+
+ unsigned int res_920x766:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x1024:1;
+ unsigned int pad:5;
+} __packed;
/* Get supported resolution with square pixel aspect ratio that can be
scaled for the requested HDTV format */
#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
struct intel_sdvo_hdtv_resolution_request {
- unsigned int hdtv_std_smpte_240m_1080i_59:1;
- unsigned int hdtv_std_smpte_240m_1080i_60:1;
- unsigned int hdtv_std_smpte_260m_1080i_59:1;
- unsigned int hdtv_std_smpte_260m_1080i_60:1;
- unsigned int hdtv_std_smpte_274m_1080i_50:1;
- unsigned int hdtv_std_smpte_274m_1080i_59:1;
- unsigned int hdtv_std_smpte_274m_1080i_60:1;
- unsigned int hdtv_std_smpte_274m_1080p_23:1;
-
- unsigned int hdtv_std_smpte_274m_1080p_24:1;
- unsigned int hdtv_std_smpte_274m_1080p_25:1;
- unsigned int hdtv_std_smpte_274m_1080p_29:1;
- unsigned int hdtv_std_smpte_274m_1080p_30:1;
- unsigned int hdtv_std_smpte_274m_1080p_50:1;
- unsigned int hdtv_std_smpte_274m_1080p_59:1;
- unsigned int hdtv_std_smpte_274m_1080p_60:1;
- unsigned int hdtv_std_smpte_295m_1080i_50:1;
-
- unsigned int hdtv_std_smpte_295m_1080p_50:1;
- unsigned int hdtv_std_smpte_296m_720p_59:1;
- unsigned int hdtv_std_smpte_296m_720p_60:1;
- unsigned int hdtv_std_smpte_296m_720p_50:1;
- unsigned int hdtv_std_smpte_293m_480p_59:1;
- unsigned int hdtv_std_smpte_170m_480i_59:1;
- unsigned int hdtv_std_iturbt601_576i_50:1;
- unsigned int hdtv_std_iturbt601_576p_50:1;
-
- unsigned int hdtv_std_eia_7702a_480i_60:1;
- unsigned int hdtv_std_eia_7702a_480p_60:1;
- unsigned int pad:6;
-} __attribute__((packed));
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:6;
+} __packed;
struct intel_sdvo_hdtv_resolution_reply {
- unsigned int res_640x480:1;
- unsigned int res_800x600:1;
- unsigned int res_1024x768:1;
- unsigned int res_1280x960:1;
- unsigned int res_1400x1050:1;
- unsigned int res_1600x1200:1;
- unsigned int res_1920x1440:1;
- unsigned int res_2048x1536:1;
-
- unsigned int res_2560x1920:1;
- unsigned int res_3200x2400:1;
- unsigned int res_3840x2880:1;
- unsigned int pad1:5;
-
- unsigned int res_848x480:1;
- unsigned int res_1064x600:1;
- unsigned int res_1280x720:1;
- unsigned int res_1360x768:1;
- unsigned int res_1704x960:1;
- unsigned int res_1864x1050:1;
- unsigned int res_1920x1080:1;
- unsigned int res_2128x1200:1;
-
- unsigned int res_2560x1400:1;
- unsigned int res_2728x1536:1;
- unsigned int res_3408x1920:1;
- unsigned int res_4264x2400:1;
- unsigned int res_5120x2880:1;
- unsigned int pad2:3;
-
- unsigned int res_768x480:1;
- unsigned int res_960x600:1;
- unsigned int res_1152x720:1;
- unsigned int res_1124x768:1;
- unsigned int res_1536x960:1;
- unsigned int res_1680x1050:1;
- unsigned int res_1728x1080:1;
- unsigned int res_1920x1200:1;
-
- unsigned int res_2304x1440:1;
- unsigned int res_2456x1536:1;
- unsigned int res_3072x1920:1;
- unsigned int res_3840x2400:1;
- unsigned int res_4608x2880:1;
- unsigned int pad3:3;
-
- unsigned int res_1280x1024:1;
- unsigned int pad4:7;
-
- unsigned int res_1280x768:1;
- unsigned int pad5:7;
-} __attribute__((packed));
+ unsigned int res_640x480:1;
+ unsigned int res_800x600:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x960:1;
+ unsigned int res_1400x1050:1;
+ unsigned int res_1600x1200:1;
+ unsigned int res_1920x1440:1;
+ unsigned int res_2048x1536:1;
+
+ unsigned int res_2560x1920:1;
+ unsigned int res_3200x2400:1;
+ unsigned int res_3840x2880:1;
+ unsigned int pad1:5;
+
+ unsigned int res_848x480:1;
+ unsigned int res_1064x600:1;
+ unsigned int res_1280x720:1;
+ unsigned int res_1360x768:1;
+ unsigned int res_1704x960:1;
+ unsigned int res_1864x1050:1;
+ unsigned int res_1920x1080:1;
+ unsigned int res_2128x1200:1;
+
+ unsigned int res_2560x1400:1;
+ unsigned int res_2728x1536:1;
+ unsigned int res_3408x1920:1;
+ unsigned int res_4264x2400:1;
+ unsigned int res_5120x2880:1;
+ unsigned int pad2:3;
+
+ unsigned int res_768x480:1;
+ unsigned int res_960x600:1;
+ unsigned int res_1152x720:1;
+ unsigned int res_1124x768:1;
+ unsigned int res_1536x960:1;
+ unsigned int res_1680x1050:1;
+ unsigned int res_1728x1080:1;
+ unsigned int res_1920x1200:1;
+
+ unsigned int res_2304x1440:1;
+ unsigned int res_2456x1536:1;
+ unsigned int res_3072x1920:1;
+ unsigned int res_3840x2400:1;
+ unsigned int res_4608x2880:1;
+ unsigned int pad3:3;
+
+ unsigned int res_1280x1024:1;
+ unsigned int pad4:7;
+
+ unsigned int res_1280x768:1;
+ unsigned int pad5:7;
+} __packed;
/* Get supported power state returns info for encoder and monitor; it relies on
 the last SetTargetInput and SetTargetOutput calls */
@@ -539,43 +544,43 @@ struct intel_sdvo_hdtv_resolution_reply {
* The high fields are bits 8:9 of the 10-bit values.
*/
struct sdvo_panel_power_sequencing {
- u8 t0;
- u8 t1;
- u8 t2;
- u8 t3;
- u8 t4;
+ u8 t0;
+ u8 t1;
+ u8 t2;
+ u8 t3;
+ u8 t4;
- unsigned int t0_high:2;
- unsigned int t1_high:2;
- unsigned int t2_high:2;
- unsigned int t3_high:2;
+ unsigned int t0_high:2;
+ unsigned int t1_high:2;
+ unsigned int t2_high:2;
+ unsigned int t3_high:2;
- unsigned int t4_high:2;
- unsigned int pad:6;
-} __attribute__((packed));
+ unsigned int t4_high:2;
+ unsigned int pad:6;
+} __packed;
#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
struct sdvo_max_backlight_reply {
- u8 max_value;
- u8 default_value;
-} __attribute__((packed));
+ u8 max_value;
+ u8 default_value;
+} __packed;
#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
struct sdvo_get_ambient_light_reply {
- u16 trip_low;
- u16 trip_high;
- u16 value;
-} __attribute__((packed));
+ u16 trip_low;
+ u16 trip_high;
+ u16 value;
+} __packed;
#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
struct sdvo_set_ambient_light_reply {
- u16 trip_low;
- u16 trip_high;
- unsigned int enable:1;
- unsigned int pad:7;
-} __attribute__((packed));
+ u16 trip_low;
+ u16 trip_high;
+ unsigned int enable:1;
+ unsigned int pad:7;
+} __packed;
/* Set display power state */
#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
@@ -586,46 +591,46 @@ struct sdvo_set_ambient_light_reply {
#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
struct intel_sdvo_enhancements_reply {
- unsigned int flicker_filter:1;
- unsigned int flicker_filter_adaptive:1;
- unsigned int flicker_filter_2d:1;
- unsigned int saturation:1;
- unsigned int hue:1;
- unsigned int brightness:1;
- unsigned int contrast:1;
- unsigned int overscan_h:1;
-
- unsigned int overscan_v:1;
- unsigned int position_h:1;
- unsigned int position_v:1;
- unsigned int sharpness:1;
- unsigned int dot_crawl:1;
- unsigned int dither:1;
- unsigned int max_tv_chroma_filter:1;
- unsigned int max_tv_luma_filter:1;
-} __attribute__((packed));
+ unsigned int flicker_filter:1;
+ unsigned int flicker_filter_adaptive:1;
+ unsigned int flicker_filter_2d:1;
+ unsigned int saturation:1;
+ unsigned int hue:1;
+ unsigned int brightness:1;
+ unsigned int contrast:1;
+ unsigned int overscan_h:1;
+
+ unsigned int overscan_v:1;
+ unsigned int hpos:1;
+ unsigned int vpos:1;
+ unsigned int sharpness:1;
+ unsigned int dot_crawl:1;
+ unsigned int dither:1;
+ unsigned int tv_chroma_filter:1;
+ unsigned int tv_luma_filter:1;
+} __packed;
/* Picture enhancement limits below are dependent on the current TV format,
* and thus need to be queried and set after it.
*/
-#define SDVO_CMD_GET_MAX_FLICKER_FITER 0x4d
-#define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FITER 0x7b
-#define SDVO_CMD_GET_MAX_2D_FLICKER_FITER 0x52
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
#define SDVO_CMD_GET_MAX_SATURATION 0x55
#define SDVO_CMD_GET_MAX_HUE 0x58
#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
-#define SDVO_CMD_GET_MAX_POSITION_H 0x67
-#define SDVO_CMD_GET_MAX_POSITION_V 0x6a
-#define SDVO_CMD_GET_MAX_SHARPNESS_V 0x6d
-#define SDVO_CMD_GET_MAX_TV_CHROMA 0x74
-#define SDVO_CMD_GET_MAX_TV_LUMA 0x77
+#define SDVO_CMD_GET_MAX_HPOS 0x67
+#define SDVO_CMD_GET_MAX_VPOS 0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
struct intel_sdvo_enhancement_limits_reply {
- u16 max_value;
- u16 default_value;
-} __attribute__((packed));
+ u16 max_value;
+ u16 default_value;
+} __packed;
#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
@@ -638,10 +643,10 @@ struct intel_sdvo_enhancement_limits_reply {
#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
-#define SDVO_CMD_GET_ADAPTIVE_FLICKER_FITER 0x50
-#define SDVO_CMD_SET_ADAPTIVE_FLICKER_FITER 0x51
-#define SDVO_CMD_GET_2D_FLICKER_FITER 0x53
-#define SDVO_CMD_SET_2D_FLICKER_FITER 0x54
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
#define SDVO_CMD_GET_SATURATION 0x56
#define SDVO_CMD_SET_SATURATION 0x57
#define SDVO_CMD_GET_HUE 0x59
@@ -654,19 +659,19 @@ struct intel_sdvo_enhancement_limits_reply {
#define SDVO_CMD_SET_OVERSCAN_H 0x63
#define SDVO_CMD_GET_OVERSCAN_V 0x65
#define SDVO_CMD_SET_OVERSCAN_V 0x66
-#define SDVO_CMD_GET_POSITION_H 0x68
-#define SDVO_CMD_SET_POSITION_H 0x69
-#define SDVO_CMD_GET_POSITION_V 0x6b
-#define SDVO_CMD_SET_POSITION_V 0x6c
+#define SDVO_CMD_GET_HPOS 0x68
+#define SDVO_CMD_SET_HPOS 0x69
+#define SDVO_CMD_GET_VPOS 0x6b
+#define SDVO_CMD_SET_VPOS 0x6c
#define SDVO_CMD_GET_SHARPNESS 0x6e
#define SDVO_CMD_SET_SHARPNESS 0x6f
-#define SDVO_CMD_GET_TV_CHROMA 0x75
-#define SDVO_CMD_SET_TV_CHROMA 0x76
-#define SDVO_CMD_GET_TV_LUMA 0x78
-#define SDVO_CMD_SET_TV_LUMA 0x79
+#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
struct intel_sdvo_enhancements_arg {
- u16 value;
-}__attribute__((packed));
+ u16 value;
+} __packed;
#define SDVO_CMD_GET_DOT_CRAWL 0x70
#define SDVO_CMD_SET_DOT_CRAWL 0x71
@@ -703,6 +708,8 @@ struct intel_sdvo_enhancements_arg {
#define SDVO_CMD_SET_AUDIO_STAT 0x91
#define SDVO_CMD_GET_AUDIO_STAT 0x92
#define SDVO_CMD_SET_HBUF_INDEX 0x93
+ #define SDVO_HBUF_INDEX_ELD 0
+ #define SDVO_HBUF_INDEX_AVI_IF 1
#define SDVO_CMD_GET_HBUF_INDEX 0x94
#define SDVO_CMD_GET_HBUF_INFO 0x95
#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
@@ -717,7 +724,7 @@ struct intel_sdvo_enhancements_arg {
#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
#define SDVO_NEED_TO_STALL (1 << 7)
-struct intel_sdvo_encode{
- u8 dvi_rev;
- u8 hdmi_rev;
-} __attribute__ ((packed));
+struct intel_sdvo_encode {
+ u8 dvi_rev;
+ u8 hdmi_rev;
+} __packed;
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
new file mode 100644
index 00000000000..01d841ea314
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/*
+ * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
+ * VLV_VLV2_PUNIT_HAS_0.8.docx
+ */
+
+/* Standard MMIO read, non-posted */
+#define SB_MRD_NP 0x00
+/* Standard MMIO write, non-posted */
+#define SB_MWR_NP 0x01
+/* Private register read, double-word addressing, non-posted */
+#define SB_CRRDDA_NP 0x06
+/* Private register write, double-word addressing, non-posted */
+#define SB_CRWRDA_NP 0x07
+
+static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
+ u32 port, u32 opcode, u32 addr, u32 *val)
+{
+ u32 cmd, be = 0xf, bar = 0;
+ bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
+
+ cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
+ (port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
+ (bar << IOSF_BAR_SHIFT);
+
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+ if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+ DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
+ is_read ? "read" : "write");
+ return -EAGAIN;
+ }
+
+ I915_WRITE(VLV_IOSF_ADDR, addr);
+ if (!is_read)
+ I915_WRITE(VLV_IOSF_DATA, *val);
+ I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
+
+ if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+ DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
+ is_read ? "read" : "write");
+ return -ETIMEDOUT;
+ }
+
+ if (is_read)
+ *val = I915_READ(VLV_IOSF_DATA);
+ I915_WRITE(VLV_IOSF_DATA, 0);
+
+ return 0;
+}
+
+u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
+{
+ u32 val = 0;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ mutex_lock(&dev_priv->dpio_lock);
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
+ SB_CRRDDA_NP, addr, &val);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ return val;
+}
+
+void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
+{
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ mutex_lock(&dev_priv->dpio_lock);
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
+ SB_CRWRDA_NP, addr, &val);
+ mutex_unlock(&dev_priv->dpio_lock);
+}
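vlv_punit_read()/vlv_punit_write() take dpio_lock internally but assert that the caller already holds rps.hw_lock, so a composed read-modify-write would look like this sketch (the helper name and clear/set arguments are illustrative):

	static void vlv_punit_rmw(struct drm_i915_private *dev_priv,
				  u8 addr, u32 clear, u32 set)
	{
		u32 val;

		mutex_lock(&dev_priv->rps.hw_lock);	/* required by the WARN_ONs above */
		val = vlv_punit_read(dev_priv, addr);
		val = (val & ~clear) | set;
		vlv_punit_write(dev_priv, addr, val);
		mutex_unlock(&dev_priv->rps.hw_lock);
	}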
+
+u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+ SB_CRRDDA_NP, reg, &val);
+
+ return val;
+}
+
+void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+ SB_CRWRDA_NP, reg, &val);
+}
+
+u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
+{
+ u32 val = 0;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ mutex_lock(&dev_priv->dpio_lock);
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
+ SB_CRRDDA_NP, addr, &val);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ return val;
+}
+
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val = 0;
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+ SB_CRRDDA_NP, reg, &val);
+ return val;
+}
+
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+ SB_CRWRDA_NP, reg, &val);
+}
+
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val = 0;
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+ SB_CRRDDA_NP, reg, &val);
+ return val;
+}
+
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+ SB_CRWRDA_NP, reg, &val);
+}
+
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val = 0;
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+ SB_CRRDDA_NP, reg, &val);
+ return val;
+}
+
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+ SB_CRWRDA_NP, reg, &val);
+}
+
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val = 0;
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+ SB_CRRDDA_NP, reg, &val);
+ return val;
+}
+
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+ SB_CRWRDA_NP, reg, &val);
+}
+
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
+ SB_MRD_NP, reg, &val);
+
+ /*
+ * FIXME: There might be some registers where all 1's is a valid value,
+ * so ideally we should check the register offset instead...
+ */
+ WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
+ pipe_name(pipe), reg, val);
+
+ return val;
+}
+
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
+ SB_MWR_NP, reg, &val);
+}
+
+/* SBI access */
+u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+ enum intel_sbi_destination destination)
+{
+ u32 value = 0;
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to become ready\n");
+ return 0;
+ }
+
+ I915_WRITE(SBI_ADDR, (reg << 16));
+
+ if (destination == SBI_ICLK)
+ value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+ else
+ value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+ I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
+ return 0;
+ }
+
+ return I915_READ(SBI_DATA);
+}
+
+void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+ enum intel_sbi_destination destination)
+{
+ u32 tmp;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to become ready\n");
+ return;
+ }
+
+ I915_WRITE(SBI_ADDR, (reg << 16));
+ I915_WRITE(SBI_DATA, value);
+
+ if (destination == SBI_ICLK)
+ tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+ else
+ tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
+ I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
+ return;
+ }
+}
+
+u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val = 0;
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
+ reg, &val);
+ return val;
+}
+
+void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
+ reg, &val);
+}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
new file mode 100644
index 00000000000..9a17b4e92ef
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -0,0 +1,1314 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * New plane/sprite handling.
+ *
+ * The older chips had a separate interface for programming plane-related
+ * registers; newer ones are much simpler and we can use the new DRM plane
+ * support.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_rect.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
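+/*
+ * crtc_clock is in kHz and crtc_htotal in pixels, hence the conversion
+ * below: e.g. with a (hypothetical) 148500 kHz clock and an htotal of
+ * 2200 pixels, 100 usec is 100 * 148500 / (1000 * 2200) = 6.75, rounded
+ * up to 7 scanlines.
+ */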
+static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
+{
+ /* paranoia */
+ if (!mode->crtc_htotal)
+ return 1;
+
+ return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal);
+}
+
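+/*
+ * Evade the roughly 100 usec window just before vblank start: with
+ * interrupts disabled, wait until the scanline counter is outside
+ * [min, max] so that all of the register writes that follow get
+ * latched by the same vblank. Returns true if evasion succeeded and
+ * the caller must finish with intel_pipe_update_end().
+ */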
+static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
+{
+ struct drm_device *dev = crtc->base.dev;
+ const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ enum pipe pipe = crtc->pipe;
+ long timeout = msecs_to_jiffies_timeout(1);
+ int scanline, min, max, vblank_start;
+ DEFINE_WAIT(wait);
+
+ WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
+
+ vblank_start = mode->crtc_vblank_start;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vblank_start = DIV_ROUND_UP(vblank_start, 2);
+
+ /* FIXME needs to be calibrated sensibly */
+ min = vblank_start - usecs_to_scanlines(mode, 100);
+ max = vblank_start - 1;
+
+ if (min <= 0 || max <= 0)
+ return false;
+
+ if (WARN_ON(drm_vblank_get(dev, pipe)))
+ return false;
+
+ local_irq_disable();
+
+ trace_i915_pipe_update_start(crtc, min, max);
+
+ for (;;) {
+ /*
+ * prepare_to_wait() has a memory barrier, which guarantees
+ * other CPUs can see the task state update by the time we
+ * read the scanline.
+ */
+ prepare_to_wait(&crtc->vbl_wait, &wait, TASK_UNINTERRUPTIBLE);
+
+ scanline = intel_get_crtc_scanline(crtc);
+ if (scanline < min || scanline > max)
+ break;
+
+ if (timeout <= 0) {
+ DRM_ERROR("Potential atomic update failure on pipe %c\n",
+ pipe_name(crtc->pipe));
+ break;
+ }
+
+ local_irq_enable();
+
+ timeout = schedule_timeout(timeout);
+
+ local_irq_disable();
+ }
+
+ finish_wait(&crtc->vbl_wait, &wait);
+
+ drm_vblank_put(dev, pipe);
+
+ *start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
+
+ trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count);
+
+ return true;
+}
+
+static void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
+{
+ struct drm_device *dev = crtc->base.dev;
+ enum pipe pipe = crtc->pipe;
+ u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
+
+ trace_i915_pipe_update_end(crtc, end_vbl_count);
+
+ local_irq_enable();
+
+ if (start_vbl_count != end_vbl_count)
+ DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n",
+ pipe_name(pipe), start_vbl_count, end_vbl_count);
+}
+
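+/*
+ * Apply crtc->primary_enabled to the hardware. Used to turn the primary
+ * plane off while a sprite completely covers it (see intel_update_plane()
+ * below) and to turn it back on afterwards.
+ */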
+static void intel_update_primary_plane(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ int reg = DSPCNTR(crtc->plane);
+
+ if (crtc->primary_enabled)
+ I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+ else
+ I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+}
+
+static void
+vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = dplane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_plane->pipe;
+ int plane = intel_plane->plane;
+ u32 sprctl;
+ unsigned long sprsurf_offset, linear_offset;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ u32 start_vbl_count;
+ bool atomic_update;
+
+ sprctl = I915_READ(SPCNTR(pipe, plane));
+
+ /* Mask out pixel format bits in case we change it */
+ sprctl &= ~SP_PIXFORMAT_MASK;
+ sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
+ sprctl &= ~SP_TILED;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_YUYV:
+ sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YUYV;
+ break;
+ case DRM_FORMAT_YVYU:
+ sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YVYU;
+ break;
+ case DRM_FORMAT_UYVY:
+ sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_UYVY;
+ break;
+ case DRM_FORMAT_VYUY:
+ sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY;
+ break;
+ case DRM_FORMAT_RGB565:
+ sprctl |= SP_FORMAT_BGR565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ sprctl |= SP_FORMAT_BGRX8888;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ sprctl |= SP_FORMAT_BGRA8888;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ sprctl |= SP_FORMAT_RGBX1010102;
+ break;
+ case DRM_FORMAT_ABGR2101010:
+ sprctl |= SP_FORMAT_RGBA1010102;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ sprctl |= SP_FORMAT_RGBX8888;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ sprctl |= SP_FORMAT_RGBA8888;
+ break;
+ default:
+ /*
+ * If we get here one of the upper layers failed to filter
+ * out the unsupported plane formats
+ */
+ BUG();
+ break;
+ }
+
+ /*
+ * Enable gamma to match primary/cursor plane behaviour.
+	 * FIXME should be user controllable via properties.
+ */
+ sprctl |= SP_GAMMA_ENABLE;
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ sprctl |= SP_TILED;
+
+ sprctl |= SP_ENABLE;
+
+ intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true,
+ src_w != crtc_w || src_h != crtc_h);
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
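+	/*
+	 * Split the framebuffer offset into a coarse, suitably aligned
+	 * surface offset plus an adjusted x/y remainder; the remainder is
+	 * programmed via SPTILEOFF/SPLINOFF below.
+	 */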
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
+ sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
+ obj->tiling_mode,
+ pixel_size,
+ fb->pitches[0]);
+ linear_offset -= sprsurf_offset;
+
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+ intel_update_primary_plane(intel_crtc);
+
+ I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
+ I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
+ else
+ I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
+
+ I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
+ I915_WRITE(SPCNTR(pipe, plane), sprctl);
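+	/* Writing SPSURF arms the double buffered register update */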
+ I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
+ sprsurf_offset);
+
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+ if (atomic_update)
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
+}
+
+static void
+vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
+{
+ struct drm_device *dev = dplane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_plane->pipe;
+ int plane = intel_plane->plane;
+ u32 start_vbl_count;
+ bool atomic_update;
+
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+ intel_update_primary_plane(intel_crtc);
+
+ I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
+ ~SP_ENABLE);
+ /* Activate double buffered register update */
+ I915_WRITE(SPSURF(pipe, plane), 0);
+
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+ if (atomic_update)
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
+
+ intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
+}
+
+static int
+vlv_update_colorkey(struct drm_plane *dplane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = dplane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
+ int pipe = intel_plane->pipe;
+ int plane = intel_plane->plane;
+ u32 sprctl;
+
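+	/* The VLV sprite hardware only supports source keying */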
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
+ I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
+ I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
+ I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
+
+ sprctl = I915_READ(SPCNTR(pipe, plane));
+ sprctl &= ~SP_SOURCE_KEY;
+ if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SP_SOURCE_KEY;
+ I915_WRITE(SPCNTR(pipe, plane), sprctl);
+
+ POSTING_READ(SPKEYMSK(pipe, plane));
+
+ return 0;
+}
+
+static void
+vlv_get_colorkey(struct drm_plane *dplane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = dplane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
+ int pipe = intel_plane->pipe;
+ int plane = intel_plane->plane;
+ u32 sprctl;
+
+ key->min_value = I915_READ(SPKEYMINVAL(pipe, plane));
+ key->max_value = I915_READ(SPKEYMAXVAL(pipe, plane));
+ key->channel_mask = I915_READ(SPKEYMSK(pipe, plane));
+
+ sprctl = I915_READ(SPCNTR(pipe, plane));
+ if (sprctl & SP_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static void
+ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_plane->pipe;
+ u32 sprctl, sprscale = 0;
+ unsigned long sprsurf_offset, linear_offset;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ u32 start_vbl_count;
+ bool atomic_update;
+
+ sprctl = I915_READ(SPRCTL(pipe));
+
+ /* Mask out pixel format bits in case we change it */
+ sprctl &= ~SPRITE_PIXFORMAT_MASK;
+ sprctl &= ~SPRITE_RGB_ORDER_RGBX;
+ sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
+ sprctl &= ~SPRITE_TILED;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XBGR8888:
+ sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ sprctl |= SPRITE_FORMAT_RGBX888;
+ break;
+ case DRM_FORMAT_YUYV:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
+ break;
+ case DRM_FORMAT_YVYU:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
+ break;
+ case DRM_FORMAT_UYVY:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
+ break;
+ case DRM_FORMAT_VYUY:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
+ break;
+ default:
+ BUG();
+ }
+
+ /*
+ * Enable gamma to match primary/cursor plane behaviour.
+	 * FIXME should be user controllable via properties.
+ */
+ sprctl |= SPRITE_GAMMA_ENABLE;
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ sprctl |= SPRITE_TILED;
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
+ else
+ sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+
+ sprctl |= SPRITE_ENABLE;
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ sprctl |= SPRITE_PIPE_CSC_ENABLE;
+
+ intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+ src_w != crtc_w || src_h != crtc_h);
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ if (crtc_w != src_w || crtc_h != src_h)
+ sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
+
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
+ sprsurf_offset =
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ pixel_size, fb->pitches[0]);
+ linear_offset -= sprsurf_offset;
+
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+ intel_update_primary_plane(intel_crtc);
+
+ I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+
+	/*
+	 * HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
+	 * register.
+	 */
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
+ else if (obj->tiling_mode != I915_TILING_NONE)
+ I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
+ else
+ I915_WRITE(SPRLINOFF(pipe), linear_offset);
+
+ I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ if (intel_plane->can_scale)
+ I915_WRITE(SPRSCALE(pipe), sprscale);
+ I915_WRITE(SPRCTL(pipe), sprctl);
+ I915_WRITE(SPRSURF(pipe),
+ i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
+
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+ if (atomic_update)
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
+}
+
+static void
+ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_plane->pipe;
+ u32 start_vbl_count;
+ bool atomic_update;
+
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+ intel_update_primary_plane(intel_crtc);
+
+ I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
+ /* Can't leave the scaler enabled... */
+ if (intel_plane->can_scale)
+ I915_WRITE(SPRSCALE(pipe), 0);
+ /* Activate double buffered register update */
+ I915_WRITE(SPRSURF(pipe), 0);
+
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+ if (atomic_update)
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
+
+ /*
+ * Avoid underruns when disabling the sprite.
+ * FIXME remove once watermark updates are done properly.
+ */
+ intel_wait_for_vblank(dev, pipe);
+
+ intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
+}
+
+static int
+ivb_update_colorkey(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 sprctl;
+ int ret = 0;
+
+ intel_plane = to_intel_plane(plane);
+
+ I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
+ I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
+ I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
+
+ sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+ sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ sprctl |= SPRITE_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SPRITE_SOURCE_KEY;
+ I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
+
+ POSTING_READ(SPRKEYMSK(intel_plane->pipe));
+
+ return ret;
+}
+
+static void
+ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 sprctl;
+
+ intel_plane = to_intel_plane(plane);
+
+ key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
+ key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
+ key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
+ key->flags = 0;
+
+ sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+
+ if (sprctl & SPRITE_DEST_KEY)
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ else if (sprctl & SPRITE_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static void
+ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_plane->pipe;
+ unsigned long dvssurf_offset, linear_offset;
+ u32 dvscntr, dvsscale;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ u32 start_vbl_count;
+ bool atomic_update;
+
+ dvscntr = I915_READ(DVSCNTR(pipe));
+
+ /* Mask out pixel format bits in case we change it */
+ dvscntr &= ~DVS_PIXFORMAT_MASK;
+ dvscntr &= ~DVS_RGB_ORDER_XBGR;
+ dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
+ dvscntr &= ~DVS_TILED;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XBGR8888:
+ dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dvscntr |= DVS_FORMAT_RGBX888;
+ break;
+ case DRM_FORMAT_YUYV:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
+ break;
+ case DRM_FORMAT_YVYU:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
+ break;
+ case DRM_FORMAT_UYVY:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
+ break;
+ case DRM_FORMAT_VYUY:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
+ break;
+ default:
+ BUG();
+ }
+
+ /*
+ * Enable gamma to match primary/cursor plane behaviour.
+	 * FIXME should be user controllable via properties.
+ */
+ dvscntr |= DVS_GAMMA_ENABLE;
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dvscntr |= DVS_TILED;
+
+ if (IS_GEN6(dev))
+ dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
+ dvscntr |= DVS_ENABLE;
+
+ intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+ src_w != crtc_w || src_h != crtc_h);
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ dvsscale = 0;
+ if (crtc_w != src_w || crtc_h != src_h)
+ dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
+
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
+ dvssurf_offset =
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ pixel_size, fb->pitches[0]);
+ linear_offset -= dvssurf_offset;
+
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+ intel_update_primary_plane(intel_crtc);
+
+ I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
+ else
+ I915_WRITE(DVSLINOFF(pipe), linear_offset);
+
+ I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE(DVSSCALE(pipe), dvsscale);
+ I915_WRITE(DVSCNTR(pipe), dvscntr);
+ I915_WRITE(DVSSURF(pipe),
+ i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
+
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+ if (atomic_update)
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
+}
+
+static void
+ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_plane->pipe;
+ u32 start_vbl_count;
+ bool atomic_update;
+
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+ intel_update_primary_plane(intel_crtc);
+
+ I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
+ /* Disable the scaler */
+ I915_WRITE(DVSSCALE(pipe), 0);
+ /* Flush double buffered register updates */
+ I915_WRITE(DVSSURF(pipe), 0);
+
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+ if (atomic_update)
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
+
+ /*
+ * Avoid underruns when disabling the sprite.
+ * FIXME remove once watermark updates are done properly.
+ */
+ intel_wait_for_vblank(dev, pipe);
+
+ intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
+}
+
+static void
+intel_post_enable_primary(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /*
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
+ */
+ if (IS_BROADWELL(dev))
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ /*
+ * FIXME IPS should be fine as long as one plane is
+ * enabled, but in practice it seems to have problems
+ * when going from primary only to sprite only and vice
+ * versa.
+ */
+ hsw_enable_ips(intel_crtc);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void
+intel_pre_disable_primary(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ mutex_lock(&dev->struct_mutex);
+ if (dev_priv->fbc.plane == intel_crtc->plane)
+ intel_disable_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ /*
+ * FIXME IPS should be fine as long as one plane is
+ * enabled, but in practice it seems to have problems
+ * when going from primary only to sprite only and vice
+ * versa.
+ */
+ hsw_disable_ips(intel_crtc);
+}
+
+static int
+ilk_update_colorkey(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 dvscntr;
+ int ret = 0;
+
+ intel_plane = to_intel_plane(plane);
+
+ I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
+ I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
+ I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
+
+ dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+ dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ dvscntr |= DVS_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ dvscntr |= DVS_SOURCE_KEY;
+ I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
+
+ POSTING_READ(DVSKEYMSK(intel_plane->pipe));
+
+ return ret;
+}
+
+static void
+ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 dvscntr;
+
+ intel_plane = to_intel_plane(plane);
+
+ key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
+ key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
+ key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
+ key->flags = 0;
+
+ dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+
+ if (dvscntr & DVS_DEST_KEY)
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ else if (dvscntr & DVS_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static bool
+format_is_yuv(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YVYU:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool colorkey_enabled(struct intel_plane *intel_plane)
+{
+ struct drm_intel_sprite_colorkey key;
+
+ intel_plane->get_colorkey(&intel_plane->base, &key);
+
+ return key.flags != I915_SET_COLORKEY_NONE;
+}
+
+static int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *old_obj = intel_plane->obj;
+ int ret;
+ bool primary_enabled;
+ bool visible;
+ int hscale, vscale;
+ int max_scale, min_scale;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ struct drm_rect src = {
+ /* sample coordinates in 16.16 fixed point */
+ .x1 = src_x,
+ .x2 = src_x + src_w,
+ .y1 = src_y,
+ .y2 = src_y + src_h,
+ };
+ struct drm_rect dst = {
+ /* integer pixels */
+ .x1 = crtc_x,
+ .x2 = crtc_x + crtc_w,
+ .y1 = crtc_y,
+ .y2 = crtc_y + crtc_h,
+ };
+ const struct drm_rect clip = {
+ .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+ .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
+ };
+ const struct {
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ uint32_t src_x, src_y, src_w, src_h;
+ } orig = {
+ .crtc_x = crtc_x,
+ .crtc_y = crtc_y,
+ .crtc_w = crtc_w,
+ .crtc_h = crtc_h,
+ .src_x = src_x,
+ .src_y = src_y,
+ .src_w = src_w,
+ .src_h = src_h,
+ };
+
+ /* Don't modify another pipe's plane */
+ if (intel_plane->pipe != intel_crtc->pipe) {
+ DRM_DEBUG_KMS("Wrong plane <-> crtc mapping\n");
+ return -EINVAL;
+ }
+
+ /* FIXME check all gen limits */
+ if (fb->width < 3 || fb->height < 3 || fb->pitches[0] > 16384) {
+ DRM_DEBUG_KMS("Unsuitable framebuffer for plane\n");
+ return -EINVAL;
+ }
+
+ /* Sprite planes can be linear or x-tiled surfaces */
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ case I915_TILING_X:
+ break;
+ default:
+ DRM_DEBUG_KMS("Unsupported tiling mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * FIXME the following code does a bunch of fuzzy adjustments to the
+ * coordinates and sizes. We probably need some way to decide whether
+ * more strict checking should be done instead.
+ */
+ max_scale = intel_plane->max_downscale << 16;
+ min_scale = intel_plane->can_scale ? 1 : (1 << 16);
+
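+	/*
+	 * Scale factors are 16.16 fixed point: 1 << 16 is 1:1, so e.g.
+	 * max_downscale == 2 allows downscaling to half size, and a plane
+	 * without a scaler is pinned to exactly 1 << 16.
+	 */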
+ hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
+ BUG_ON(hscale < 0);
+
+ vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale);
+ BUG_ON(vscale < 0);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
+
+ crtc_x = dst.x1;
+ crtc_y = dst.y1;
+ crtc_w = drm_rect_width(&dst);
+ crtc_h = drm_rect_height(&dst);
+
+ if (visible) {
+ /* check again in case clipping clamped the results */
+ hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale);
+ if (hscale < 0) {
+ DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
+ drm_rect_debug_print(&src, true);
+ drm_rect_debug_print(&dst, false);
+
+ return hscale;
+ }
+
+ vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale);
+ if (vscale < 0) {
+ DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
+ drm_rect_debug_print(&src, true);
+ drm_rect_debug_print(&dst, false);
+
+ return vscale;
+ }
+
+ /* Make the source viewport size an exact multiple of the scaling factors. */
+ drm_rect_adjust_size(&src,
+ drm_rect_width(&dst) * hscale - drm_rect_width(&src),
+ drm_rect_height(&dst) * vscale - drm_rect_height(&src));
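+		/*
+		 * E.g. with hscale == 2 << 16, a 100 pixel wide dst needs a
+		 * src exactly 200 pixels (200 << 16 in 16.16) wide.
+		 */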
+
+ /* sanity check to make sure the src viewport wasn't enlarged */
+ WARN_ON(src.x1 < (int) src_x ||
+ src.y1 < (int) src_y ||
+ src.x2 > (int) (src_x + src_w) ||
+ src.y2 > (int) (src_y + src_h));
+
+ /*
+ * Hardware doesn't handle subpixel coordinates.
+ * Adjust to (macro)pixel boundary, but be careful not to
+ * increase the source viewport size, because that could
+ * push the downscaling factor out of bounds.
+ */
+ src_x = src.x1 >> 16;
+ src_w = drm_rect_width(&src) >> 16;
+ src_y = src.y1 >> 16;
+ src_h = drm_rect_height(&src) >> 16;
+
+ if (format_is_yuv(fb->pixel_format)) {
+ src_x &= ~1;
+ src_w &= ~1;
+
+ /*
+ * Must keep src and dst the
+ * same if we can't scale.
+ */
+ if (!intel_plane->can_scale)
+ crtc_w &= ~1;
+
+ if (crtc_w == 0)
+ visible = false;
+ }
+ }
+
+ /* Check size restrictions when scaling */
+ if (visible && (src_w != crtc_w || src_h != crtc_h)) {
+ unsigned int width_bytes;
+
+ WARN_ON(!intel_plane->can_scale);
+
+ /* FIXME interlacing min height is 6 */
+
+ if (crtc_w < 3 || crtc_h < 3)
+ visible = false;
+
+ if (src_w < 3 || src_h < 3)
+ visible = false;
+
+ width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
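+		/*
+		 * width_bytes is the per-line fetch span measured from the
+		 * previous 64 byte boundary; the hardware caps it at 4k.
+		 */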
+
+ if (src_w > 2048 || src_h > 2048 ||
+ width_bytes > 4096 || fb->pitches[0] > 4096) {
+ DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
+ return -EINVAL;
+ }
+ }
+
+ dst.x1 = crtc_x;
+ dst.x2 = crtc_x + crtc_w;
+ dst.y1 = crtc_y;
+ dst.y2 = crtc_y + crtc_h;
+
+ /*
+ * If the sprite is completely covering the primary plane,
+ * we can disable the primary and save power.
+ */
+ primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
+ WARN_ON(!primary_enabled && !visible && intel_crtc->active);
+
+ mutex_lock(&dev->struct_mutex);
+
+	/* Note that this will apply the VT-d workaround for scanouts,
+	 * which is more restrictive than required for sprites. (The
+	 * primary plane requires 256KiB alignment with 64 PTE padding,
+	 * the sprite planes only require 128KiB alignment and 32 PTE
+	 * padding.)
+	 */
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ret)
+ return ret;
+
+ intel_plane->crtc_x = orig.crtc_x;
+ intel_plane->crtc_y = orig.crtc_y;
+ intel_plane->crtc_w = orig.crtc_w;
+ intel_plane->crtc_h = orig.crtc_h;
+ intel_plane->src_x = orig.src_x;
+ intel_plane->src_y = orig.src_y;
+ intel_plane->src_w = orig.src_w;
+ intel_plane->src_h = orig.src_h;
+ intel_plane->obj = obj;
+
+ if (intel_crtc->active) {
+ bool primary_was_enabled = intel_crtc->primary_enabled;
+
+ intel_crtc->primary_enabled = primary_enabled;
+
+ if (primary_was_enabled != primary_enabled)
+ intel_crtc_wait_for_pending_flips(crtc);
+
+ if (primary_was_enabled && !primary_enabled)
+ intel_pre_disable_primary(crtc);
+
+ if (visible)
+ intel_plane->update_plane(plane, crtc, fb, obj,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+ else
+ intel_plane->disable_plane(plane, crtc);
+
+ if (!primary_was_enabled && primary_enabled)
+ intel_post_enable_primary(crtc);
+ }
+
+ /* Unpin old obj after new one is active to avoid ugliness */
+ if (old_obj) {
+ /*
+ * It's fairly common to simply update the position of
+ * an existing object. In that case, we don't need to
+ * wait for vblank to avoid ugliness, we only need to
+ * do the pin & ref bookkeeping.
+ */
+ if (old_obj != obj && intel_crtc->active)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(old_obj);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ return 0;
+}
+
+static int
+intel_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_crtc *intel_crtc;
+
+ if (!plane->fb)
+ return 0;
+
+ if (WARN_ON(!plane->crtc))
+ return -EINVAL;
+
+ intel_crtc = to_intel_crtc(plane->crtc);
+
+ if (intel_crtc->active) {
+ bool primary_was_enabled = intel_crtc->primary_enabled;
+
+ intel_crtc->primary_enabled = true;
+
+ intel_plane->disable_plane(plane, plane->crtc);
+
+ if (!primary_was_enabled && intel_crtc->primary_enabled)
+ intel_post_enable_primary(plane->crtc);
+ }
+
+ if (intel_plane->obj) {
+ if (intel_crtc->active)
+ intel_wait_for_vblank(dev, intel_plane->pipe);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(intel_plane->obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ intel_plane->obj = NULL;
+ }
+
+ return 0;
+}
+
+static void intel_destroy_plane(struct drm_plane *plane)
+{
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+
+	intel_disable_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(intel_plane);
+}
+
+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_sprite_colorkey *set = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ /* Make sure we don't try to enable both src & dest simultaneously */
+	if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) ==
+	    (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+		return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ plane = obj_to_plane(obj);
+ intel_plane = to_intel_plane(plane);
+ ret = intel_plane->update_colorkey(plane, set);
+
+out_unlock:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_sprite_colorkey *get = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ drm_modeset_lock_all(dev);
+
+ obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ plane = obj_to_plane(obj);
+ intel_plane = to_intel_plane(plane);
+ intel_plane->get_colorkey(plane, get);
+
+out_unlock:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+void intel_plane_restore(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+
+ if (!plane->crtc || !plane->fb)
+ return;
+
+ intel_update_plane(plane, plane->crtc, plane->fb,
+ intel_plane->crtc_x, intel_plane->crtc_y,
+ intel_plane->crtc_w, intel_plane->crtc_h,
+ intel_plane->src_x, intel_plane->src_y,
+ intel_plane->src_w, intel_plane->src_h);
+}
+
+void intel_plane_disable(struct drm_plane *plane)
+{
+ if (!plane->crtc || !plane->fb)
+ return;
+
+ intel_disable_plane(plane);
+}
+
+static const struct drm_plane_funcs intel_plane_funcs = {
+ .update_plane = intel_update_plane,
+ .disable_plane = intel_disable_plane,
+ .destroy = intel_destroy_plane,
+};
+
+static uint32_t ilk_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+static uint32_t snb_plane_formats[] = {
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+static uint32_t vlv_plane_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+int
+intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
+{
+ struct intel_plane *intel_plane;
+ unsigned long possible_crtcs;
+ const uint32_t *plane_formats;
+ int num_plane_formats;
+ int ret;
+
+ if (INTEL_INFO(dev)->gen < 5)
+ return -ENODEV;
+
+ intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
+ if (!intel_plane)
+ return -ENOMEM;
+
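+	/*
+	 * Per-generation scaler limits in this code: ILK/SNB sprites can
+	 * downscale up to 16x, IVB up to 2x, while VLV/HSW/BDW sprites
+	 * have no scaler at all.
+	 */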
+ switch (INTEL_INFO(dev)->gen) {
+ case 5:
+ case 6:
+ intel_plane->can_scale = true;
+ intel_plane->max_downscale = 16;
+ intel_plane->update_plane = ilk_update_plane;
+ intel_plane->disable_plane = ilk_disable_plane;
+ intel_plane->update_colorkey = ilk_update_colorkey;
+ intel_plane->get_colorkey = ilk_get_colorkey;
+
+ if (IS_GEN6(dev)) {
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ } else {
+ plane_formats = ilk_plane_formats;
+ num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
+ }
+ break;
+
+ case 7:
+ case 8:
+ if (IS_IVYBRIDGE(dev)) {
+ intel_plane->can_scale = true;
+ intel_plane->max_downscale = 2;
+ } else {
+ intel_plane->can_scale = false;
+ intel_plane->max_downscale = 1;
+ }
+
+ if (IS_VALLEYVIEW(dev)) {
+ intel_plane->update_plane = vlv_update_plane;
+ intel_plane->disable_plane = vlv_disable_plane;
+ intel_plane->update_colorkey = vlv_update_colorkey;
+ intel_plane->get_colorkey = vlv_get_colorkey;
+
+ plane_formats = vlv_plane_formats;
+ num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+ } else {
+ intel_plane->update_plane = ivb_update_plane;
+ intel_plane->disable_plane = ivb_disable_plane;
+ intel_plane->update_colorkey = ivb_update_colorkey;
+ intel_plane->get_colorkey = ivb_get_colorkey;
+
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ }
+ break;
+
+ default:
+ kfree(intel_plane);
+ return -ENODEV;
+ }
+
+ intel_plane->pipe = pipe;
+ intel_plane->plane = plane;
+ possible_crtcs = (1 << pipe);
+ ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
+ &intel_plane_funcs,
+ plane_formats, num_plane_formats,
+ false);
+ if (ret)
+ kfree(intel_plane);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 1d5b9b7b033..67c6c9a2eb1 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -30,12 +30,11 @@
* Integrated TV-out support for the 915GM and 945GM.
*/
-#include "drmP.h"
-#include "drm.h"
-#include "drm_crtc.h"
-#include "drm_edid.h"
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
#include "intel_drv.h"
-#include "i915_drm.h"
+#include <drm/i915_drm.h>
#include "i915_drv.h"
enum tv_margin {
@@ -44,9 +43,11 @@ enum tv_margin {
};
/** Private structure for the integrated TV support */
-struct intel_tv_priv {
+struct intel_tv {
+ struct intel_encoder base;
+
int type;
- char *tv_format;
+ const char *tv_format;
int margin[4];
u32 save_TV_H_CTL_1;
u32 save_TV_H_CTL_2;
@@ -192,10 +193,10 @@ static const u32 filter_table[] = {
*
* if (f >= 1) {
* exp = 0x7;
- * mant = 1 << 8;
+ * mant = 1 << 8;
* } else {
* for (exp = 0; exp < 3 && f < 0.5; exp++)
- * f *= 2.0;
+ * f *= 2.0;
* mant = (f * (1 << 9) + 0.5);
* if (mant >= (1 << 9))
* mant = (1 << 9) - 1;
@@ -348,7 +349,7 @@ static const struct video_levels component_levels = {
struct tv_mode {
- char *name;
+ const char *name;
int clock;
int refresh; /* in millihertz (for precision) */
u32 oversample;
@@ -415,7 +416,7 @@ static const struct tv_mode tv_modes[] = {
{
.name = "NTSC-M",
.clock = 108000,
- .refresh = 29970,
+ .refresh = 59940,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
@@ -428,7 +429,7 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 6, .vsync_start_f2 = 7,
.vsync_len = 6,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 18,
.vi_end_f1 = 20, .vi_end_f2 = 21,
@@ -458,7 +459,7 @@ static const struct tv_mode tv_modes[] = {
{
.name = "NTSC-443",
.clock = 108000,
- .refresh = 29970,
+ .refresh = 59940,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */
@@ -470,13 +471,13 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 6, .vsync_start_f2 = 7,
.vsync_len = 6,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 18,
.vi_end_f1 = 20, .vi_end_f2 = 21,
.nbr_end = 240,
- .burst_ena = 8,
+ .burst_ena = true,
.hburst_start = 72, .hburst_len = 34,
.vburst_start_f1 = 9, .vburst_end_f1 = 240,
.vburst_start_f2 = 10, .vburst_end_f2 = 240,
@@ -500,7 +501,7 @@ static const struct tv_mode tv_modes[] = {
{
.name = "NTSC-J",
.clock = 108000,
- .refresh = 29970,
+ .refresh = 59940,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -513,7 +514,7 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 6, .vsync_start_f2 = 7,
.vsync_len = 6,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 18,
.vi_end_f1 = 20, .vi_end_f2 = 21,
@@ -543,7 +544,7 @@ static const struct tv_mode tv_modes[] = {
{
.name = "PAL-M",
.clock = 108000,
- .refresh = 29970,
+ .refresh = 59940,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -556,7 +557,7 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 6, .vsync_start_f2 = 7,
.vsync_len = 6,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 18,
.vi_end_f1 = 20, .vi_end_f2 = 21,
@@ -587,7 +588,7 @@ static const struct tv_mode tv_modes[] = {
/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
.name = "PAL-N",
.clock = 108000,
- .refresh = 25000,
+ .refresh = 50000,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -600,14 +601,14 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 6, .vsync_start_f2 = 7,
.vsync_len = 6,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 18,
.vi_end_f1 = 24, .vi_end_f2 = 25,
.nbr_end = 286,
.burst_ena = true,
- .hburst_start = 73, .hburst_len = 34,
+ .hburst_start = 73, .hburst_len = 34,
.vburst_start_f1 = 8, .vburst_end_f1 = 285,
.vburst_start_f2 = 8, .vburst_end_f2 = 286,
.vburst_start_f3 = 9, .vburst_end_f3 = 286,
@@ -632,7 +633,7 @@ static const struct tv_mode tv_modes[] = {
/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
.name = "PAL",
.clock = 108000,
- .refresh = 25000,
+ .refresh = 50000,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -644,7 +645,7 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 5, .vsync_start_f2 = 6,
.vsync_len = 5,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 15,
.vi_end_f1 = 24, .vi_end_f2 = 25,
@@ -672,8 +673,8 @@ static const struct tv_mode tv_modes[] = {
.filter_table = filter_table,
},
{
- .name = "480p@59.94Hz",
- .clock = 107520,
+ .name = "480p",
+ .clock = 107520,
.refresh = 59940,
.oversample = TV_OVERSAMPLE_4X,
.component_only = 1,
@@ -681,31 +682,7 @@ static const struct tv_mode tv_modes[] = {
.hsync_end = 64, .hblank_end = 122,
.hblank_start = 842, .htotal = 857,
- .progressive = true,.trilevel_sync = false,
-
- .vsync_start_f1 = 12, .vsync_start_f2 = 12,
- .vsync_len = 12,
-
- .veq_ena = false,
-
- .vi_end_f1 = 44, .vi_end_f2 = 44,
- .nbr_end = 479,
-
- .burst_ena = false,
-
- .filter_table = filter_table,
- },
- {
- .name = "480p@60Hz",
- .clock = 107520,
- .refresh = 60000,
- .oversample = TV_OVERSAMPLE_4X,
- .component_only = 1,
-
- .hsync_end = 64, .hblank_end = 122,
- .hblank_start = 842, .htotal = 856,
-
- .progressive = true,.trilevel_sync = false,
+ .progressive = true, .trilevel_sync = false,
.vsync_start_f1 = 12, .vsync_start_f2 = 12,
.vsync_len = 12,
@@ -721,7 +698,7 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "576p",
- .clock = 107520,
+ .clock = 107520,
.refresh = 50000,
.oversample = TV_OVERSAMPLE_4X,
.component_only = 1,
@@ -729,7 +706,7 @@ static const struct tv_mode tv_modes[] = {
.hsync_end = 64, .hblank_end = 139,
.hblank_start = 859, .htotal = 863,
- .progressive = true, .trilevel_sync = false,
+ .progressive = true, .trilevel_sync = false,
.vsync_start_f1 = 10, .vsync_start_f2 = 10,
.vsync_len = 10,
@@ -753,31 +730,7 @@ static const struct tv_mode tv_modes[] = {
.hsync_end = 80, .hblank_end = 300,
.hblank_start = 1580, .htotal = 1649,
- .progressive = true, .trilevel_sync = true,
-
- .vsync_start_f1 = 10, .vsync_start_f2 = 10,
- .vsync_len = 10,
-
- .veq_ena = false,
-
- .vi_end_f1 = 29, .vi_end_f2 = 29,
- .nbr_end = 719,
-
- .burst_ena = false,
-
- .filter_table = filter_table,
- },
- {
- .name = "720p@59.94Hz",
- .clock = 148800,
- .refresh = 59940,
- .oversample = TV_OVERSAMPLE_2X,
- .component_only = 1,
-
- .hsync_end = 80, .hblank_end = 300,
- .hblank_start = 1580, .htotal = 1651,
-
- .progressive = true, .trilevel_sync = true,
+ .progressive = true, .trilevel_sync = true,
.vsync_start_f1 = 10, .vsync_start_f2 = 10,
.vsync_len = 10,
@@ -801,7 +754,7 @@ static const struct tv_mode tv_modes[] = {
.hsync_end = 80, .hblank_end = 300,
.hblank_start = 1580, .htotal = 1979,
- .progressive = true, .trilevel_sync = true,
+ .progressive = true, .trilevel_sync = true,
.vsync_start_f1 = 10, .vsync_start_f2 = 10,
.vsync_len = 10,
@@ -819,19 +772,19 @@ static const struct tv_mode tv_modes[] = {
{
.name = "1080i@50Hz",
.clock = 148800,
- .refresh = 25000,
+ .refresh = 50000,
.oversample = TV_OVERSAMPLE_2X,
.component_only = 1,
.hsync_end = 88, .hblank_end = 235,
.hblank_start = 2155, .htotal = 2639,
- .progressive = false, .trilevel_sync = true,
+ .progressive = false, .trilevel_sync = true,
.vsync_start_f1 = 4, .vsync_start_f2 = 5,
.vsync_len = 10,
- .veq_ena = true, .veq_start_f1 = 4,
+ .veq_ena = true, .veq_start_f1 = 4,
.veq_start_f2 = 4, .veq_len = 10,
@@ -845,19 +798,19 @@ static const struct tv_mode tv_modes[] = {
{
.name = "1080i@60Hz",
.clock = 148800,
- .refresh = 30000,
+ .refresh = 60000,
.oversample = TV_OVERSAMPLE_2X,
.component_only = 1,
.hsync_end = 88, .hblank_end = 235,
.hblank_start = 2155, .htotal = 2199,
- .progressive = false, .trilevel_sync = true,
+ .progressive = false, .trilevel_sync = true,
.vsync_start_f1 = 4, .vsync_start_f2 = 5,
.vsync_len = 10,
- .veq_ena = true, .veq_start_f1 = 4,
+ .veq_ena = true, .veq_start_f1 = 4,
.veq_start_f2 = 4, .veq_len = 10,
@@ -868,197 +821,57 @@ static const struct tv_mode tv_modes[] = {
.filter_table = filter_table,
},
- {
- .name = "1080i@59.94Hz",
- .clock = 148800,
- .refresh = 29970,
- .oversample = TV_OVERSAMPLE_2X,
- .component_only = 1,
-
- .hsync_end = 88, .hblank_end = 235,
- .hblank_start = 2155, .htotal = 2201,
-
- .progressive = false, .trilevel_sync = true,
-
- .vsync_start_f1 = 4, .vsync_start_f2 = 5,
- .vsync_len = 10,
-
- .veq_ena = true, .veq_start_f1 = 4,
- .veq_start_f2 = 4, .veq_len = 10,
-
-
- .vi_end_f1 = 21, .vi_end_f2 = 22,
- .nbr_end = 539,
-
- .burst_ena = false,
-
- .filter_table = filter_table,
- },
};
-#define NUM_TV_MODES sizeof(tv_modes) / sizeof (tv_modes[0])
-
-static void
-intel_tv_dpms(struct drm_encoder *encoder, int mode)
+static struct intel_tv *enc_to_tv(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ return container_of(encoder, struct intel_tv, base);
+}
- switch(mode) {
- case DRM_MODE_DPMS_ON:
- I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
- break;
- }
+static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+{
+ return enc_to_tv(intel_attached_encoder(connector));
}
-static void
-intel_tv_save(struct drm_connector *connector)
+static bool
+intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
{
- struct drm_device *dev = connector->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- int i;
+ u32 tmp = I915_READ(TV_CTL);
- tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
- tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2);
- tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3);
- tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1);
- tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2);
- tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3);
- tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4);
- tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5);
- tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6);
- tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7);
- tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1);
- tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2);
- tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3);
-
- tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y);
- tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2);
- tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U);
- tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2);
- tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V);
- tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2);
- tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS);
- tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL);
- tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS);
- tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE);
- tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1);
- tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2);
- tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3);
+ if (!(tmp & TV_ENC_ENABLE))
+ return false;
- for (i = 0; i < 60; i++)
- tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2));
- for (i = 0; i < 60; i++)
- tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2));
- for (i = 0; i < 43; i++)
- tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2));
- for (i = 0; i < 43; i++)
- tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2));
+ *pipe = PORT_TO_PIPE(tmp);
- tv_priv->save_TV_DAC = I915_READ(TV_DAC);
- tv_priv->save_TV_CTL = I915_READ(TV_CTL);
+ return true;
}
static void
-intel_tv_restore(struct drm_connector *connector)
+intel_enable_tv(struct intel_encoder *encoder)
{
- struct drm_device *dev = connector->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- struct drm_crtc *crtc = connector->encoder->crtc;
- struct intel_crtc *intel_crtc;
- int i;
-
- /* FIXME: No CRTC? */
- if (!crtc)
- return;
-
- intel_crtc = to_intel_crtc(crtc);
- I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1);
- I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2);
- I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3);
- I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1);
- I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2);
- I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3);
- I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4);
- I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5);
- I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6);
- I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7);
- I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1);
- I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2);
- I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3);
-
- I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y);
- I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2);
- I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U);
- I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2);
- I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V);
- I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2);
- I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS);
- I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL);
- {
- int pipeconf_reg = (intel_crtc->pipe == 0) ?
- PIPEACONF : PIPEBCONF;
- int dspcntr_reg = (intel_crtc->plane == 0) ?
- DSPACNTR : DSPBCNTR;
- int pipeconf = I915_READ(pipeconf_reg);
- int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = (intel_crtc->plane == 0) ?
- DSPAADDR : DSPBADDR;
- /* Pipe must be off here */
- I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-
- if (!IS_I9XX(dev)) {
- /* Wait for vblank for the disable to take effect */
- intel_wait_for_vblank(dev);
- }
-
- I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
- /* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev);
-
- /* Filter ctl must be set before TV_WIN_SIZE */
- I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1);
- I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2);
- I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3);
- I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS);
- I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE);
- I915_WRITE(pipeconf_reg, pipeconf);
- I915_WRITE(dspcntr_reg, dspcntr);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- }
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+}
- for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]);
- for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]);
- for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]);
- for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]);
+static void
+intel_disable_tv(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(TV_DAC, tv_priv->save_TV_DAC);
- I915_WRITE(TV_CTL, tv_priv->save_TV_CTL);
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
}
static const struct tv_mode *
-intel_tv_mode_lookup (char *tv_format)
+intel_tv_mode_lookup(const char *tv_format)
{
int i;
- for (i = 0; i < sizeof(tv_modes) / sizeof (tv_modes[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
const struct tv_mode *tv_mode = &tv_modes[i];
if (!strcmp(tv_format, tv_mode->name))
@@ -1068,102 +881,67 @@ intel_tv_mode_lookup (char *tv_format)
}
static const struct tv_mode *
-intel_tv_mode_find (struct intel_output *intel_output)
+intel_tv_mode_find(struct intel_tv *intel_tv)
{
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
-
- return intel_tv_mode_lookup(tv_priv->tv_format);
+ return intel_tv_mode_lookup(intel_tv->tv_format);
}
static enum drm_mode_status
-intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
+intel_tv_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
- struct intel_output *intel_output = to_intel_output(connector);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
/* Ensure TV refresh is close to desired refresh */
if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
< 1000)
return MODE_OK;
+
return MODE_CLOCK_RANGE;
}
+static void
+intel_tv_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
static bool
-intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+intel_tv_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
- struct drm_device *dev = encoder->dev;
- struct drm_mode_config *drm_config = &dev->mode_config;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find (intel_output);
- struct drm_encoder *other_encoder;
+ struct intel_tv *intel_tv = enc_to_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
if (!tv_mode)
return false;
- /* FIXME: lock encoder list */
- list_for_each_entry(other_encoder, &drm_config->encoder_list, head) {
- if (other_encoder != encoder &&
- other_encoder->crtc == encoder->crtc)
- return false;
- }
+ pipe_config->adjusted_mode.crtc_clock = tv_mode->clock;
+ DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
+ pipe_config->pipe_bpp = 8*3;
+
+	/* TV has its own notion of sync and other mode flags, so clear them. */
+ pipe_config->adjusted_mode.flags = 0;
+
+ /*
+ * FIXME: We don't check whether the input mode is actually what we want
+ * or whether userspace is doing something stupid.
+ */
- adjusted_mode->clock = tv_mode->clock;
return true;
}
static void
-intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+set_tv_mode_timings(struct drm_i915_private *dev_priv,
+ const struct tv_mode *tv_mode,
+ bool burst_ena)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
- u32 tv_ctl;
u32 hctl1, hctl2, hctl3;
u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
- u32 scctl1, scctl2, scctl3;
- int i, j;
- const struct video_levels *video_levels;
- const struct color_conversion *color_conversion;
- bool burst_ena;
-
- if (!tv_mode)
- return; /* can't happen (mode_prepare prevents this) */
-
- tv_ctl = I915_READ(TV_CTL);
- tv_ctl &= TV_CTL_SAVE;
- switch (tv_priv->type) {
- default:
- case DRM_MODE_CONNECTOR_Unknown:
- case DRM_MODE_CONNECTOR_Composite:
- tv_ctl |= TV_ENC_OUTPUT_COMPOSITE;
- video_levels = tv_mode->composite_levels;
- color_conversion = tv_mode->composite_color;
- burst_ena = tv_mode->burst_ena;
- break;
- case DRM_MODE_CONNECTOR_Component:
- tv_ctl |= TV_ENC_OUTPUT_COMPONENT;
- video_levels = &component_levels;
- if (tv_mode->burst_ena)
- color_conversion = &sdtv_csc_yprpb;
- else
- color_conversion = &hdtv_csc_yprpb;
- burst_ena = false;
- break;
- case DRM_MODE_CONNECTOR_SVIDEO:
- tv_ctl |= TV_ENC_OUTPUT_SVIDEO;
- video_levels = tv_mode->svideo_levels;
- color_conversion = tv_mode->svideo_color;
- burst_ena = tv_mode->burst_ena;
- break;
- }
hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) |
(tv_mode->htotal << TV_HTOTAL_SHIFT);
@@ -1203,6 +981,86 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
(tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
+ I915_WRITE(TV_H_CTL_1, hctl1);
+ I915_WRITE(TV_H_CTL_2, hctl2);
+ I915_WRITE(TV_H_CTL_3, hctl3);
+ I915_WRITE(TV_V_CTL_1, vctl1);
+ I915_WRITE(TV_V_CTL_2, vctl2);
+ I915_WRITE(TV_V_CTL_3, vctl3);
+ I915_WRITE(TV_V_CTL_4, vctl4);
+ I915_WRITE(TV_V_CTL_5, vctl5);
+ I915_WRITE(TV_V_CTL_6, vctl6);
+ I915_WRITE(TV_V_CTL_7, vctl7);
+}
+
+static void set_color_conversion(struct drm_i915_private *dev_priv,
+ const struct color_conversion *color_conversion)
+{
+ if (!color_conversion)
+ return;
+
+ I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
+ color_conversion->gy);
+ I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
+ color_conversion->ay);
+ I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
+ color_conversion->gu);
+ I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
+ color_conversion->au);
+ I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
+ color_conversion->gv);
+ I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
+ color_conversion->av);
+}
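Each TV_CSC_* write above packs two 16-bit conversion coefficients into one 32-bit register, high coefficient in the upper half. A self-contained sketch of the packing (the parameter names are generic; the real values come from the driver's conversion tables):

#include <stdint.h>

static uint32_t pack_csc(uint16_t hi, uint16_t lo)
{
	return ((uint32_t)hi << 16) | lo;  /* e.g. (ry << 16) | gy for TV_CSC_Y */
}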
+
+static void intel_tv_pre_enable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_tv *intel_tv = enc_to_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ u32 tv_ctl;
+ u32 scctl1, scctl2, scctl3;
+ int i, j;
+ const struct video_levels *video_levels;
+ const struct color_conversion *color_conversion;
+ bool burst_ena;
+ int xpos = 0x0, ypos = 0x0;
+ unsigned int xsize, ysize;
+
+ if (!tv_mode)
+ return; /* can't happen (mode_prepare prevents this) */
+
+ tv_ctl = I915_READ(TV_CTL);
+ tv_ctl &= TV_CTL_SAVE;
+
+ switch (intel_tv->type) {
+ default:
+ case DRM_MODE_CONNECTOR_Unknown:
+ case DRM_MODE_CONNECTOR_Composite:
+ tv_ctl |= TV_ENC_OUTPUT_COMPOSITE;
+ video_levels = tv_mode->composite_levels;
+ color_conversion = tv_mode->composite_color;
+ burst_ena = tv_mode->burst_ena;
+ break;
+ case DRM_MODE_CONNECTOR_Component:
+ tv_ctl |= TV_ENC_OUTPUT_COMPONENT;
+ video_levels = &component_levels;
+ if (tv_mode->burst_ena)
+ color_conversion = &sdtv_csc_yprpb;
+ else
+ color_conversion = &hdtv_csc_yprpb;
+ burst_ena = false;
+ break;
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ tv_ctl |= TV_ENC_OUTPUT_SVIDEO;
+ video_levels = tv_mode->svideo_levels;
+ color_conversion = tv_mode->svideo_color;
+ burst_ena = tv_mode->burst_ena;
+ break;
+ }
+
if (intel_crtc->pipe == 1)
tv_ctl |= TV_ENC_PIPEB_SELECT;
tv_ctl |= tv_mode->oversample;
@@ -1233,39 +1091,18 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
/* Enable two fixes for the chips that need them. */
- if (dev->pci_device < 0x2772)
+ if (IS_I915GM(dev))
tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
- I915_WRITE(TV_H_CTL_1, hctl1);
- I915_WRITE(TV_H_CTL_2, hctl2);
- I915_WRITE(TV_H_CTL_3, hctl3);
- I915_WRITE(TV_V_CTL_1, vctl1);
- I915_WRITE(TV_V_CTL_2, vctl2);
- I915_WRITE(TV_V_CTL_3, vctl3);
- I915_WRITE(TV_V_CTL_4, vctl4);
- I915_WRITE(TV_V_CTL_5, vctl5);
- I915_WRITE(TV_V_CTL_6, vctl6);
- I915_WRITE(TV_V_CTL_7, vctl7);
+ set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
+
I915_WRITE(TV_SC_CTL_1, scctl1);
I915_WRITE(TV_SC_CTL_2, scctl2);
I915_WRITE(TV_SC_CTL_3, scctl3);
- if (color_conversion) {
- I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
- color_conversion->gy);
- I915_WRITE(TV_CSC_Y2,(color_conversion->by << 16) |
- color_conversion->ay);
- I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
- color_conversion->gu);
- I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
- color_conversion->au);
- I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
- color_conversion->gv);
- I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
- color_conversion->av);
- }
+ set_color_conversion(dev_priv, color_conversion);
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(TV_CLR_KNOBS, 0x00404000);
else
I915_WRITE(TV_CLR_KNOBS, 0x00606000);
@@ -1274,52 +1111,25 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
I915_WRITE(TV_CLR_LEVEL,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
(video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
- {
- int pipeconf_reg = (intel_crtc->pipe == 0) ?
- PIPEACONF : PIPEBCONF;
- int dspcntr_reg = (intel_crtc->plane == 0) ?
- DSPACNTR : DSPBCNTR;
- int pipeconf = I915_READ(pipeconf_reg);
- int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = (intel_crtc->plane == 0) ?
- DSPAADDR : DSPBADDR;
- int xpos = 0x0, ypos = 0x0;
- unsigned int xsize, ysize;
- /* Pipe must be off here */
- I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-
- /* Wait for vblank for the disable to take effect */
- if (!IS_I9XX(dev))
- intel_wait_for_vblank(dev);
-
- I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
- /* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev);
-
- /* Filter ctl must be set before TV_WIN_SIZE */
- I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
- xsize = tv_mode->hblank_start - tv_mode->hblank_end;
- if (tv_mode->progressive)
- ysize = tv_mode->nbr_end + 1;
- else
- ysize = 2*tv_mode->nbr_end + 1;
-
- xpos += tv_priv->margin[TV_MARGIN_LEFT];
- ypos += tv_priv->margin[TV_MARGIN_TOP];
- xsize -= (tv_priv->margin[TV_MARGIN_LEFT] +
- tv_priv->margin[TV_MARGIN_RIGHT]);
- ysize -= (tv_priv->margin[TV_MARGIN_TOP] +
- tv_priv->margin[TV_MARGIN_BOTTOM]);
- I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
- I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
-
- I915_WRITE(pipeconf_reg, pipeconf);
- I915_WRITE(dspcntr_reg, dspcntr);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- }
+
+ assert_pipe_disabled(dev_priv, intel_crtc->pipe);
+
+ /* Filter ctl must be set before TV_WIN_SIZE */
+ I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
+ xsize = tv_mode->hblank_start - tv_mode->hblank_end;
+ if (tv_mode->progressive)
+ ysize = tv_mode->nbr_end + 1;
+ else
+ ysize = 2*tv_mode->nbr_end + 1;
+
+ xpos += intel_tv->margin[TV_MARGIN_LEFT];
+ ypos += intel_tv->margin[TV_MARGIN_TOP];
+ xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
+ intel_tv->margin[TV_MARGIN_RIGHT]);
+ ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
+ intel_tv->margin[TV_MARGIN_BOTTOM]);
+ I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
+ I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
j = 0;
for (i = 0; i < 60; i++)
@@ -1330,7 +1140,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
for (i = 0; i < 43; i++)
I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
- I915_WRITE(TV_DAC, 0);
+ I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
I915_WRITE(TV_CTL, tv_ctl);
}
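The window arithmetic above derives the active TV area from the mode's blanking and then shrinks it by the user margins. A worked example using the BIOS default margins from intel_tv_init() below (54/46/36/37) and hypothetical blanking values, not taken from any real tv_mode entry:

#include <stdint.h>

static void tv_window_example(void)
{
	unsigned int hblank_start = 760, hblank_end = 120; /* hypothetical */
	unsigned int nbr_end = 239;			   /* hypothetical */
	int xpos = 0, ypos = 0;
	unsigned int xsize, ysize;
	uint32_t win_pos, win_size;

	xsize = hblank_start - hblank_end;	/* 640 */
	ysize = nbr_end + 1;			/* 240 for a progressive mode */

	xpos += 54;				/* TV_MARGIN_LEFT */
	ypos += 36;				/* TV_MARGIN_TOP */
	xsize -= 54 + 46;			/* minus left + right -> 540 */
	ysize -= 36 + 37;			/* minus top + bottom -> 167 */

	win_pos = (xpos << 16) | ypos;		/* 0x00360024 */
	win_size = (xsize << 16) | ysize;	/* 0x021c00a7 */
	(void)win_pos;
	(void)win_size;
}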
@@ -1360,37 +1170,40 @@ static const struct drm_display_mode reported_modes[] = {
* \return false if TV is disconnected.
*/
static int
-intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
+intel_tv_detect_type(struct intel_tv *intel_tv,
+ struct drm_connector *connector)
{
- struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_encoder *encoder = &intel_tv->base.base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
- int type = DRM_MODE_CONNECTOR_Unknown;
-
- tv_dac = I915_READ(TV_DAC);
+ int type;
/* Disable TV interrupts around load detect or we'll recurse */
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
- PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_disable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_STATUS |
+ PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
- /*
- * Detect TV by polling)
- */
- save_tv_dac = tv_dac;
- tv_ctl = I915_READ(TV_CTL);
- save_tv_ctl = tv_ctl;
- tv_ctl &= ~TV_ENC_ENABLE;
- tv_ctl &= ~TV_TEST_MODE_MASK;
+ save_tv_dac = tv_dac = I915_READ(TV_DAC);
+ save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+
+ /* Poll for TV detection */
+ tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
- tv_dac &= ~TVDAC_SENSE_MASK;
- tv_dac &= ~DAC_A_MASK;
- tv_dac &= ~DAC_B_MASK;
- tv_dac &= ~DAC_C_MASK;
+ if (intel_crtc->pipe == 1)
+ tv_ctl |= TV_ENC_PIPEB_SELECT;
+ else
+ tv_ctl &= ~TV_ENC_PIPEB_SELECT;
+
+ tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
tv_dac |= (TVDAC_STATE_CHG_EN |
TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL |
@@ -1399,13 +1212,26 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
DAC_A_0_7_V |
DAC_B_0_7_V |
DAC_C_0_7_V);
+
+ /*
+ * The TV sense state should be cleared to zero on the Cantiga platform;
+ * otherwise the TV is misdetected. This is a hardware requirement.
+ */
+ if (IS_GM45(dev))
+ tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
+
I915_WRITE(TV_CTL, tv_ctl);
I915_WRITE(TV_DAC, tv_dac);
- intel_wait_for_vblank(dev);
+ POSTING_READ(TV_DAC);
+
+ intel_wait_for_vblank(intel_tv->base.base.dev,
+ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+
+ type = -1;
tv_dac = I915_READ(TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac);
- I915_WRITE(TV_CTL, save_tv_ctl);
- intel_wait_for_vblank(dev);
+ DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
/*
* A B C
* 0 1 1 Composite
@@ -1422,15 +1248,26 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
DRM_DEBUG_KMS("Detected Component TV connection\n");
type = DRM_MODE_CONNECTOR_Component;
} else {
- DRM_DEBUG_KMS("No TV connection detected\n");
+ DRM_DEBUG_KMS("Unrecognised TV connection\n");
type = -1;
}
+ I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ I915_WRITE(TV_CTL, save_tv_ctl);
+ POSTING_READ(TV_CTL);
+
+ /* For unknown reasons the hw barfs if we don't do this vblank wait. */
+ intel_wait_for_vblank(intel_tv->base.base.dev,
+ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+
/* Restore interrupt config */
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
- PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_STATUS |
+ PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
return type;
}
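The branches elided by the hunk above decode the three DAC sense bits against the truth table in the comment: 0 1 1 is composite, and the visible branch matches component. A sketch of that decode with generic bit names in place of the driver's TVDAC_*_SENSE definitions; the S-Video row (A loaded, B clear, C don't-care) and the all-clear component row are assumptions here, since only part of the table survives in this hunk:

enum { SENSE_A = 1 << 2, SENSE_B = 1 << 1, SENSE_C = 1 << 0 };

static int decode_tv_sense(unsigned int sense)
{
	if (sense == (SENSE_B | SENSE_C))
		return 1;	/* composite: B and C sense a load, A does not */
	if ((sense & (SENSE_A | SENSE_B)) == SENSE_A)
		return 2;	/* s-video: A loaded, B clear, C don't-care (assumed) */
	if (sense == 0)
		return 3;	/* component: no DAC senses a load (assumed) */
	return -1;		/* unrecognised combination */
}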
@@ -1441,12 +1278,11 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
*/
static void intel_tv_find_better_format(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
int i;
- if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
+ if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
tv_mode->component_only)
return;
@@ -1454,13 +1290,13 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) {
tv_mode = tv_modes + i;
- if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
+ if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
tv_mode->component_only)
break;
}
- tv_priv->tv_format = tv_mode->name;
- drm_connector_property_set_value(connector,
+ intel_tv->tv_format = tv_mode->name;
+ drm_object_property_set_value(&connector->base,
connector->dev->mode_config.tv_mode_property, i);
}
@@ -1471,44 +1307,43 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
* we have a pipe programmed in order to probe the TV.
*/
static enum drm_connector_status
-intel_tv_detect(struct drm_connector *connector)
+intel_tv_detect(struct drm_connector *connector, bool force)
{
- struct drm_crtc *crtc;
struct drm_display_mode mode;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- struct drm_encoder *encoder = &intel_output->enc;
- int dpms_mode;
- int type = tv_priv->type;
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ int type;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
+ connector->base.id, connector->name,
+ force);
mode = reported_modes[0];
- drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
- if (encoder->crtc && encoder->crtc->enabled) {
- type = intel_tv_detect_type(encoder->crtc, intel_output);
- } else {
- crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode);
- if (crtc) {
- type = intel_tv_detect_type(crtc, intel_output);
- intel_release_load_detect_pipe(intel_output, dpms_mode);
- } else
- type = -1;
- }
+ if (force) {
+ struct intel_load_detect_pipe tmp;
+ struct drm_modeset_acquire_ctx ctx;
- tv_priv->type = type;
+ if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
+ type = intel_tv_detect_type(intel_tv, connector);
+ intel_release_load_detect_pipe(connector, &tmp, &ctx);
+ } else
+ return connector_status_unknown;
+ } else
+ return connector->status;
if (type < 0)
return connector_status_disconnected;
+ intel_tv->type = type;
intel_tv_find_better_format(connector);
+
return connector_status_connected;
}
-static struct input_res {
- char *name;
+static const struct input_res {
+ const char *name;
int w, h;
-} input_res_table[] =
-{
+} input_res_table[] = {
{"640x480", 640, 480},
{"800x600", 800, 600},
{"1024x768", 1024, 768},
@@ -1525,8 +1360,8 @@ static void
intel_tv_chose_preferred_modes(struct drm_connector *connector,
struct drm_display_mode *mode_ptr)
{
- struct intel_output *intel_output = to_intel_output(connector);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
@@ -1550,14 +1385,14 @@ static int
intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
- struct intel_output *intel_output = to_intel_output(connector);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
int j, count = 0;
u64 tmp;
- for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]);
+ for (j = 0; j < ARRAY_SIZE(input_res_table);
j++) {
- struct input_res *input = &input_res_table[j];
+ const struct input_res *input = &input_res_table[j];
unsigned int hactive_s = input->w;
unsigned int vactive_s = input->h;
@@ -1602,13 +1437,10 @@ intel_tv_get_modes(struct drm_connector *connector)
}
static void
-intel_tv_destroy (struct drm_connector *connector)
+intel_tv_destroy(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
-
- drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_output);
+ kfree(connector);
}
@@ -1617,42 +1449,40 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
uint64_t val)
{
struct drm_device *dev = connector->dev;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- struct drm_encoder *encoder = &intel_output->enc;
- struct drm_crtc *crtc = encoder->crtc;
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ struct drm_crtc *crtc = intel_tv->base.base.crtc;
int ret = 0;
bool changed = false;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret < 0)
goto out;
if (property == dev->mode_config.tv_left_margin_property &&
- tv_priv->margin[TV_MARGIN_LEFT] != val) {
- tv_priv->margin[TV_MARGIN_LEFT] = val;
+ intel_tv->margin[TV_MARGIN_LEFT] != val) {
+ intel_tv->margin[TV_MARGIN_LEFT] = val;
changed = true;
} else if (property == dev->mode_config.tv_right_margin_property &&
- tv_priv->margin[TV_MARGIN_RIGHT] != val) {
- tv_priv->margin[TV_MARGIN_RIGHT] = val;
+ intel_tv->margin[TV_MARGIN_RIGHT] != val) {
+ intel_tv->margin[TV_MARGIN_RIGHT] = val;
changed = true;
} else if (property == dev->mode_config.tv_top_margin_property &&
- tv_priv->margin[TV_MARGIN_TOP] != val) {
- tv_priv->margin[TV_MARGIN_TOP] = val;
+ intel_tv->margin[TV_MARGIN_TOP] != val) {
+ intel_tv->margin[TV_MARGIN_TOP] = val;
changed = true;
} else if (property == dev->mode_config.tv_bottom_margin_property &&
- tv_priv->margin[TV_MARGIN_BOTTOM] != val) {
- tv_priv->margin[TV_MARGIN_BOTTOM] = val;
+ intel_tv->margin[TV_MARGIN_BOTTOM] != val) {
+ intel_tv->margin[TV_MARGIN_BOTTOM] = val;
changed = true;
} else if (property == dev->mode_config.tv_mode_property) {
- if (val >= NUM_TV_MODES) {
+ if (val >= ARRAY_SIZE(tv_modes)) {
ret = -EINVAL;
goto out;
}
- if (!strcmp(tv_priv->tv_format, tv_modes[val].name))
+ if (!strcmp(intel_tv->tv_format, tv_modes[val].name))
goto out;
- tv_priv->tv_format = tv_modes[val].name;
+ intel_tv->tv_format = tv_modes[val].name;
changed = true;
} else {
ret = -EINVAL;
@@ -1660,24 +1490,13 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
}
if (changed && crtc)
- drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
- crtc->y, crtc->fb);
+ intel_crtc_restore_mode(crtc);
out:
return ret;
}
-static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
- .dpms = intel_tv_dpms,
- .mode_fixup = intel_tv_mode_fixup,
- .prepare = intel_encoder_prepare,
- .mode_set = intel_tv_mode_set,
- .commit = intel_encoder_commit,
-};
-
static const struct drm_connector_funcs intel_tv_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .save = intel_tv_save,
- .restore = intel_tv_restore,
+ .dpms = intel_connector_dpms,
.detect = intel_tv_detect,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
@@ -1690,13 +1509,8 @@ static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs =
.best_encoder = intel_best_encoder,
};
-static void intel_tv_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
- .destroy = intel_tv_enc_destroy,
+ .destroy = intel_encoder_destroy,
};
/*
@@ -1709,25 +1523,30 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
static int tv_is_present_in_vbt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
+ union child_device_config *p_child;
int i, ret;
- if (!dev_priv->child_dev_num)
+ if (!dev_priv->vbt.child_dev_num)
return 1;
ret = 0;
- for (i = 0; i < dev_priv->child_dev_num; i++) {
- p_child = dev_priv->child_dev + i;
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ p_child = dev_priv->vbt.child_dev + i;
/*
* If the device type is not TV, continue.
*/
- if (p_child->device_type != DEVICE_TYPE_INT_TV &&
- p_child->device_type != DEVICE_TYPE_TV)
+ switch (p_child->old.device_type) {
+ case DEVICE_TYPE_INT_TV:
+ case DEVICE_TYPE_TV:
+ case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
+ break;
+ default:
continue;
+ }
/* Only when the addin_offset is non-zero, it is regarded
* as present.
*/
- if (p_child->addin_offset) {
+ if (p_child->old.addin_offset) {
ret = 1;
break;
}
@@ -1740,10 +1559,11 @@ intel_tv_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
- struct intel_output *intel_output;
- struct intel_tv_priv *tv_priv;
+ struct intel_tv *intel_tv;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
- char **tv_format_names;
+ char *tv_format_names[ARRAY_SIZE(tv_modes)];
int i, initial_mode = 0;
if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
@@ -1754,7 +1574,7 @@ intel_tv_init(struct drm_device *dev)
return;
}
/* Even if we have an encoder we may not have a connector */
- if (!dev_priv->int_tv_support)
+ if (!dev_priv->vbt.int_tv_support)
return;
/*
@@ -1780,68 +1600,85 @@ intel_tv_init(struct drm_device *dev)
(tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
return;
- intel_output = kzalloc(sizeof(struct intel_output) +
- sizeof(struct intel_tv_priv), GFP_KERNEL);
- if (!intel_output) {
+ intel_tv = kzalloc(sizeof(*intel_tv), GFP_KERNEL);
+ if (!intel_tv) {
return;
}
- connector = &intel_output->base;
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_tv);
+ return;
+ }
+
+ intel_encoder = &intel_tv->base;
+ connector = &intel_connector->base;
+
+ /* The documentation, for the older chipsets at least, recommends
+ * using a polling method rather than hotplug detection for TVs.
+ * This is because in order to perform the hotplug detection, the PLLs
+ * for the TV must be kept alive increasing power drain and starving
+ * bandwidth from other encoders. Notably for instance, it causes
+ * pipe underruns on Crestline when this encoder is supposedly idle.
+ *
+ * More recent chipsets favour HDMI rather than integrated S-Video.
+ */
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
- drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC);
- drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
- tv_priv = (struct intel_tv_priv *)(intel_output + 1);
- intel_output->type = INTEL_OUTPUT_TVOUT;
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
- intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT);
- intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1));
- intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
- intel_output->dev_priv = tv_priv;
- tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
+ intel_encoder->compute_config = intel_tv_compute_config;
+ intel_encoder->get_config = intel_tv_get_config;
+ intel_encoder->pre_enable = intel_tv_pre_enable;
+ intel_encoder->enable = intel_enable_tv;
+ intel_encoder->disable = intel_disable_tv;
+ intel_encoder->get_hw_state = intel_tv_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->unregister = intel_connector_unregister;
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_encoder->type = INTEL_OUTPUT_TVOUT;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->cloneable = 0;
+ intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
+ intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
- tv_priv->margin[TV_MARGIN_LEFT] = 54;
- tv_priv->margin[TV_MARGIN_TOP] = 36;
- tv_priv->margin[TV_MARGIN_RIGHT] = 46;
- tv_priv->margin[TV_MARGIN_BOTTOM] = 37;
+ intel_tv->margin[TV_MARGIN_LEFT] = 54;
+ intel_tv->margin[TV_MARGIN_TOP] = 36;
+ intel_tv->margin[TV_MARGIN_RIGHT] = 46;
+ intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
- tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
+ intel_tv->tv_format = tv_modes[initial_mode].name;
- drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs);
drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
/* Create TV properties then attach current values */
- tv_format_names = kmalloc(sizeof(char *) * NUM_TV_MODES,
- GFP_KERNEL);
- if (!tv_format_names)
- goto out;
- for (i = 0; i < NUM_TV_MODES; i++)
- tv_format_names[i] = tv_modes[i].name;
- drm_mode_create_tv_properties(dev, NUM_TV_MODES, tv_format_names);
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
+ tv_format_names[i] = (char *)tv_modes[i].name;
+ drm_mode_create_tv_properties(dev,
+ ARRAY_SIZE(tv_modes),
+ tv_format_names);
- drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
+ drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
initial_mode);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.tv_left_margin_property,
- tv_priv->margin[TV_MARGIN_LEFT]);
- drm_connector_attach_property(connector,
+ intel_tv->margin[TV_MARGIN_LEFT]);
+ drm_object_attach_property(&connector->base,
dev->mode_config.tv_top_margin_property,
- tv_priv->margin[TV_MARGIN_TOP]);
- drm_connector_attach_property(connector,
+ intel_tv->margin[TV_MARGIN_TOP]);
+ drm_object_attach_property(&connector->base,
dev->mode_config.tv_right_margin_property,
- tv_priv->margin[TV_MARGIN_RIGHT]);
- drm_connector_attach_property(connector,
+ intel_tv->margin[TV_MARGIN_RIGHT]);
+ drm_object_attach_property(&connector->base,
dev->mode_config.tv_bottom_margin_property,
- tv_priv->margin[TV_MARGIN_BOTTOM]);
-
- dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS;
-out:
+ intel_tv->margin[TV_MARGIN_BOTTOM]);
drm_sysfs_connector_add(connector);
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
new file mode 100644
index 00000000000..4f6fef7ac06
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -0,0 +1,1079 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+#define FORCEWAKE_ACK_TIMEOUT_MS 2
+
+#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
+#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
+#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
+#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
+#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
+
+static void
+assert_device_not_suspended(struct drm_i915_private *dev_priv)
+{
+ WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+ "Device suspended\n");
+}
+
+static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+{
+ u32 gt_thread_status_mask;
+
+ if (IS_HASWELL(dev_priv->dev))
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
+ else
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
+
+ /* w/a for a sporadic read returning 0 by waiting for the GT
+ * thread to wake up.
+ */
+ if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
+ DRM_ERROR("GT thread status wait timed out\n");
+}
+
+static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE, 0);
+ /* something from same cacheline, but !FORCEWAKE */
+ __raw_posting_read(dev_priv, ECOBUS);
+}
+
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
+ int fw_engine)
+{
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE, 1);
+ /* something from same cacheline, but !FORCEWAKE */
+ __raw_posting_read(dev_priv, ECOBUS);
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+
+ /* WaRsForcewakeWaitTC0:snb */
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ __raw_posting_read(dev_priv, ECOBUS);
+}
+
+static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
+ int fw_engine)
+{
+ u32 forcewake_ack;
+
+ if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
+ forcewake_ack = FORCEWAKE_ACK_HSW;
+ else
+ forcewake_ack = FORCEWAKE_MT_ACK;
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_MT,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ __raw_posting_read(dev_priv, ECOBUS);
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+
+ /* WaRsForcewakeWaitTC0:ivb,hsw */
+ if (INTEL_INFO(dev_priv->dev)->gen < 8)
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
+{
+ u32 gtfifodbg;
+
+ gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
+ if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
+ __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
+}
+
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
+ int fw_engine)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE, 0);
+ /* something from same cacheline, but !FORCEWAKE */
+ __raw_posting_read(dev_priv, ECOBUS);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
+ int fw_engine)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_MT,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ __raw_posting_read(dev_priv, ECOBUS);
+
+ if (IS_GEN7(dev_priv->dev))
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+ int ret = 0;
+
+ /* On VLV, FIFO will be shared by both SW and HW.
+ * So, we need to read the FREE_ENTRIES every time */
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ dev_priv->uncore.fifo_count =
+ __raw_i915_read32(dev_priv, GTFIFOCTL) &
+ GT_FIFO_FREE_ENTRIES_MASK;
+
+ if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+ int loop = 500;
+ u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
+ while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+ udelay(10);
+ fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
+ }
+ if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
+ ++ret;
+ dev_priv->uncore.fifo_count = fifo;
+ }
+ dev_priv->uncore.fifo_count--;
+
+ return ret;
+}
+
+static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_DISABLE(0xffff));
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
+}
+
+static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
+ int fw_engine)
+{
+ /* Check for Render Engine */
+ if (FORCEWAKE_RENDER & fw_engine) {
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_VLV) &
+ FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_VLV) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: waiting for Render to ack.\n");
+ }
+
+ /* Check for Media Engine */
+ if (FORCEWAKE_MEDIA & fw_engine) {
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_MEDIA_VLV) &
+ FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_MEDIA_VLV) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: waiting for media to ack.\n");
+ }
+
+ /* WaRsForcewakeWaitTC0:vlv */
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+
+}
+
+static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
+ int fw_engine)
+{
+
+ /* Check for Render Engine */
+ if (FORCEWAKE_RENDER & fw_engine)
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+
+ /* Check for Media Engine */
+ if (FORCEWAKE_MEDIA & fw_engine)
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+ /* The below doubles as a POSTING_READ */
+ gen6_gt_check_fifodbg(dev_priv);
+
+}
+
+static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (fw_engine & FORCEWAKE_RENDER &&
+ dev_priv->uncore.fw_rendercount++ != 0)
+ fw_engine &= ~FORCEWAKE_RENDER;
+ if (fw_engine & FORCEWAKE_MEDIA &&
+ dev_priv->uncore.fw_mediacount++ != 0)
+ fw_engine &= ~FORCEWAKE_MEDIA;
+
+ if (fw_engine)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (fw_engine & FORCEWAKE_RENDER) {
+ WARN_ON(!dev_priv->uncore.fw_rendercount);
+ if (--dev_priv->uncore.fw_rendercount != 0)
+ fw_engine &= ~FORCEWAKE_RENDER;
+ }
+
+ if (fw_engine & FORCEWAKE_MEDIA) {
+ WARN_ON(!dev_priv->uncore.fw_mediacount);
+ if (--dev_priv->uncore.fw_mediacount != 0)
+ fw_engine &= ~FORCEWAKE_MEDIA;
+ }
+
+ if (fw_engine)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
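The render/media counts above are plain per-engine reference counting: only the 0 -> 1 transition of a count issues the hardware get, and only the 1 -> 0 transition issues the put, so nested callers pay nothing. The idiom, stripped of locking and with placeholder hardware hooks (hw_force_wake_* are not real driver symbols):

void hw_force_wake_get(void);	/* placeholder */
void hw_force_wake_put(void);	/* placeholder */

static unsigned int rendercount;	/* uncore.lock protects this in the driver */

static void fw_get_render(void)
{
	if (rendercount++ == 0)
		hw_force_wake_get();	/* only the 0 -> 1 transition hits hw */
}

static void fw_put_render(void)
{
	if (--rendercount == 0)
		hw_force_wake_put();	/* only the 1 -> 0 transition hits hw */
}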
+
+static void gen6_force_wake_timer(unsigned long arg)
+{
+ struct drm_i915_private *dev_priv = (void *)arg;
+ unsigned long irqflags;
+
+ assert_device_not_suspended(dev_priv);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ WARN_ON(!dev_priv->uncore.forcewake_count);
+
+ if (--dev_priv->uncore.forcewake_count == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ intel_runtime_pm_put(dev_priv);
+}
+
+static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
+ gen6_force_wake_timer((unsigned long)dev_priv);
+
+ /* Hold uncore.lock across reset to prevent any register access
+ * with forcewake not set correctly
+ */
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (IS_VALLEYVIEW(dev))
+ vlv_force_wake_reset(dev_priv);
+ else if (IS_GEN6(dev) || IS_GEN7(dev))
+ __gen6_gt_force_wake_reset(dev_priv);
+
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
+ __gen7_gt_force_wake_mt_reset(dev_priv);
+
+ if (restore) { /* If reset with a user forcewake, try to restore */
+ unsigned fw = 0;
+
+ if (IS_VALLEYVIEW(dev)) {
+ if (dev_priv->uncore.fw_rendercount)
+ fw |= FORCEWAKE_RENDER;
+
+ if (dev_priv->uncore.fw_mediacount)
+ fw |= FORCEWAKE_MEDIA;
+ } else {
+ if (dev_priv->uncore.forcewake_count)
+ fw = FORCEWAKE_ALL;
+ }
+
+ if (fw)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
+
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ dev_priv->uncore.fifo_count =
+ __raw_i915_read32(dev_priv, GTFIFOCTL) &
+ GT_FIFO_FREE_ENTRIES_MASK;
+ } else {
+ dev_priv->uncore.forcewake_count = 0;
+ dev_priv->uncore.fw_rendercount = 0;
+ dev_priv->uncore.fw_mediacount = 0;
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+void intel_uncore_early_sanitize(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_FPGA_DBG_UNCLAIMED(dev))
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+
+ if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
+ (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
+ /* The docs do not explain exactly how the calculation can be
+ * made. It is somewhat guessable, but for now, it's always
+ * 128MB.
+ * NB: We can't write IDICR yet because we do not have gt funcs
+ * set up */
+ dev_priv->ellc_size = 128;
+ DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
+ }
+
+ /* clear out old GT FIFO errors */
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ __raw_i915_write32(dev_priv, GTFIFODBG,
+ __raw_i915_read32(dev_priv, GTFIFODBG));
+
+ intel_uncore_forcewake_reset(dev, false);
+}
+
+void intel_uncore_sanitize(struct drm_device *dev)
+{
+ /* BIOS often leaves RC6 enabled, but disable it for hw init */
+ intel_disable_gt_powersave(dev);
+}
+
+/*
+ * Generally this is called implicitly by the register read function. However,
+ * if some sequence requires the GT to not power down then this function should
+ * be called at the beginning of the sequence followed by a call to
+ * gen6_gt_force_wake_put() at the end of the sequence.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+{
+ unsigned long irqflags;
+
+ if (!dev_priv->uncore.funcs.force_wake_get)
+ return;
+
+ intel_runtime_pm_get(dev_priv);
+
+ /* Redirect to VLV specific routine */
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ return vlv_force_wake_get(dev_priv, fw_engine);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (dev_priv->uncore.forcewake_count++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
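Per the comment above, the expected calling convention for a sequence that must not let the GT power down is a get/put bracket around the accesses. A sketch, where SOME_GT_REG_A/B are placeholders rather than real registers:

u32 val_a, val_b;

gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
val_a = I915_READ(SOME_GT_REG_A);
val_b = I915_READ(SOME_GT_REG_B);	/* no power-down between the reads */
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);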
+
+/*
+ * see gen6_gt_force_wake_get()
+ */
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
+{
+ unsigned long irqflags;
+ bool delayed = false;
+
+ if (!dev_priv->uncore.funcs.force_wake_put)
+ return;
+
+ /* Redirect to VLV specific routine */
+ if (IS_VALLEYVIEW(dev_priv->dev)) {
+ vlv_force_wake_put(dev_priv, fw_engine);
+ goto out;
+ }
+
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ WARN_ON(!dev_priv->uncore.forcewake_count);
+
+ if (--dev_priv->uncore.forcewake_count == 0) {
+ dev_priv->uncore.forcewake_count++;
+ delayed = true;
+ mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
+ jiffies + 1);
+ }
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+out:
+ if (!delayed)
+ intel_runtime_pm_put(dev_priv);
+}
+
+void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->uncore.funcs.force_wake_get)
+ return;
+
+ WARN_ON(dev_priv->uncore.forcewake_count > 0);
+}
+
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+ ((reg) < 0x40000 && (reg) != FORCEWAKE)
+
+#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+ (((reg) >= 0x2000 && (reg) < 0x4000) ||\
+ ((reg) >= 0x5000 && (reg) < 0x8000) ||\
+ ((reg) >= 0xB000 && (reg) < 0x12000) ||\
+ ((reg) >= 0x2E000 && (reg) < 0x30000))
+
+#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
+ (((reg) >= 0x12000 && (reg) < 0x14000) ||\
+ ((reg) >= 0x22000 && (reg) < 0x24000) ||\
+ ((reg) >= 0x30000 && (reg) < 0x40000))
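The two range macros above classify a VLV MMIO offset by the power well that owns it, so an access only wakes the engine actually involved. For illustration, with example offsets:

/* FORCEWAKE_VLV_RENDER_RANGE_OFFSET(0x2030)  -> true (0x2000..0x3fff)   */
/* FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(0x12080)  -> true (0x12000..0x13fff) */
/* both false for 0x40000 and up: no forcewake taken for such an access  */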
+
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+ /* WaIssueDummyWriteToWakeupFromRC6:ilk
+ * Issue a dummy write to wake up the chip from rc6 before touching it
+ * for real. MI_MODE is masked, hence harmless to write 0 into. */
+ __raw_i915_write32(dev_priv, MI_MODE, 0);
+}
+
+static void
+hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+{
+ if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
+ DRM_ERROR("Unknown unclaimed register before writing to %x\n",
+ reg);
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+}
+
+static void
+hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+{
+ if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
+ DRM_ERROR("Unclaimed write to %x\n", reg);
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+}
+
+#define REG_READ_HEADER(x) \
+ unsigned long irqflags; \
+ u##x val = 0; \
+ assert_device_not_suspended(dev_priv); \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define REG_READ_FOOTER \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+ return val
+
+#define __gen4_read(x) \
+static u##x \
+gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ REG_READ_HEADER(x); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ REG_READ_FOOTER; \
+}
+
+#define __gen5_read(x) \
+static u##x \
+gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ REG_READ_HEADER(x); \
+ ilk_dummy_write(dev_priv); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ REG_READ_FOOTER; \
+}
+
+#define __gen6_read(x) \
+static u##x \
+gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ REG_READ_HEADER(x); \
+ if (dev_priv->uncore.forcewake_count == 0 && \
+ NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+ FORCEWAKE_ALL); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+ FORCEWAKE_ALL); \
+ } else { \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ } \
+ REG_READ_FOOTER; \
+}
+
+#define __vlv_read(x) \
+static u##x \
+vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ unsigned fwengine = 0; \
+ REG_READ_HEADER(x); \
+ if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
+ } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+ REG_READ_FOOTER; \
+}
+
+
+__vlv_read(8)
+__vlv_read(16)
+__vlv_read(32)
+__vlv_read(64)
+__gen6_read(8)
+__gen6_read(16)
+__gen6_read(32)
+__gen6_read(64)
+__gen5_read(8)
+__gen5_read(16)
+__gen5_read(32)
+__gen5_read(64)
+__gen4_read(8)
+__gen4_read(16)
+__gen4_read(32)
+__gen4_read(64)
+
+#undef __vlv_read
+#undef __gen6_read
+#undef __gen5_read
+#undef __gen4_read
+#undef REG_READ_FOOTER
+#undef REG_READ_HEADER
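For readers new to this x-macro style: each __gen6_read(x) invocation above stamps out one sized accessor. Expanding __gen6_read(32) by hand, after the header/footer macros are substituted, gives:

static u32
gen6_read32(struct drm_i915_private *dev_priv, off_t reg, bool trace)
{
	unsigned long irqflags;
	u32 val = 0;

	assert_device_not_suspended(dev_priv);
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count == 0 &&
	    NEEDS_FORCE_WAKE(dev_priv, reg)) {
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
		val = __raw_i915_read32(dev_priv, reg);
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	} else {
		val = __raw_i915_read32(dev_priv, reg);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
	return val;
}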
+
+#define REG_WRITE_HEADER \
+ unsigned long irqflags; \
+ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+ assert_device_not_suspended(dev_priv); \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define REG_WRITE_FOOTER \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+
+#define __gen4_write(x) \
+static void \
+gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ REG_WRITE_HEADER; \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ REG_WRITE_FOOTER; \
+}
+
+#define __gen5_write(x) \
+static void \
+gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ REG_WRITE_HEADER; \
+ ilk_dummy_write(dev_priv); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ REG_WRITE_FOOTER; \
+}
+
+#define __gen6_write(x) \
+static void \
+gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ u32 __fifo_ret = 0; \
+ REG_WRITE_HEADER; \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+ } \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (unlikely(__fifo_ret)) { \
+ gen6_gt_check_fifodbg(dev_priv); \
+ } \
+ REG_WRITE_FOOTER; \
+}
+
+#define __hsw_write(x) \
+static void \
+hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ u32 __fifo_ret = 0; \
+ REG_WRITE_HEADER; \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+ } \
+ hsw_unclaimed_reg_clear(dev_priv, reg); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (unlikely(__fifo_ret)) { \
+ gen6_gt_check_fifodbg(dev_priv); \
+ } \
+ hsw_unclaimed_reg_check(dev_priv, reg); \
+ REG_WRITE_FOOTER; \
+}
+
+static const u32 gen8_shadowed_regs[] = {
+ FORCEWAKE_MT,
+ GEN6_RPNSWREQ,
+ GEN6_RC_VIDEO_FREQ,
+ RING_TAIL(RENDER_RING_BASE),
+ RING_TAIL(GEN6_BSD_RING_BASE),
+ RING_TAIL(VEBOX_RING_BASE),
+ RING_TAIL(BLT_RING_BASE),
+ /* TODO: Other registers are not yet used */
+};
+
+static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
+ if (reg == gen8_shadowed_regs[i])
+ return true;
+
+ return false;
+}
+
+#define __gen8_write(x) \
+static void \
+gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ REG_WRITE_HEADER; \
+ if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
+ if (dev_priv->uncore.forcewake_count == 0) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+ FORCEWAKE_ALL); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (dev_priv->uncore.forcewake_count == 0) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+ FORCEWAKE_ALL); \
+ } else { \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ } \
+ REG_WRITE_FOOTER; \
+}
+
+__gen8_write(8)
+__gen8_write(16)
+__gen8_write(32)
+__gen8_write(64)
+__hsw_write(8)
+__hsw_write(16)
+__hsw_write(32)
+__hsw_write(64)
+__gen6_write(8)
+__gen6_write(16)
+__gen6_write(32)
+__gen6_write(64)
+__gen5_write(8)
+__gen5_write(16)
+__gen5_write(32)
+__gen5_write(64)
+__gen4_write(8)
+__gen4_write(16)
+__gen4_write(32)
+__gen4_write(64)
+
+#undef __gen8_write
+#undef __hsw_write
+#undef __gen6_write
+#undef __gen5_write
+#undef __gen4_write
+#undef REG_WRITE_FOOTER
+#undef REG_WRITE_HEADER
+
+void intel_uncore_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ setup_timer(&dev_priv->uncore.force_wake_timer,
+ gen6_force_wake_timer, (unsigned long)dev_priv);
+
+ intel_uncore_early_sanitize(dev);
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
+ } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
+ dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
+ dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
+ } else if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ /* IVB configs may use multi-threaded forcewake */
+
+ /* A small trick here - if the BIOS hasn't configured
+ * MT forcewake, and if the device is in RC6, then
+ * force_wake_mt_get will not wake the device and the
+ * ECOBUS read will return zero. Which will be
+ * (correctly) interpreted by the test below as MT
+ * forcewake being disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
+ ecobus = __raw_i915_read32(dev_priv, ECOBUS);
+ __gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen7_gt_force_wake_mt_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen7_gt_force_wake_mt_put;
+ } else {
+ DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+ DRM_INFO("when using vblank-synced partial screen updates.\n");
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_put;
+ }
+ } else if (IS_GEN6(dev)) {
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_put;
+ }
+
+ switch (INTEL_INFO(dev)->gen) {
+ default:
+ dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
+ dev_priv->uncore.funcs.mmio_writew = gen8_write16;
+ dev_priv->uncore.funcs.mmio_writel = gen8_write32;
+ dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
+ dev_priv->uncore.funcs.mmio_readb = gen6_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen6_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen6_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ break;
+ case 7:
+ case 6:
+ if (IS_HASWELL(dev)) {
+ dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
+ dev_priv->uncore.funcs.mmio_writew = hsw_write16;
+ dev_priv->uncore.funcs.mmio_writel = hsw_write32;
+ dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
+ } else {
+ dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
+ dev_priv->uncore.funcs.mmio_writew = gen6_write16;
+ dev_priv->uncore.funcs.mmio_writel = gen6_write32;
+ dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
+ }
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->uncore.funcs.mmio_readb = vlv_read8;
+ dev_priv->uncore.funcs.mmio_readw = vlv_read16;
+ dev_priv->uncore.funcs.mmio_readl = vlv_read32;
+ dev_priv->uncore.funcs.mmio_readq = vlv_read64;
+ } else {
+ dev_priv->uncore.funcs.mmio_readb = gen6_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen6_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen6_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ }
+ break;
+ case 5:
+ dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
+ dev_priv->uncore.funcs.mmio_writew = gen5_write16;
+ dev_priv->uncore.funcs.mmio_writel = gen5_write32;
+ dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
+ dev_priv->uncore.funcs.mmio_readb = gen5_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen5_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen5_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen5_read64;
+ break;
+ case 4:
+ case 3:
+ case 2:
+ dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
+ dev_priv->uncore.funcs.mmio_writew = gen4_write16;
+ dev_priv->uncore.funcs.mmio_writel = gen4_write32;
+ dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
+ dev_priv->uncore.funcs.mmio_readb = gen4_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen4_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen4_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen4_read64;
+ break;
+ }
+}
+
+void intel_uncore_fini(struct drm_device *dev)
+{
+ /* Paranoia: make sure we have disabled everything before we exit. */
+ intel_uncore_sanitize(dev);
+ intel_uncore_forcewake_reset(dev, false);
+}
+
+#define GEN_RANGE(l, h) GENMASK(h, l)
+
+static const struct register_whitelist {
+ uint64_t offset;
+ uint32_t size;
+ /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+ uint32_t gen_bitmask;
+} whitelist[] = {
+ { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
+};
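gen_bitmask works as the comment says: bit N set means gen N may use the register. GEN_RANGE(4, 8) is GENMASK(8, 4) = 0x1f0, and the ioctl below tests membership with a shifted bit:

/* Membership test, as in i915_reg_read_ioctl() below: */
int gen = 6;				/* example device generation */
int ok = (1 << gen) & GEN_RANGE(4, 8);	/* 0x040 & 0x1f0 -> nonzero, allowed */
int no = (1 << 3) & GEN_RANGE(4, 8);	/* gen 3 -> 0, rejected with -EINVAL */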
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_reg_read *reg = data;
+ struct register_whitelist const *entry = whitelist;
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+ if (entry->offset == reg->offset &&
+ (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(whitelist))
+ return -EINVAL;
+
+ intel_runtime_pm_get(dev_priv);
+
+ switch (entry->size) {
+ case 8:
+ reg->val = I915_READ64(reg->offset);
+ break;
+ case 4:
+ reg->val = I915_READ(reg->offset);
+ break;
+ case 2:
+ reg->val = I915_READ16(reg->offset);
+ break;
+ case 1:
+ reg->val = I915_READ8(reg->offset);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ intel_runtime_pm_put(dev_priv);
+ return ret;
+}
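A hypothetical userspace counterpart of this ioctl, assuming the struct drm_i915_reg_read and DRM_IOCTL_I915_REG_READ definitions from <drm/i915_drm.h>, and assuming 0x2358 as the render ring RING_TIMESTAMP offset (both are assumptions, not taken from this patch):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static unsigned long long read_render_timestamp(int fd)
{
	struct drm_i915_reg_read rr = { .offset = 0x2358 };	/* assumed offset */

	if (ioctl(fd, DRM_IOCTL_I915_REG_READ, &rr) != 0)
		return 0;	/* anything off the whitelist gets -EINVAL */
	return rr.val;
}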
+
+int i915_get_reset_stats_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_reset_stats *args = data;
+ struct i915_ctx_hang_stats *hs;
+ struct intel_context *ctx;
+ int ret;
+
+ if (args->flags || args->pad)
+ return -EINVAL;
+
+ if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
+ if (IS_ERR(ctx)) {
+ mutex_unlock(&dev->struct_mutex);
+ return PTR_ERR(ctx);
+ }
+ hs = &ctx->hang_stats;
+
+ if (capable(CAP_SYS_ADMIN))
+ args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ else
+ args->reset_count = 0;
+
+ args->batch_active = hs->batch_active;
+ args->batch_pending = hs->batch_pending;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int i965_reset_complete(struct drm_device *dev)
+{
+ u8 gdrst;
+ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
+
+static int i965_do_reset(struct drm_device *dev)
+{
+ int ret;
+
+ /* FIXME: i965g/gm need a display save/restore for gpu reset. */
+ return -ENODEV;
+
+ /*
+ * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+ * well as the reset bit (GR/bit 0). Setting the GR bit
+ * triggers the reset; when done, the hardware will clear it.
+ */
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+
+ return 0;
+}
+
+static int g4x_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ /* WaVcpClkGateDisableForMediaReset:ctg,elk */
+ I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
+ POSTING_READ(VDECCLK_GATE_D);
+
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ /* WaVcpClkGateDisableForMediaReset:ctg,elk */
+ I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
+ POSTING_READ(VDECCLK_GATE_D);
+
+ pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+
+ return 0;
+}
+
+static int ironlake_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
+ ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
+ ILK_GRDOM_RESET_ENABLE) == 0, 500);
+ if (ret)
+ return ret;
+
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
+ ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
+ ILK_GRDOM_RESET_ENABLE) == 0, 500);
+ if (ret)
+ return ret;
+
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);
+
+ return 0;
+}
+
+static int gen6_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* Reset the chip */
+
+ /* GEN6_GDRST is not in the GT power well, so there is no need to
+ * check for fifo space for the write or to forcewake the chip for
+ * the read.
+ */
+ __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
+
+ /* Spin waiting for the device to ack the reset request */
+ ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+
+ intel_uncore_forcewake_reset(dev, true);
+
+ return ret;
+}
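
All of the reset helpers above poll the hardware through wait_for(COND, MS). A minimal sketch of that polling idiom, under the assumption that it simply sleeps between checks (hypothetical name, not the driver's actual definition):

#define sketch_wait_for(COND, MS) ({					\
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		usleep_range(1000, 2000); /* back off between polls */	\
	}								\
	ret__;								\
})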
+
+int intel_gpu_reset(struct drm_device *dev)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 8:
+ case 7:
+ case 6: return gen6_do_reset(dev);
+ case 5: return ironlake_do_reset(dev);
+ case 4:
+ if (IS_G4X(dev))
+ return g4x_do_reset(dev);
+ else
+ return i965_do_reset(dev);
+ default: return -ENODEV;
+ }
+}
+
+void intel_uncore_check_errors(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
+ (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ DRM_ERROR("Unclaimed register before interrupt\n");
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+}
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 3c917fb3a60..c3bf059ba72 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -35,10 +35,8 @@
* \author Gareth Hughes <gareth@valinux.com>
*/
-#include "drmP.h"
-#include "drm.h"
-#include "drm_sarea.h"
-#include "mga_drm.h"
+#include <drm/drmP.h>
+#include <drm/mga_drm.h>
#include "mga_drv.h"
#define MGA_DEFAULT_USEC_TIMEOUT 10000
@@ -52,7 +50,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
* Engine control
*/
-int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
+int mga_do_wait_for_idle(drm_mga_private_t *dev_priv)
{
u32 status = 0;
int i;
@@ -74,7 +72,7 @@ int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
return -EBUSY;
}
-static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
+static int mga_do_dma_reset(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
@@ -102,7 +100,7 @@ static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
* Primary DMA stream
*/
-void mga_do_dma_flush(drm_mga_private_t * dev_priv)
+void mga_do_dma_flush(drm_mga_private_t *dev_priv)
{
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
u32 head, tail;
@@ -142,11 +140,10 @@ void mga_do_dma_flush(drm_mga_private_t * dev_priv)
head = MGA_READ(MGA_PRIMADDRESS);
- if (head <= tail) {
+ if (head <= tail)
primary->space = primary->size - primary->tail;
- } else {
+ else
primary->space = head - tail;
- }
DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
DRM_DEBUG(" tail = 0x%06lx\n", (unsigned long)(tail - dev_priv->primary->offset));
@@ -158,7 +155,7 @@ void mga_do_dma_flush(drm_mga_private_t * dev_priv)
DRM_DEBUG("done.\n");
}
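
The head/tail comparison above is standard ring-buffer accounting: when the hardware read pointer (head) trails the software write pointer (tail), the free region runs from tail to the end of the ring; otherwise it is the gap between the two. An illustrative helper (not part of the patch):

static u32 prim_ring_space(u32 head, u32 tail, u32 size)
{
	if (head <= tail)
		return size - tail;	/* free space runs to the ring's end */
	return head - tail;		/* head is ahead: free space is the gap */
}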
-void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
+void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv)
{
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
u32 head, tail;
@@ -181,11 +178,10 @@ void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
head = MGA_READ(MGA_PRIMADDRESS);
- if (head == dev_priv->primary->offset) {
+ if (head == dev_priv->primary->offset)
primary->space = primary->size;
- } else {
+ else
primary->space = head - dev_priv->primary->offset;
- }
DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
DRM_DEBUG(" tail = 0x%06x\n", primary->tail);
@@ -199,7 +195,7 @@ void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
DRM_DEBUG("done.\n");
}
-void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
+void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv)
{
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -220,11 +216,11 @@ void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
* Freelist management
*/
-#define MGA_BUFFER_USED ~0
+#define MGA_BUFFER_USED (~0)
#define MGA_BUFFER_FREE 0
#if MGA_FREELIST_DEBUG
-static void mga_freelist_print(struct drm_device * dev)
+static void mga_freelist_print(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *entry;
@@ -245,7 +241,7 @@ static void mga_freelist_print(struct drm_device * dev)
}
#endif
-static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv)
+static int mga_freelist_init(struct drm_device *dev, drm_mga_private_t *dev_priv)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
@@ -288,7 +284,7 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr
return 0;
}
-static void mga_freelist_cleanup(struct drm_device * dev)
+static void mga_freelist_cleanup(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *entry;
@@ -308,7 +304,7 @@ static void mga_freelist_cleanup(struct drm_device * dev)
#if 0
/* FIXME: Still needed?
*/
-static void mga_freelist_reset(struct drm_device * dev)
+static void mga_freelist_reset(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
@@ -356,7 +352,7 @@ static struct drm_buf *mga_freelist_get(struct drm_device * dev)
return NULL;
}
-int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
+int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
@@ -391,7 +387,7 @@ int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
* DMA initialization, cleanup
*/
-int mga_driver_load(struct drm_device * dev, unsigned long flags)
+int mga_driver_load(struct drm_device *dev, unsigned long flags)
{
drm_mga_private_t *dev_priv;
int ret;
@@ -405,13 +401,10 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags)
dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
dev_priv->chipset = flags;
- dev_priv->mmio_base = drm_get_resource_start(dev, 1);
- dev_priv->mmio_size = drm_get_resource_len(dev, 1);
+ pci_set_master(dev->pdev);
- dev->counters += 3;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
+ dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
+ dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
ret = drm_vblank_init(dev, 1);
@@ -428,7 +421,7 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags)
* Bootstrap the driver for AGP DMA.
*
* \todo
- * Investigate whether there is any benifit to storing the WARP microcode in
+ * Investigate whether there is any benefit to storing the WARP microcode in
* AGP memory. If not, the microcode may as well always be put in PCI
* memory.
*
@@ -439,8 +432,8 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags)
*
* \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
*/
-static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
- drm_mga_dma_bootstrap_t * dma_bs)
+static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
+ drm_mga_dma_bootstrap_t *dma_bs)
{
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
@@ -481,11 +474,10 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
*/
if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
- if (mode.mode & 0x02) {
+ if (mode.mode & 0x02)
MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
- } else {
+ else
MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
- }
}
/* Allocate and bind AGP memory. */
@@ -593,8 +585,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
return 0;
}
#else
-static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
- drm_mga_dma_bootstrap_t * dma_bs)
+static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
+ drm_mga_dma_bootstrap_t *dma_bs)
{
return -EINVAL;
}
@@ -614,8 +606,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
*
* \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
*/
-static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
- drm_mga_dma_bootstrap_t * dma_bs)
+static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
+ drm_mga_dma_bootstrap_t *dma_bs)
{
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
@@ -678,9 +670,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
req.size = dma_bs->secondary_bin_size;
err = drm_addbufs_pci(dev, &req);
- if (!err) {
+ if (!err)
break;
- }
}
if (bin_count == 0) {
@@ -704,10 +695,10 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
return 0;
}
-static int mga_do_dma_bootstrap(struct drm_device * dev,
- drm_mga_dma_bootstrap_t * dma_bs)
+static int mga_do_dma_bootstrap(struct drm_device *dev,
+ drm_mga_dma_bootstrap_t *dma_bs)
{
- const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
+ const int is_agp = (dma_bs->agp_mode != 0) && drm_pci_device_is_agp(dev);
int err;
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
@@ -737,17 +728,15 @@ static int mga_do_dma_bootstrap(struct drm_device * dev,
* carve off portions of it for internal uses. The remaining memory
* is returned to user-mode to be used for AGP textures.
*/
- if (is_agp) {
+ if (is_agp)
err = mga_do_agp_dma_bootstrap(dev, dma_bs);
- }
/* If we attempted to initialize the card for AGP DMA but failed,
* clean-up any mess that may have been created.
*/
- if (err) {
+ if (err)
mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
- }
 /* Not only do we want to try to initialize PCI cards for PCI DMA,
 * but we also try to initialize AGP cards that could not be
@@ -757,9 +746,8 @@ static int mga_do_dma_bootstrap(struct drm_device * dev,
* AGP memory, etc.
*/
- if (!is_agp || err) {
+ if (!is_agp || err)
err = mga_do_pci_dma_bootstrap(dev, dma_bs);
- }
return err;
}
@@ -792,7 +780,7 @@ int mga_dma_bootstrap(struct drm_device *dev, void *data,
return err;
}
-static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
+static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init)
{
drm_mga_private_t *dev_priv;
int ret;
@@ -800,11 +788,10 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
dev_priv = dev->dev_private;
- if (init->sgram) {
+ if (init->sgram)
dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
- } else {
+ else
dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
- }
dev_priv->maccess = init->maccess;
dev_priv->fb_cpp = init->fb_cpp;
@@ -975,9 +962,8 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
dev_priv->agp_handle = 0;
}
- if ((dev->agp != NULL) && dev->agp->acquired) {
+ if ((dev->agp != NULL) && dev->agp->acquired)
err = drm_agp_release(dev);
- }
#endif
}
@@ -998,9 +984,8 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
memset(dev_priv->warp_pipe_phys, 0,
sizeof(dev_priv->warp_pipe_phys));
- if (dev_priv->head != NULL) {
+ if (dev_priv->head != NULL)
mga_freelist_cleanup(dev);
- }
}
return err;
@@ -1017,9 +1002,8 @@ int mga_dma_init(struct drm_device *dev, void *data,
switch (init->func) {
case MGA_INIT_DMA:
err = mga_do_init_dma(dev, init);
- if (err) {
+ if (err)
(void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
- }
return err;
case MGA_CLEANUP_DMA:
return mga_do_cleanup_dma(dev, FULL_CLEANUP);
@@ -1047,9 +1031,8 @@ int mga_dma_flush(struct drm_device *dev, void *data,
WRAP_WAIT_WITH_RETURN(dev_priv);
- if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) {
+ if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL))
mga_do_dma_flush(dev_priv);
- }
if (lock->flags & _DRM_LOCK_QUIESCENT) {
#if MGA_DMA_DEBUG
@@ -1079,8 +1062,8 @@ int mga_dma_reset(struct drm_device *dev, void *data,
* DMA buffer management
*/
-static int mga_dma_get_buffers(struct drm_device * dev,
- struct drm_file *file_priv, struct drm_dma * d)
+static int mga_dma_get_buffers(struct drm_device *dev,
+ struct drm_file *file_priv, struct drm_dma *d)
{
struct drm_buf *buf;
int i;
@@ -1092,10 +1075,10 @@ static int mga_dma_get_buffers(struct drm_device * dev,
buf->file_priv = file_priv;
- if (DRM_COPY_TO_USER(&d->request_indices[i],
+ if (copy_to_user(&d->request_indices[i],
&buf->idx, sizeof(buf->idx)))
return -EFAULT;
- if (DRM_COPY_TO_USER(&d->request_sizes[i],
+ if (copy_to_user(&d->request_sizes[i],
&buf->total, sizeof(buf->total)))
return -EFAULT;
@@ -1134,9 +1117,8 @@ int mga_dma_buffers(struct drm_device *dev, void *data,
d->granted_count = 0;
- if (d->request_count) {
+ if (d->request_count)
ret = mga_dma_get_buffers(dev, file_priv, d);
- }
return ret;
}
@@ -1144,7 +1126,7 @@ int mga_dma_buffers(struct drm_device *dev, void *data,
/**
* Called just before the module is unloaded.
*/
-int mga_driver_unload(struct drm_device * dev)
+int mga_driver_unload(struct drm_device *dev)
{
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -1155,12 +1137,12 @@ int mga_driver_unload(struct drm_device * dev)
/**
* Called when the last opener of the device is closed.
*/
-void mga_driver_lastclose(struct drm_device * dev)
+void mga_driver_lastclose(struct drm_device *dev)
{
mga_do_cleanup_dma(dev, FULL_CLEANUP);
}
-int mga_driver_dma_quiescent(struct drm_device * dev)
+int mga_driver_dma_quiescent(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
return mga_do_wait_for_idle(dev_priv);
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index ddfe16197b5..6b1a87c8aac 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -29,22 +29,36 @@
* Gareth Hughes <gareth@valinux.com>
*/
-#include "drmP.h"
-#include "drm.h"
-#include "mga_drm.h"
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/mga_drm.h>
#include "mga_drv.h"
-#include "drm_pciids.h"
+#include <drm/drm_pciids.h>
-static int mga_driver_device_is_agp(struct drm_device * dev);
+static int mga_driver_device_is_agp(struct drm_device *dev);
static struct pci_device_id pciidlist[] = {
mga_PCI_IDS
};
+static const struct file_operations mga_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mga_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
+ DRIVER_USE_AGP | DRIVER_PCI_DMA |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_mga_buf_priv_t),
.load = mga_driver_load,
@@ -59,28 +73,9 @@ static struct drm_driver driver = {
.irq_postinstall = mga_driver_irq_postinstall,
.irq_uninstall = mga_driver_irq_uninstall,
.irq_handler = mga_driver_irq_handler,
- .reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = mga_ioctls,
.dma_ioctl = mga_dma_buffers,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = mga_compat_ioctl,
-#endif
- },
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
+ .fops = &mga_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -89,15 +84,20 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver mga_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init mga_init(void)
{
driver.num_ioctls = mga_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &mga_pci_driver);
}
static void __exit mga_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &mga_pci_driver);
}
module_init(mga_init);
@@ -119,7 +119,7 @@ MODULE_LICENSE("GPL and additional rights");
* \returns
* If the device is a PCI G450, zero is returned. Otherwise 2 is returned.
*/
-static int mga_driver_device_is_agp(struct drm_device * dev)
+static int mga_driver_device_is_agp(struct drm_device *dev)
{
const struct pci_dev *const pdev = dev->pdev;
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index be6c6b9b0e8..fe453213600 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -149,7 +149,7 @@ typedef struct drm_mga_private {
unsigned int agp_size;
} drm_mga_private_t;
-extern struct drm_ioctl_desc mga_ioctls[];
+extern const struct drm_ioctl_desc mga_ioctls[];
extern int mga_max_ioctl;
/* mga_dma.c */
@@ -164,60 +164,41 @@ extern int mga_dma_reset(struct drm_device *dev, void *data,
extern int mga_dma_buffers(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
-extern int mga_driver_unload(struct drm_device * dev);
-extern void mga_driver_lastclose(struct drm_device * dev);
-extern int mga_driver_dma_quiescent(struct drm_device * dev);
+extern int mga_driver_unload(struct drm_device *dev);
+extern void mga_driver_lastclose(struct drm_device *dev);
+extern int mga_driver_dma_quiescent(struct drm_device *dev);
-extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
+extern int mga_do_wait_for_idle(drm_mga_private_t *dev_priv);
-extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
-extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
-extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
+extern void mga_do_dma_flush(drm_mga_private_t *dev_priv);
+extern void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv);
+extern void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv);
-extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf);
+extern int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf);
/* mga_warp.c */
-extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
-extern int mga_warp_init(drm_mga_private_t * dev_priv);
+extern int mga_warp_install_microcode(drm_mga_private_t *dev_priv);
+extern int mga_warp_init(drm_mga_private_t *dev_priv);
/* mga_irq.c */
extern int mga_enable_vblank(struct drm_device *dev, int crtc);
extern void mga_disable_vblank(struct drm_device *dev, int crtc);
extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
-extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
-extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
-extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
-extern void mga_driver_irq_preinstall(struct drm_device * dev);
+extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence);
+extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+extern irqreturn_t mga_driver_irq_handler(int irq, void *arg);
+extern void mga_driver_irq_preinstall(struct drm_device *dev);
extern int mga_driver_irq_postinstall(struct drm_device *dev);
-extern void mga_driver_irq_uninstall(struct drm_device * dev);
+extern void mga_driver_irq_uninstall(struct drm_device *dev);
extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
-#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER()
+#define mga_flush_write_combine() wmb()
-#if defined(__linux__) && defined(__alpha__)
-#define MGA_BASE( reg ) ((unsigned long)(dev_priv->mmio->handle))
-#define MGA_ADDR( reg ) (MGA_BASE(reg) + reg)
-
-#define MGA_DEREF( reg ) *(volatile u32 *)MGA_ADDR( reg )
-#define MGA_DEREF8( reg ) *(volatile u8 *)MGA_ADDR( reg )
-
-#define MGA_READ( reg ) (_MGA_READ((u32 *)MGA_ADDR(reg)))
-#define MGA_READ8( reg ) (_MGA_READ((u8 *)MGA_ADDR(reg)))
-#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0)
-#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0)
-
-static inline u32 _MGA_READ(u32 * addr)
-{
- DRM_MEMORYBARRIER();
- return *(volatile u32 *)addr;
-}
-#else
-#define MGA_READ8( reg ) DRM_READ8(dev_priv->mmio, (reg))
-#define MGA_READ( reg ) DRM_READ32(dev_priv->mmio, (reg))
-#define MGA_WRITE8( reg, val ) DRM_WRITE8(dev_priv->mmio, (reg), (val))
-#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val))
-#endif
+#define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg))
+#define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg))
+#define MGA_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val))
+#define MGA_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val))
#define DWGREG0 0x1c00
#define DWGREG0_END 0x1dff
@@ -233,40 +214,39 @@ static inline u32 _MGA_READ(u32 * addr)
 * Helper macros...
*/
-#define MGA_EMIT_STATE( dev_priv, dirty ) \
+#define MGA_EMIT_STATE(dev_priv, dirty) \
do { \
- if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) { \
- if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) { \
- mga_g400_emit_state( dev_priv ); \
- } else { \
- mga_g200_emit_state( dev_priv ); \
- } \
+ if ((dirty) & ~MGA_UPLOAD_CLIPRECTS) { \
+ if (dev_priv->chipset >= MGA_CARD_TYPE_G400) \
+ mga_g400_emit_state(dev_priv); \
+ else \
+ mga_g200_emit_state(dev_priv); \
} \
} while (0)
-#define WRAP_TEST_WITH_RETURN( dev_priv ) \
+#define WRAP_TEST_WITH_RETURN(dev_priv) \
do { \
- if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
- if ( mga_is_idle( dev_priv ) ) { \
- mga_do_dma_wrap_end( dev_priv ); \
- } else if ( dev_priv->prim.space < \
- dev_priv->prim.high_mark ) { \
- if ( MGA_DMA_DEBUG ) \
- DRM_INFO( "wrap...\n"); \
- return -EBUSY; \
+ if (test_bit(0, &dev_priv->prim.wrapped)) { \
+ if (mga_is_idle(dev_priv)) { \
+ mga_do_dma_wrap_end(dev_priv); \
+ } else if (dev_priv->prim.space < \
+ dev_priv->prim.high_mark) { \
+ if (MGA_DMA_DEBUG) \
+ DRM_INFO("wrap...\n"); \
+ return -EBUSY; \
} \
} \
} while (0)
-#define WRAP_WAIT_WITH_RETURN( dev_priv ) \
+#define WRAP_WAIT_WITH_RETURN(dev_priv) \
do { \
- if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
- if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \
- if ( MGA_DMA_DEBUG ) \
- DRM_INFO( "wrap...\n"); \
- return -EBUSY; \
+ if (test_bit(0, &dev_priv->prim.wrapped)) { \
+ if (mga_do_wait_for_idle(dev_priv) < 0) { \
+ if (MGA_DMA_DEBUG) \
+ DRM_INFO("wrap...\n"); \
+ return -EBUSY; \
} \
- mga_do_dma_wrap_end( dev_priv ); \
+ mga_do_dma_wrap_end(dev_priv); \
} \
} while (0)
@@ -280,12 +260,12 @@ do { \
#define DMA_BLOCK_SIZE (5 * sizeof(u32))
-#define BEGIN_DMA( n ) \
+#define BEGIN_DMA(n) \
do { \
- if ( MGA_VERBOSE ) { \
- DRM_INFO( "BEGIN_DMA( %d )\n", (n) ); \
- DRM_INFO( " space=0x%x req=0x%Zx\n", \
- dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \
+ if (MGA_VERBOSE) { \
+ DRM_INFO("BEGIN_DMA(%d)\n", (n)); \
+ DRM_INFO(" space=0x%x req=0x%Zx\n", \
+ dev_priv->prim.space, (n) * DMA_BLOCK_SIZE); \
} \
prim = dev_priv->prim.start; \
write = dev_priv->prim.tail; \
@@ -293,9 +273,9 @@ do { \
#define BEGIN_DMA_WRAP() \
do { \
- if ( MGA_VERBOSE ) { \
- DRM_INFO( "BEGIN_DMA()\n" ); \
- DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \
+ if (MGA_VERBOSE) { \
+ DRM_INFO("BEGIN_DMA()\n"); \
+ DRM_INFO(" space=0x%x\n", dev_priv->prim.space); \
} \
prim = dev_priv->prim.start; \
write = dev_priv->prim.tail; \
@@ -304,72 +284,68 @@ do { \
#define ADVANCE_DMA() \
do { \
dev_priv->prim.tail = write; \
- if ( MGA_VERBOSE ) { \
- DRM_INFO( "ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \
- write, dev_priv->prim.space ); \
- } \
+ if (MGA_VERBOSE) \
+ DRM_INFO("ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \
+ write, dev_priv->prim.space); \
} while (0)
#define FLUSH_DMA() \
do { \
- if ( 0 ) { \
- DRM_INFO( "\n" ); \
- DRM_INFO( " tail=0x%06x head=0x%06lx\n", \
- dev_priv->prim.tail, \
- (unsigned long)(MGA_READ(MGA_PRIMADDRESS) - \
- dev_priv->primary->offset)); \
+ if (0) { \
+ DRM_INFO("\n"); \
+ DRM_INFO(" tail=0x%06x head=0x%06lx\n", \
+ dev_priv->prim.tail, \
+ (unsigned long)(MGA_READ(MGA_PRIMADDRESS) - \
+ dev_priv->primary->offset)); \
} \
- if ( !test_bit( 0, &dev_priv->prim.wrapped ) ) { \
- if ( dev_priv->prim.space < \
- dev_priv->prim.high_mark ) { \
- mga_do_dma_wrap_start( dev_priv ); \
- } else { \
- mga_do_dma_flush( dev_priv ); \
- } \
+ if (!test_bit(0, &dev_priv->prim.wrapped)) { \
+ if (dev_priv->prim.space < dev_priv->prim.high_mark) \
+ mga_do_dma_wrap_start(dev_priv); \
+ else \
+ mga_do_dma_flush(dev_priv); \
} \
} while (0)
/* Never use this, always use DMA_BLOCK(...) for primary DMA output.
*/
-#define DMA_WRITE( offset, val ) \
+#define DMA_WRITE(offset, val) \
do { \
- if ( MGA_VERBOSE ) { \
- DRM_INFO( " DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \
- (u32)(val), write + (offset) * sizeof(u32) ); \
- } \
+ if (MGA_VERBOSE) \
+ DRM_INFO(" DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \
+ (u32)(val), write + (offset) * sizeof(u32)); \
*(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \
} while (0)
-#define DMA_BLOCK( reg0, val0, reg1, val1, reg2, val2, reg3, val3 ) \
+#define DMA_BLOCK(reg0, val0, reg1, val1, reg2, val2, reg3, val3) \
do { \
- DMA_WRITE( 0, ((DMAREG( reg0 ) << 0) | \
- (DMAREG( reg1 ) << 8) | \
- (DMAREG( reg2 ) << 16) | \
- (DMAREG( reg3 ) << 24)) ); \
- DMA_WRITE( 1, val0 ); \
- DMA_WRITE( 2, val1 ); \
- DMA_WRITE( 3, val2 ); \
- DMA_WRITE( 4, val3 ); \
+ DMA_WRITE(0, ((DMAREG(reg0) << 0) | \
+ (DMAREG(reg1) << 8) | \
+ (DMAREG(reg2) << 16) | \
+ (DMAREG(reg3) << 24))); \
+ DMA_WRITE(1, val0); \
+ DMA_WRITE(2, val1); \
+ DMA_WRITE(3, val2); \
+ DMA_WRITE(4, val3); \
write += DMA_BLOCK_SIZE; \
} while (0)
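
DMA_BLOCK() emits one 5-dword block: the first dword packs the four DMAREG() register indices, the next four carry their values. A hedged usage sketch -- it assumes a function with dev_priv in scope, run after the usual WRAP_TEST_WITH_RETURN() space check, and uses the MGA_DMAPAD no-op register:

	volatile u8 *prim;	/* set by BEGIN_DMA() */
	unsigned int write;	/* set by BEGIN_DMA() */

	BEGIN_DMA(1);		/* one block's worth of output */
	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000);
	ADVANCE_DMA();		/* publish the new tail */
	FLUSH_DMA();		/* kick or wrap the primary stream */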
/* Buffer aging via primary DMA stream head pointer.
*/
-#define SET_AGE( age, h, w ) \
+#define SET_AGE(age, h, w) \
do { \
(age)->head = h; \
(age)->wrap = w; \
} while (0)
-#define TEST_AGE( age, h, w ) ( (age)->wrap < w || \
- ( (age)->wrap == w && \
- (age)->head < h ) )
+#define TEST_AGE(age, h, w) ((age)->wrap < w || \
+ ((age)->wrap == w && \
+ (age)->head < h))
-#define AGE_BUFFER( buf_priv ) \
+#define AGE_BUFFER(buf_priv) \
do { \
drm_mga_freelist_t *entry = (buf_priv)->list_entry; \
- if ( (buf_priv)->dispatched ) { \
+ if ((buf_priv)->dispatched) { \
entry->age.head = (dev_priv->prim.tail + \
dev_priv->primary->offset); \
entry->age.wrap = dev_priv->sarea_priv->last_wrap; \
@@ -681,7 +657,7 @@ do { \
/* Simple idle test.
*/
-static __inline__ int mga_is_idle(drm_mga_private_t * dev_priv)
+static __inline__ int mga_is_idle(drm_mga_private_t *dev_priv)
{
u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
return (status == MGA_ENDPRDMASTS);
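
The SET_AGE()/TEST_AGE() pair earlier in this header implements buffer aging against the primary stream's head pointer and wrap count: AGE_BUFFER() records where the stream tail was at dispatch, and a buffer is reusable once the hardware has moved past that point. A sketch loosely mirroring the driver's mga_freelist_get() (simplified, not the actual function):

static struct drm_buf *try_reclaim_tail(drm_mga_private_t *dev_priv)
{
	drm_mga_freelist_t *tail = dev_priv->tail;
	u32 head = MGA_READ(MGA_PRIMADDRESS);
	u32 wrap = dev_priv->sarea_priv->last_wrap;

	/* hardware has passed the recorded position: safe to recycle */
	if (TEST_AGE(&tail->age, head, wrap))
		return tail->buf;
	return NULL;
}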
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index c1f877b7bac..729bfd56b55 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -32,9 +32,9 @@
*/
#include <linux/compat.h>
-#include "drmP.h"
-#include "drm.h"
-#include "mga_drm.h"
+#include <drm/drmP.h>
+#include <drm/mga_drm.h>
+#include "mga_drv.h"
typedef struct drm32_mga_init {
int func;
@@ -214,7 +214,7 @@ long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
if (fn != NULL)
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index daa6041a483..1b071b8ff9d 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -31,9 +31,8 @@
* Eric Anholt <anholt@FreeBSD.org>
*/
-#include "drmP.h"
-#include "drm.h"
-#include "mga_drm.h"
+#include <drm/drmP.h>
+#include <drm/mga_drm.h>
#include "mga_drv.h"
u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
@@ -48,7 +47,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
}
-irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t mga_driver_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
@@ -76,12 +75,11 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
/* In addition to clearing the interrupt-pending bit, we
* have to write to MGA_PRIMEND to re-start the DMA operation.
*/
- if ((prim_start & ~0x03) != (prim_end & ~0x03)) {
+ if ((prim_start & ~0x03) != (prim_end & ~0x03))
MGA_WRITE(MGA_PRIMEND, prim_end);
- }
atomic_inc(&dev_priv->last_fence_retired);
- DRM_WAKEUP(&dev_priv->fence_queue);
+ wake_up(&dev_priv->fence_queue);
handled = 1;
}
@@ -120,7 +118,7 @@ void mga_disable_vblank(struct drm_device *dev, int crtc)
/* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
}
-int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
+int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
unsigned int cur_fence;
@@ -130,7 +128,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
* by about a day rather than she wants to wait for years
* using fences.
*/
- DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
(((cur_fence = atomic_read(&dev_priv->last_fence_retired))
- *sequence) <= (1 << 23)));
@@ -139,7 +137,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
return ret;
}
-void mga_driver_irq_preinstall(struct drm_device * dev)
+void mga_driver_irq_preinstall(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
@@ -153,7 +151,7 @@ int mga_driver_irq_postinstall(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
- DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
+ init_waitqueue_head(&dev_priv->fence_queue);
/* Turn on soft trap interrupt. Vertical blank interrupts are enabled
* in mga_enable_vblank.
@@ -162,7 +160,7 @@ int mga_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-void mga_driver_irq_uninstall(struct drm_device * dev)
+void mga_driver_irq_uninstall(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
if (!dev_priv)
@@ -171,5 +169,5 @@ void mga_driver_irq_uninstall(struct drm_device * dev)
/* Disable *all* interrupts */
MGA_WRITE(MGA_IEN, 0);
- dev->irq_enabled = 0;
+ dev->irq_enabled = false;
}
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index a53b848e0f1..792f924496f 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -32,17 +32,16 @@
* Gareth Hughes <gareth@valinux.com>
*/
-#include "drmP.h"
-#include "drm.h"
-#include "mga_drm.h"
+#include <drm/drmP.h>
+#include <drm/mga_drm.h>
#include "mga_drv.h"
/* ================================================================
* DMA hardware state programming functions
*/
-static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
- struct drm_clip_rect * box)
+static void mga_emit_clip_rect(drm_mga_private_t *dev_priv,
+ struct drm_clip_rect *box)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -66,7 +65,7 @@ static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
ADVANCE_DMA();
}
-static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g200_emit_context(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -89,7 +88,7 @@ static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g400_emit_context(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -116,7 +115,7 @@ static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g200_emit_tex0(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
@@ -144,7 +143,7 @@ static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g400_emit_tex0(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
@@ -184,7 +183,7 @@ static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g400_emit_tex1(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
@@ -223,7 +222,7 @@ static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g200_emit_pipe(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int pipe = sarea_priv->warp_pipe;
@@ -250,7 +249,7 @@ static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g400_emit_pipe(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int pipe = sarea_priv->warp_pipe;
@@ -327,7 +326,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static void mga_g200_emit_state(drm_mga_private_t * dev_priv)
+static void mga_g200_emit_state(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
@@ -348,7 +347,7 @@ static void mga_g200_emit_state(drm_mga_private_t * dev_priv)
}
}
-static void mga_g400_emit_state(drm_mga_private_t * dev_priv)
+static void mga_g400_emit_state(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
@@ -381,7 +380,7 @@ static void mga_g400_emit_state(drm_mga_private_t * dev_priv)
/* Disallow all write destinations except the front and backbuffer.
*/
-static int mga_verify_context(drm_mga_private_t * dev_priv)
+static int mga_verify_context(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -400,7 +399,7 @@ static int mga_verify_context(drm_mga_private_t * dev_priv)
/* Disallow texture reads from PCI space.
*/
-static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit)
+static int mga_verify_tex(drm_mga_private_t *dev_priv, int unit)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit];
@@ -417,7 +416,7 @@ static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit)
return 0;
}
-static int mga_verify_state(drm_mga_private_t * dev_priv)
+static int mga_verify_state(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
@@ -446,7 +445,7 @@ static int mga_verify_state(drm_mga_private_t * dev_priv)
return (ret == 0);
}
-static int mga_verify_iload(drm_mga_private_t * dev_priv,
+static int mga_verify_iload(drm_mga_private_t *dev_priv,
unsigned int dstorg, unsigned int length)
{
if (dstorg < dev_priv->texture_offset ||
@@ -465,7 +464,7 @@ static int mga_verify_iload(drm_mga_private_t * dev_priv,
return 0;
}
-static int mga_verify_blit(drm_mga_private_t * dev_priv,
+static int mga_verify_blit(drm_mga_private_t *dev_priv,
unsigned int srcorg, unsigned int dstorg)
{
if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
@@ -480,7 +479,7 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv,
*
*/
-static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear)
+static void mga_dma_dispatch_clear(struct drm_device *dev, drm_mga_clear_t *clear)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -568,7 +567,7 @@ static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * cl
FLUSH_DMA();
}
-static void mga_dma_dispatch_swap(struct drm_device * dev)
+static void mga_dma_dispatch_swap(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -622,7 +621,7 @@ static void mga_dma_dispatch_swap(struct drm_device * dev)
DRM_DEBUG("... done.\n");
}
-static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
+static void mga_dma_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
@@ -669,7 +668,7 @@ static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * bu
FLUSH_DMA();
}
-static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf,
+static void mga_dma_dispatch_indices(struct drm_device *dev, struct drm_buf *buf,
unsigned int start, unsigned int end)
{
drm_mga_private_t *dev_priv = dev->dev_private;
@@ -718,7 +717,7 @@ static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * b
 /* This copies a 64-byte aligned agp region to the framebuffer with a
* standard blit, the ioctl needs to do checking.
*/
-static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf,
+static void mga_dma_dispatch_iload(struct drm_device *dev, struct drm_buf *buf,
unsigned int dstorg, unsigned int length)
{
drm_mga_private_t *dev_priv = dev->dev_private;
@@ -766,7 +765,7 @@ static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf
FLUSH_DMA();
}
-static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit)
+static void mga_dma_dispatch_blit(struct drm_device *dev, drm_mga_blit_t *blit)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -801,9 +800,8 @@ static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit
int w = pbox[i].x2 - pbox[i].x1 - 1;
int start;
- if (blit->ydir == -1) {
+ if (blit->ydir == -1)
srcy = blit->height - srcy - 1;
- }
start = srcy * blit->src_pitch + srcx;
@@ -1022,7 +1020,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil
switch (param->param) {
case MGA_PARAM_IRQ_NR:
- value = drm_dev_to_irq(dev);
+ value = dev->pdev->irq;
break;
case MGA_PARAM_CARD_TYPE:
value = dev_priv->chipset;
@@ -1031,7 +1029,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil
return -EINVAL;
}
- if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+ if (copy_to_user(param->value, &value, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
@@ -1085,20 +1083,20 @@ file_priv)
return 0;
}
-struct drm_ioctl_desc mga_ioctls[] = {
- DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+const struct drm_ioctl_desc mga_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
-int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
+int mga_max_ioctl = ARRAY_SIZE(mga_ioctls);
diff --git a/drivers/gpu/drm/mga/mga_warp.c b/drivers/gpu/drm/mga/mga_warp.c
index 9aad4847afd..0b76352260a 100644
--- a/drivers/gpu/drm/mga/mga_warp.c
+++ b/drivers/gpu/drm/mga/mga_warp.c
@@ -30,10 +30,10 @@
#include <linux/firmware.h>
#include <linux/ihex.h>
#include <linux/platform_device.h>
+#include <linux/module.h>
-#include "drmP.h"
-#include "drm.h"
-#include "mga_drm.h"
+#include <drm/drmP.h>
+#include <drm/mga_drm.h>
#include "mga_drv.h"
#define FIRMWARE_G200 "matrox/g200_warp.fw"
@@ -46,7 +46,7 @@ MODULE_FIRMWARE(FIRMWARE_G400);
#define WARP_UCODE_SIZE(size) ALIGN(size, MGA_WARP_CODE_ALIGN)
-int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
+int mga_warp_install_microcode(drm_mga_private_t *dev_priv)
{
unsigned char *vcbase = dev_priv->warp->handle;
unsigned long pcbase = dev_priv->warp->offset;
@@ -133,7 +133,7 @@ out:
#define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE)
-int mga_warp_init(drm_mga_private_t * dev_priv)
+int mga_warp_init(drm_mga_private_t *dev_priv)
{
u32 wmisc;
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
new file mode 100644
index 00000000000..3a1c5fbae54
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -0,0 +1,16 @@
+config DRM_MGAG200
+ tristate "Kernel modesetting driver for MGA G200 server engines"
+ depends on DRM && PCI
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_TTM
+ help
+ This is a KMS driver for the MGA G200 server chips; it
+ does not support the original MGA G200 or any of the desktop
+ chips. It requires version 0.3.0 of the modesetting userspace
+ driver, and a version of the mga driver that will fail on
+ KMS-enabled devices.
+
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
new file mode 100644
index 00000000000..a9a0300f09f
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -Iinclude/drm
+mgag200-y := mgag200_main.o mgag200_mode.o mgag200_cursor.o \
+ mgag200_drv.o mgag200_fb.o mgag200_i2c.o mgag200_ttm.o
+
+obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
new file mode 100644
index 00000000000..9f9780b7ddf
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright 2013 Matrox Graphics
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Author: Christopher Harvey <charvey@matrox.com>
+ */
+
+#include <drm/drmP.h>
+#include "mgag200_drv.h"
+
+static bool warn_transparent = true;
+static bool warn_palette = true;
+
+/*
+ Hide the cursor off screen. We can't disable the cursor hardware because it
+ takes too long to re-activate and causes momentary corruption.
+*/
+static void mga_hide_cursor(struct mga_device *mdev)
+{
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+ if (mdev->cursor.pixels_1->pin_count)
+ mgag200_bo_unpin(mdev->cursor.pixels_1);
+ if (mdev->cursor.pixels_2->pin_count)
+ mgag200_bo_unpin(mdev->cursor.pixels_2);
+}
+
+int mga_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = (struct mga_device *)dev->dev_private;
+ struct mgag200_bo *pixels_1 = mdev->cursor.pixels_1;
+ struct mgag200_bo *pixels_2 = mdev->cursor.pixels_2;
+ struct mgag200_bo *pixels_current = mdev->cursor.pixels_current;
+ struct mgag200_bo *pixels_prev = mdev->cursor.pixels_prev;
+ struct drm_gem_object *obj;
+ struct mgag200_bo *bo = NULL;
+ int ret = 0;
+ unsigned int i, row, col;
+ uint32_t colour_set[16];
+ uint32_t *next_space = &colour_set[0];
+ uint32_t *palette_iter;
+ uint32_t this_colour;
+ bool found = false;
+ int colour_count = 0;
+ u64 gpu_addr;
+ u8 reg_index;
+ u8 this_row[48];
+
+ if (!pixels_1 || !pixels_2) {
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+ return -ENOTSUPP; /* Didn't allocate space for cursors */
+ }
+
+ if ((width != 64 || height != 64) && handle) {
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+ return -EINVAL;
+ }
+
+ BUG_ON(pixels_1 != pixels_current && pixels_1 != pixels_prev);
+ BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
+ BUG_ON(pixels_current == pixels_prev);
+
+ ret = mgag200_bo_reserve(pixels_1, true);
+ if (ret) {
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+ return ret;
+ }
+ ret = mgag200_bo_reserve(pixels_2, true);
+ if (ret) {
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+ mgag200_bo_unreserve(pixels_1);
+ return ret;
+ }
+
+ if (!handle) {
+ mga_hide_cursor(mdev);
+ ret = 0;
+ goto out1;
+ }
+
+ /* Move cursor buffers into VRAM if they aren't already */
+ if (!pixels_1->pin_count) {
+ ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM,
+ &mdev->cursor.pixels_1_gpu_addr);
+ if (ret)
+ goto out1;
+ }
+ if (!pixels_2->pin_count) {
+ ret = mgag200_bo_pin(pixels_2, TTM_PL_FLAG_VRAM,
+ &mdev->cursor.pixels_2_gpu_addr);
+ if (ret) {
+ mgag200_bo_unpin(pixels_1);
+ goto out1;
+ }
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = -ENOENT;
+ goto out1;
+ }
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ bo = gem_to_mga_bo(obj);
+ ret = mgag200_bo_reserve(bo, true);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "failed to reserve user bo\n");
+ goto out1;
+ }
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "failed to kmap user buffer updates\n");
+ goto out2;
+ }
+ }
+
+ memset(&colour_set[0], 0, sizeof(uint32_t)*16);
+ /* width*height*4 = 16384 */
+ for (i = 0; i < 16384; i += 4) {
+ this_colour = ioread32(bo->kmap.virtual + i);
+ /* No transparency */
+ if (this_colour>>24 != 0xff &&
+ this_colour>>24 != 0x0) {
+ if (warn_transparent) {
+ dev_info(&dev->pdev->dev, "Video card doesn't support cursors with partial transparency.\n");
+ dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n");
+ warn_transparent = false; /* Only tell the user once. */
+ }
+ ret = -EINVAL;
+ goto out3;
+ }
+ /* Don't need to store transparent pixels as colours */
+ if (this_colour>>24 == 0x0)
+ continue;
+ found = false;
+ for (palette_iter = &colour_set[0]; palette_iter != next_space; palette_iter++) {
+ if (*palette_iter == this_colour) {
+ found = true;
+ break;
+ }
+ }
+ if (found)
+ continue;
+ /* We only support 4-bit palettized cursors */
+ if (colour_count >= 16) {
+ if (warn_palette) {
+ dev_info(&dev->pdev->dev, "Video card only supports cursors with up to 16 colours.\n");
+ dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n");
+ warn_palette = false; /* Only tell the user once. */
+ }
+ ret = -EINVAL;
+ goto out3;
+ }
+ *next_space = this_colour;
+ next_space++;
+ colour_count++;
+ }
+
+ /* Program colours from cursor icon into palette */
+ for (i = 0; i < colour_count; i++) {
+ if (i <= 2)
+ reg_index = 0x8 + i*0x4;
+ else
+ reg_index = 0x60 + i*0x3;
+ WREG_DAC(reg_index, colour_set[i] & 0xff);
+ WREG_DAC(reg_index+1, colour_set[i]>>8 & 0xff);
+ WREG_DAC(reg_index+2, colour_set[i]>>16 & 0xff);
+ BUG_ON((colour_set[i]>>24 & 0xff) != 0xff);
+ }
+
+ /* Map up-coming buffer to write colour indices */
+ if (!pixels_prev->kmap.virtual) {
+ ret = ttm_bo_kmap(&pixels_prev->bo, 0,
+ pixels_prev->bo.num_pages,
+ &pixels_prev->kmap);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "failed to kmap cursor updates\n");
+ goto out3;
+ }
+ }
+
+ /* now write colour indices into hardware cursor buffer */
+ for (row = 0; row < 64; row++) {
+ memset(&this_row[0], 0, 48);
+ for (col = 0; col < 64; col++) {
+ this_colour = ioread32(bo->kmap.virtual + 4*(col + 64*row));
+ /* write transparent pixels */
+ if (this_colour>>24 == 0x0) {
+ this_row[47 - col/8] |= 0x80>>(col%8);
+ continue;
+ }
+
+ /* write colour index here */
+ for (i = 0; i < colour_count; i++) {
+ if (colour_set[i] == this_colour) {
+ if (col % 2)
+ this_row[col/2] |= i<<4;
+ else
+ this_row[col/2] |= i;
+ break;
+ }
+ }
+ }
+ memcpy_toio(pixels_prev->kmap.virtual + row*48, &this_row[0], 48);
+ }
+
+ /* Program gpu address of cursor buffer */
+ if (pixels_prev == pixels_1)
+ gpu_addr = mdev->cursor.pixels_1_gpu_addr;
+ else
+ gpu_addr = mdev->cursor.pixels_2_gpu_addr;
+ WREG_DAC(MGA1064_CURSOR_BASE_ADR_LOW, (u8)((gpu_addr>>10) & 0xff));
+ WREG_DAC(MGA1064_CURSOR_BASE_ADR_HI, (u8)((gpu_addr>>18) & 0x3f));
+
+ /* Adjust cursor control register to turn on the cursor */
+ WREG_DAC(MGA1064_CURSOR_CTL, 4); /* 16-colour palettized cursor mode */
+
+ /* Now swap internal buffer pointers */
+ if (mdev->cursor.pixels_1 == mdev->cursor.pixels_prev) {
+ mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
+ mdev->cursor.pixels_current = mdev->cursor.pixels_1;
+ } else if (mdev->cursor.pixels_1 == mdev->cursor.pixels_current) {
+ mdev->cursor.pixels_prev = mdev->cursor.pixels_1;
+ mdev->cursor.pixels_current = mdev->cursor.pixels_2;
+ } else {
+ BUG();
+ }
+ ret = 0;
+
+ ttm_bo_kunmap(&pixels_prev->kmap);
+ out3:
+ ttm_bo_kunmap(&bo->kmap);
+ out2:
+ mgag200_bo_unreserve(bo);
+ out1:
+ if (ret)
+ mga_hide_cursor(mdev);
+ mgag200_bo_unreserve(pixels_1);
+ mgag200_bo_unreserve(pixels_2);
+ return ret;
+}
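
Each hardware cursor row is 48 bytes: bytes 0-31 hold two 4-bit palette indices per byte (even columns in the low nibble), and bytes 32-47 hold a 64-bit transparency mask filled MSB-first from the end of the row. A hypothetical helper mirroring the packing loop above:

static void pack_cursor_row(const u8 idx[64], const bool transparent[64],
			    u8 row[48])
{
	unsigned int col;

	memset(row, 0, 48);
	for (col = 0; col < 64; col++) {
		if (transparent[col]) {
			/* bytes 32..47: one transparency bit per pixel */
			row[47 - col / 8] |= 0x80 >> (col % 8);
			continue;
		}
		/* bytes 0..31: two 4-bit indices per byte */
		if (col % 2)
			row[col / 2] |= idx[col] << 4;
		else
			row[col / 2] |= idx[col];
	}
}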
+
+int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct mga_device *mdev = (struct mga_device *)crtc->dev->dev_private;
+ /* Our origin is at (64,64) */
+ x += 64;
+ y += 64;
+
+ BUG_ON(x <= 0);
+ BUG_ON(y <= 0);
+ BUG_ON(x & ~0xffff);
+ BUG_ON(y & ~0xffff);
+
+ WREG8(MGA_CURPOSXL, x & 0xff);
+ WREG8(MGA_CURPOSXH, (x>>8) & 0xff);
+
+ WREG8(MGA_CURPOSYL, y & 0xff);
+ WREG8(MGA_CURPOSYH, (y>>8) & 0xff);
+ return 0;
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
new file mode 100644
index 00000000000..f15ea3c4a90
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include <drm/drmP.h>
+
+#include "mgag200_drv.h"
+
+#include <drm/drm_pciids.h>
+
+/*
+ * This is the generic driver code: it binds the driver to the drm core,
+ * which then performs further device association and calls our graphics
+ * init functions.
+ */
+int mgag200_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, mgag200_modeset, int, 0400);
+
+static struct drm_driver driver;
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
+ { PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
+ { PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
+ { PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
+ { PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH },
+ { PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return;
+
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+ remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
+ kfree(ap);
+}
+
+
+static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ mgag200_kick_out_firmware_fb(pdev);
+
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void mga_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static const struct file_operations mgag200_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = mgag200_mmap,
+ .poll = drm_poll,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .read = drm_read,
+};
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET,
+ .load = mgag200_driver_load,
+ .unload = mgag200_driver_unload,
+ .fops = &mgag200_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ .gem_free_object = mgag200_gem_free_object,
+ .dumb_create = mgag200_dumb_create,
+ .dumb_map_offset = mgag200_dumb_mmap_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+};
+
+static struct pci_driver mgag200_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = mga_pci_probe,
+ .remove = mga_pci_remove,
+};
+
+static int __init mgag200_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && mgag200_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (mgag200_modeset == 0)
+ return -EINVAL;
+ return drm_pci_init(&driver, &mgag200_pci_driver);
+}
+
+static void __exit mgag200_exit(void)
+{
+ drm_pci_exit(&driver, &mgag200_pci_driver);
+}
+
+module_init(mgag200_init);
+module_exit(mgag200_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
new file mode 100644
index 00000000000..cf11ee68a6d
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+#ifndef __MGAG200_DRV_H__
+#define __MGAG200_DRV_H__
+
+#include <video/vga.h>
+
+#include <drm/drm_fb_helper.h>
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_memory.h>
+#include <drm/ttm/ttm_module.h>
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#include "mgag200_reg.h"
+
+#define DRIVER_AUTHOR "Matthew Garrett"
+
+#define DRIVER_NAME "mgag200"
+#define DRIVER_DESC "MGA G200 SE"
+#define DRIVER_DATE "20110418"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+#define MGAG200FB_CONN_LIMIT 1
+
+#define RREG8(reg) ioread8(((void __iomem *)mdev->rmmio) + (reg))
+#define WREG8(reg, v) iowrite8(v, ((void __iomem *)mdev->rmmio) + (reg))
+#define RREG32(reg) ioread32(((void __iomem *)mdev->rmmio) + (reg))
+#define WREG32(reg, v) iowrite32(v, ((void __iomem *)mdev->rmmio) + (reg))
+
+#define ATTR_INDEX 0x1fc0
+#define ATTR_DATA 0x1fc1
+
+#define WREG_ATTR(reg, v) \
+ do { \
+ RREG8(0x1fda); \
+ WREG8(ATTR_INDEX, reg); \
+ WREG8(ATTR_DATA, v); \
+ } while (0) \
+
+#define WREG_SEQ(reg, v) \
+ do { \
+ WREG8(MGAREG_SEQ_INDEX, reg); \
+ WREG8(MGAREG_SEQ_DATA, v); \
+ } while (0) \
+
+#define WREG_CRT(reg, v) \
+ do { \
+ WREG8(MGAREG_CRTC_INDEX, reg); \
+ WREG8(MGAREG_CRTC_DATA, v); \
+ } while (0) \
+
+
+#define WREG_ECRT(reg, v) \
+ do { \
+ WREG8(MGAREG_CRTCEXT_INDEX, reg); \
+ WREG8(MGAREG_CRTCEXT_DATA, v); \
+ } while (0) \
+
+#define GFX_INDEX 0x1fce
+#define GFX_DATA 0x1fcf
+
+#define WREG_GFX(reg, v) \
+ do { \
+ WREG8(GFX_INDEX, reg); \
+ WREG8(GFX_DATA, v); \
+ } while (0) \
+
+#define DAC_INDEX 0x3c00
+#define DAC_DATA 0x3c0a
+
+#define WREG_DAC(reg, v) \
+ do { \
+ WREG8(DAC_INDEX, reg); \
+ WREG8(DAC_DATA, v); \
+ } while (0) \
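
All of the WREG_* helpers above follow the same VGA-style indexed access pattern: write the register number to an index port, then the value to the paired data port. For instance, WREG_DAC(reg, v) expands to the two byte writes below (the value 0x12 is illustrative only):

	WREG8(DAC_INDEX, MGA1064_CURSOR_BASE_ADR_LOW);	/* select register */
	WREG8(DAC_DATA, 0x12);				/* write its value */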
+
+#define MGA_MISC_OUT 0x1fc2
+#define MGA_MISC_IN 0x1fcc
+
+#define MGAG200_MAX_FB_HEIGHT 4096
+#define MGAG200_MAX_FB_WIDTH 4096
+
+#define MATROX_DPMS_CLEARED (-1)
+
+#define to_mga_crtc(x) container_of(x, struct mga_crtc, base)
+#define to_mga_encoder(x) container_of(x, struct mga_encoder, base)
+#define to_mga_connector(x) container_of(x, struct mga_connector, base)
+#define to_mga_framebuffer(x) container_of(x, struct mga_framebuffer, base)
+
+struct mga_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct mga_fbdev {
+ struct drm_fb_helper helper;
+ struct mga_framebuffer mfb;
+ void *sysram;
+ int size;
+ struct ttm_bo_kmap_obj mapping;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
+};
+
+struct mga_crtc {
+ struct drm_crtc base;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ int last_dpms;
+ bool enabled;
+};
+
+struct mga_mode_info {
+ bool mode_config_initialized;
+ struct mga_crtc *crtc;
+};
+
+struct mga_encoder {
+ struct drm_encoder base;
+ int last_dpms;
+};
+
+struct mga_i2c_chan {
+ struct i2c_adapter adapter;
+ struct drm_device *dev;
+ struct i2c_algo_bit_data bit;
+ int data, clock;
+};
+
+struct mga_connector {
+ struct drm_connector base;
+ struct mga_i2c_chan *i2c;
+};
+
+struct mga_cursor {
+ /*
+ We have to have 2 buffers for the cursor to avoid occasional
+ corruption while switching cursor icons.
+ If either of these is NULL, then don't do hardware cursors, and
+ fall back to software.
+ */
+ struct mgag200_bo *pixels_1;
+ struct mgag200_bo *pixels_2;
+ u64 pixels_1_gpu_addr, pixels_2_gpu_addr;
+	/* The currently displayed icon; this points to either pixels_1 or pixels_2 */
+ struct mgag200_bo *pixels_current;
+ /* The previously displayed icon */
+ struct mgag200_bo *pixels_prev;
+};
+
+struct mga_mc {
+ resource_size_t vram_size;
+ resource_size_t vram_base;
+ resource_size_t vram_window;
+};
+
+enum mga_type {
+ G200_SE_A,
+ G200_SE_B,
+ G200_WB,
+ G200_EV,
+ G200_EH,
+ G200_ER,
+};
+
+#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
+
+struct mga_device {
+ struct drm_device *dev;
+ unsigned long flags;
+
+ resource_size_t rmmio_base;
+ resource_size_t rmmio_size;
+ void __iomem *rmmio;
+
+ drm_local_map_t *framebuffer;
+
+ struct mga_mc mc;
+ struct mga_mode_info mode_info;
+
+ struct mga_fbdev *mfbdev;
+ struct mga_cursor cursor;
+
+ bool suspended;
+ int num_crtc;
+ enum mga_type type;
+ int has_sdram;
+ struct drm_display_mode mode;
+
+ int bpp_shifts[4];
+
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ } ttm;
+
+ /* SE model number stored in reg 0x1e24 */
+ u32 unique_rev_id;
+};
+
+struct mgag200_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+#define gem_to_mga_bo(gobj) container_of((gobj), struct mgag200_bo, gem)
+
+static inline struct mgag200_bo *
+mgag200_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct mgag200_bo, bo);
+}
+	/* mgag200_mode.c */
+int mgag200_modeset_init(struct mga_device *mdev);
+void mgag200_modeset_fini(struct mga_device *mdev);
+void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			   u16 blue, int regno);
+void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			   u16 *blue, int regno);
+
+ /* mgag200_fb.c */
+int mgag200_fbdev_init(struct mga_device *mdev);
+void mgag200_fbdev_fini(struct mga_device *mdev);
+
+ /* mgag200_main.c */
+int mgag200_framebuffer_init(struct drm_device *dev,
+ struct mga_framebuffer *mfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags);
+int mgag200_driver_unload(struct drm_device *dev);
+int mgag200_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+int mgag200_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+void mgag200_gem_free_object(struct drm_gem_object *obj);
+int
+mgag200_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset);
+ /* mgag200_i2c.c */
+struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
+void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+void mgag200_ttm_placement(struct mgag200_bo *bo, int domain);
+
+static inline int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ if (ret) {
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+static inline void mgag200_bo_unreserve(struct mgag200_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+int mgag200_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct mgag200_bo **pastbo);
+int mgag200_mm_init(struct mga_device *mdev);
+void mgag200_mm_fini(struct mga_device *mdev);
+int mgag200_mmap(struct file *filp, struct vm_area_struct *vma);
+int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int mgag200_bo_unpin(struct mgag200_bo *bo);
+int mgag200_bo_push_sysram(struct mgag200_bo *bo);
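+
+/*
+ * Every buffer-object operation brackets its work with
+ * mgag200_bo_reserve()/mgag200_bo_unreserve().  A minimal sketch of the
+ * pattern (this helper is illustrative only, not part of the driver):
+ */
+static inline int mgag200_bo_pin_example(struct mgag200_bo *bo, u64 *gpu_addr)
+{
+	int ret;
+
+	ret = mgag200_bo_reserve(bo, false);	/* may block */
+	if (ret)
+		return ret;
+	ret = mgag200_bo_pin(bo, TTM_PL_FLAG_VRAM, gpu_addr);
+	mgag200_bo_unreserve(bo);
+	return ret;
+}
+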
+ /* mgag200_cursor.c */
+int mga_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height);
+int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+
+#endif /* __MGAG200_DRV_H__ */
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
new file mode 100644
index 00000000000..13b7dd83faa
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include <linux/fb.h>
+
+#include "mgag200_drv.h"
+
+static void mga_dirty_update(struct mga_fbdev *mfbdev,
+ int x, int y, int width, int height)
+{
+ int i;
+ struct drm_gem_object *obj;
+ struct mgag200_bo *bo;
+ int src_offset, dst_offset;
+	int bpp = (mfbdev->mfb.base.bits_per_pixel + 7) / 8;
+ int ret = -EBUSY;
+ bool unmap = false;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;
+
+ obj = mfbdev->mfb.obj;
+ bo = gem_to_mga_bo(obj);
+
+ /*
+ * try and reserve the BO, if we fail with busy
+ * then the BO is being moved and we should
+ * store up the damage until later.
+ */
+ if (drm_can_sleep())
+ ret = mgag200_bo_reserve(bo, true);
+ if (ret) {
+ if (ret != -EBUSY)
+ return;
+
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&mfbdev->dirty_lock, flags);
+
+ if (mfbdev->y1 < y)
+ y = mfbdev->y1;
+ if (mfbdev->y2 > y2)
+ y2 = mfbdev->y2;
+ if (mfbdev->x1 < x)
+ x = mfbdev->x1;
+ if (mfbdev->x2 > x2)
+ x2 = mfbdev->x2;
+
+ if (store_for_later) {
+ mfbdev->x1 = x;
+ mfbdev->x2 = x2;
+ mfbdev->y1 = y;
+ mfbdev->y2 = y2;
+ spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
+ return;
+ }
+
+ mfbdev->x1 = mfbdev->y1 = INT_MAX;
+ mfbdev->x2 = mfbdev->y2 = 0;
+ spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
+
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ mgag200_bo_unreserve(bo);
+ return;
+ }
+ unmap = true;
+ }
+ for (i = y; i <= y2; i++) {
+ /* assume equal stride for now */
+ src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);
+
+ }
+ if (unmap)
+ ttm_bo_kunmap(&bo->kmap);
+
+ mgag200_bo_unreserve(bo);
+}
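+
+/*
+ * The clamping block above merges the new damage with whatever an
+ * earlier deferred update stored; an empty accumulator is encoded as
+ * x1 = y1 = INT_MAX, x2 = y2 = 0.  Stripped of locking and I/O, the
+ * merge is just a rectangle union (illustrative sketch, unused):
+ */
+static inline void mga_dirty_union(struct mga_fbdev *mfbdev,
+				   int x1, int y1, int x2, int y2)
+{
+	mfbdev->x1 = min(mfbdev->x1, x1);
+	mfbdev->y1 = min(mfbdev->y1, y1);
+	mfbdev->x2 = max(mfbdev->x2, x2);
+	mfbdev->y2 = max(mfbdev->y2, y2);
+}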
+
+static void mga_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct mga_fbdev *mfbdev = info->par;
+ sys_fillrect(info, rect);
+ mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+static void mga_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct mga_fbdev *mfbdev = info->par;
+ sys_copyarea(info, area);
+ mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
+ area->height);
+}
+
+static void mga_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct mga_fbdev *mfbdev = info->par;
+ sys_imageblit(info, image);
+ mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+static struct fb_ops mgag200fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = mga_fillrect,
+ .fb_copyarea = mga_copyarea,
+ .fb_imageblit = mga_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int mgag200fb_create_object(struct mga_fbdev *afbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ u32 size;
+ struct drm_gem_object *gobj;
+ int ret = 0;
+
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = mgag200_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int mgag200fb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
+ struct drm_device *dev = mfbdev->helper.dev;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct mga_device *mdev = dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *gobj = NULL;
+ struct device *device = &dev->pdev->dev;
+ struct mgag200_bo *bo;
+ int ret;
+ void *sysram;
+ int size;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ ret = mgag200fb_create_object(mfbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+ bo = gem_to_mga_bo(gobj);
+
+ sysram = vmalloc(size);
+ if (!sysram)
+ return -ENOMEM;
+
+	info = framebuffer_alloc(0, device);
+	if (info == NULL) {
+		vfree(sysram);
+		return -ENOMEM;
+	}
+
+ info->par = mfbdev;
+
+ ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
+ if (ret)
+ return ret;
+
+ mfbdev->sysram = sysram;
+ mfbdev->size = size;
+
+ fb = &mfbdev->mfb.base;
+
+ /* setup helper */
+ mfbdev->helper.fb = fb;
+ mfbdev->helper.fbdev = info;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ strcpy(info->fix.id, "mgadrmfb");
+
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &mgag200fb_ops;
+
+ /* setup aperture base/size for vesafb takeover */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = mdev->mc.vram_size;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ info->screen_base = sysram;
+ info->screen_size = size;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+ DRM_DEBUG_KMS("allocated %dx%d\n",
+ fb->width, fb->height);
+ return 0;
+out:
+ return ret;
+}
+
+static int mga_fbdev_destroy(struct drm_device *dev,
+ struct mga_fbdev *mfbdev)
+{
+ struct fb_info *info;
+ struct mga_framebuffer *mfb = &mfbdev->mfb;
+
+ if (mfbdev->helper.fbdev) {
+ info = mfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (mfb->obj) {
+ drm_gem_object_unreference_unlocked(mfb->obj);
+ mfb->obj = NULL;
+ }
+ drm_fb_helper_fini(&mfbdev->helper);
+ vfree(mfbdev->sysram);
+ drm_framebuffer_unregister_private(&mfb->base);
+ drm_framebuffer_cleanup(&mfb->base);
+
+ return 0;
+}
+
+static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
+ .gamma_set = mga_crtc_fb_gamma_set,
+ .gamma_get = mga_crtc_fb_gamma_get,
+ .fb_probe = mgag200fb_create,
+};
+
+int mgag200_fbdev_init(struct mga_device *mdev)
+{
+ struct mga_fbdev *mfbdev;
+ int ret;
+ int bpp_sel = 32;
+
+ /* prefer 16bpp on low end gpus with limited VRAM */
+ if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
+ bpp_sel = 16;
+
+ mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
+ if (!mfbdev)
+ return -ENOMEM;
+
+ mdev->mfbdev = mfbdev;
+ mfbdev->helper.funcs = &mga_fb_helper_funcs;
+ spin_lock_init(&mfbdev->dirty_lock);
+
+ ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
+ mdev->num_crtc, MGAG200FB_CONN_LIMIT);
+ if (ret)
+ return ret;
+
+ drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(mdev->dev);
+
+ drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
+
+ return 0;
+}
+
+void mgag200_fbdev_fini(struct mga_device *mdev)
+{
+ if (!mdev->mfbdev)
+ return;
+
+ mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
new file mode 100644
index 00000000000..d3dcf54e623
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <drm/drmP.h>
+
+#include "mgag200_drv.h"
+
+static int mga_i2c_read_gpio(struct mga_device *mdev)
+{
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ return RREG8(DAC_DATA);
+}
+
+static void mga_i2c_set_gpio(struct mga_device *mdev, int mask, int val)
+{
+ int tmp;
+
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
+ tmp = (RREG8(DAC_DATA) & mask) | val;
+ WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
+ WREG_DAC(MGA1064_GEN_IO_DATA, 0);
+}
+
+/*
+ * The GPIO lines are driven open-drain: a "high" releases the line by
+ * turning the pin back into an input (the pull-up takes it high),
+ * while a "low" enables the output driver, which pulls the line to 0.
+ * Hence the inverted sense of "state" here.
+ */
+static inline void mga_i2c_set(struct mga_device *mdev, int mask, int state)
+{
+ if (state)
+ state = 0;
+ else
+ state = mask;
+ mga_i2c_set_gpio(mdev, ~mask, state);
+}
+
+static void mga_gpio_setsda(void *data, int state)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ mga_i2c_set(mdev, i2c->data, state);
+}
+
+static void mga_gpio_setscl(void *data, int state)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ mga_i2c_set(mdev, i2c->clock, state);
+}
+
+static int mga_gpio_getsda(void *data)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ return (mga_i2c_read_gpio(mdev) & i2c->data) ? 1 : 0;
+}
+
+static int mga_gpio_getscl(void *data)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
+}
+
+struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
+{
+ struct mga_device *mdev = dev->dev_private;
+ struct mga_i2c_chan *i2c;
+ int ret;
+ int data, clock;
+
+ WREG_DAC(MGA1064_GEN_IO_CTL2, 1);
+ WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
+ WREG_DAC(MGA1064_GEN_IO_CTL, 0);
+
+ switch (mdev->type) {
+ case G200_SE_A:
+ case G200_SE_B:
+ case G200_EV:
+ case G200_WB:
+ data = 1;
+ clock = 2;
+ break;
+ case G200_EH:
+ case G200_ER:
+ data = 2;
+ clock = 1;
+ break;
+ default:
+ data = 2;
+ clock = 8;
+ break;
+ }
+
+ i2c = kzalloc(sizeof(struct mga_i2c_chan), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+
+ i2c->data = data;
+ i2c->clock = clock;
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.class = I2C_CLASS_DDC;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "mga i2c");
+
+ i2c->adapter.algo_data = &i2c->bit;
+
+ i2c->bit.udelay = 10;
+ i2c->bit.timeout = 2;
+ i2c->bit.data = i2c;
+ i2c->bit.setsda = mga_gpio_setsda;
+ i2c->bit.setscl = mga_gpio_setscl;
+ i2c->bit.getsda = mga_gpio_getsda;
+ i2c->bit.getscl = mga_gpio_getscl;
+
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ if (ret) {
+ kfree(i2c);
+ i2c = NULL;
+ }
+ return i2c;
+}
+
+void mgag200_i2c_destroy(struct mga_i2c_chan *i2c)
+{
+ if (!i2c)
+ return;
+ i2c_del_adapter(&i2c->adapter);
+ kfree(i2c);
+}
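+
+/*
+ * The adapter registered above is what the connector code hands to the
+ * EDID helpers when probing the monitor.  A hedged sketch of typical
+ * use (the helper name is hypothetical; drm_get_edid() performs the
+ * DDC transfers over the bit-banged bus):
+ */
+static inline struct edid *mga_i2c_get_edid(struct drm_connector *connector,
+					    struct mga_i2c_chan *i2c)
+{
+	return drm_get_edid(connector, &i2c->adapter);
+}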
+
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
new file mode 100644
index 00000000000..f6b283b8375
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -0,0 +1,367 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "mgag200_drv.h"
+
+static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
+ if (mga_fb->obj)
+ drm_gem_object_unreference_unlocked(mga_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static const struct drm_framebuffer_funcs mga_fb_funcs = {
+ .destroy = mga_user_framebuffer_destroy,
+};
+
+int mgag200_framebuffer_init(struct drm_device *dev,
+ struct mga_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
+ ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
+ if (ret) {
+ DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static struct drm_framebuffer *
+mgag200_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct mga_framebuffer *mga_fb;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL);
+ if (!mga_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(mga_fb);
+ return ERR_PTR(ret);
+ }
+ return &mga_fb->base;
+}
+
+static const struct drm_mode_config_funcs mga_mode_funcs = {
+ .fb_create = mgag200_user_framebuffer_create,
+};
+
+static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
+{
+ int offset;
+ int orig;
+ int test1, test2;
+ int orig1, orig2;
+
+ /* Probe */
+ orig = ioread16(mem);
+ iowrite16(0, mem);
+
+ for (offset = 0x100000; offset < mdev->mc.vram_window; offset += 0x4000) {
+ orig1 = ioread8(mem + offset);
+ orig2 = ioread8(mem + offset + 0x100);
+
+ iowrite16(0xaa55, mem + offset);
+ iowrite16(0xaa55, mem + offset + 0x100);
+
+ test1 = ioread16(mem + offset);
+ test2 = ioread16(mem);
+
+ iowrite16(orig1, mem + offset);
+ iowrite16(orig2, mem + offset + 0x100);
+
+ if (test1 != 0xaa55) {
+ break;
+ }
+
+ if (test2) {
+ break;
+ }
+ }
+
+ iowrite16(orig, mem);
+ return offset - 65536;
+}
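+
+/*
+ * Note on the probe above: test1 catches the first offset at which the
+ * signature no longer sticks (the end of real memory), while test2
+ * catches writes that wrapped around and landed on offset 0 (address
+ * aliasing on smaller memories).  The returned "offset - 65536" backs
+ * the reported size off 64K from the first failing probe.
+ */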
+
+/* Map the framebuffer from the card and configure the core */
+static int mga_vram_init(struct mga_device *mdev)
+{
+ void __iomem *mem;
+ struct apertures_struct *aper = alloc_apertures(1);
+ if (!aper)
+ return -ENOMEM;
+
+ /* BAR 0 is VRAM */
+ mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
+ mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);
+
+ aper->ranges[0].base = mdev->mc.vram_base;
+ aper->ranges[0].size = mdev->mc.vram_window;
+
+ remove_conflicting_framebuffers(aper, "mgafb", true);
+ kfree(aper);
+
+ if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
+ "mgadrmfb_vram")) {
+ DRM_ERROR("can't reserve VRAM\n");
+ return -ENXIO;
+ }
+
+ mem = pci_iomap(mdev->dev->pdev, 0, 0);
+
+ mdev->mc.vram_size = mga_probe_vram(mdev, mem);
+
+ pci_iounmap(mdev->dev->pdev, mem);
+
+ return 0;
+}
+
+static int mgag200_device_init(struct drm_device *dev,
+ uint32_t flags)
+{
+ struct mga_device *mdev = dev->dev_private;
+	int ret;
+	u32 option;
+
+ mdev->type = flags;
+
+ /* Hardcode the number of CRTCs to 1 */
+ mdev->num_crtc = 1;
+
+ pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
+ mdev->has_sdram = !(option & (1 << 14));
+
+ /* BAR 0 is the framebuffer, BAR 1 contains registers */
+ mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
+ mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
+
+ if (!devm_request_mem_region(mdev->dev->dev, mdev->rmmio_base, mdev->rmmio_size,
+ "mgadrmfb_mmio")) {
+ DRM_ERROR("can't reserve mmio registers\n");
+ return -ENOMEM;
+ }
+
+ mdev->rmmio = pcim_iomap(dev->pdev, 1, 0);
+ if (mdev->rmmio == NULL)
+ return -ENOMEM;
+
+ /* stash G200 SE model number for later use */
+ if (IS_G200_SE(mdev))
+ mdev->unique_rev_id = RREG32(0x1e24);
+
+ ret = mga_vram_init(mdev);
+ if (ret)
+ return ret;
+
+ mdev->bpp_shifts[0] = 0;
+ mdev->bpp_shifts[1] = 1;
+ mdev->bpp_shifts[2] = 0;
+ mdev->bpp_shifts[3] = 2;
+ return 0;
+}
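+
+/*
+ * bpp_shifts[] is indexed by (bytes per pixel - 1) and feeds the pitch
+ * scaling in mga_crtc_mode_set(): 8/16/24/32 bpp map to shifts of
+ * 0/1/0/2.  An illustrative lookup helper (not used by the driver):
+ */
+static inline int mga_bpp_shift(struct mga_device *mdev, int bits_per_pixel)
+{
+	return mdev->bpp_shifts[(bits_per_pixel >> 3) - 1];
+}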
+
+/*
+ * Functions here will be called by the core once it's bound the driver to
+ * a PCI device
+ */
+
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct mga_device *mdev;
+ int r;
+
+ mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
+ if (mdev == NULL)
+ return -ENOMEM;
+ dev->dev_private = (void *)mdev;
+ mdev->dev = dev;
+
+ r = mgag200_device_init(dev, flags);
+ if (r) {
+ dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
+ return r;
+ }
+ r = mgag200_mm_init(mdev);
+ if (r)
+ goto out;
+
+ drm_mode_config_init(dev);
+ dev->mode_config.funcs = (void *)&mga_mode_funcs;
+ if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
+ dev->mode_config.preferred_depth = 16;
+ else
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
+ r = mgag200_modeset_init(mdev);
+ if (r) {
+ dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+ goto out;
+ }
+
+ /* Make small buffers to store a hardware cursor (double buffered icon updates) */
+ mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
+ &mdev->cursor.pixels_1);
+ mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
+ &mdev->cursor.pixels_2);
+ if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1)
+ goto cursor_nospace;
+ mdev->cursor.pixels_current = mdev->cursor.pixels_1;
+ mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
+ goto cursor_done;
+ cursor_nospace:
+ mdev->cursor.pixels_1 = NULL;
+ mdev->cursor.pixels_2 = NULL;
+ dev_warn(&dev->pdev->dev, "Could not allocate space for cursors. Not doing hardware cursors.\n");
+ cursor_done:
+
+out:
+ if (r)
+ mgag200_driver_unload(dev);
+ return r;
+}
+
+int mgag200_driver_unload(struct drm_device *dev)
+{
+ struct mga_device *mdev = dev->dev_private;
+
+ if (mdev == NULL)
+ return 0;
+ mgag200_modeset_fini(mdev);
+ mgag200_fbdev_fini(mdev);
+ drm_mode_config_cleanup(dev);
+ mgag200_mm_fini(mdev);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int mgag200_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+	struct mgag200_bo *mgabo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+	ret = mgag200_bo_create(dev, size, 0, 0, &mgabo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+	*obj = &mgabo->gem;
+ return 0;
+}
+
+int mgag200_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = mgag200_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
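+
+/*
+ * For reference, userspace reaches the two dumb-buffer entry points in
+ * this file through the standard ioctls.  Sketch only, error handling
+ * elided; fd is assumed to be an open DRM device node:
+ *
+ *	struct drm_mode_create_dumb creq = {
+ *		.width = 1024, .height = 768, .bpp = 32,
+ *	};
+ *	struct drm_mode_map_dumb mreq = { .handle = 0 };
+ *	void *map;
+ *
+ *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
+ *	mreq.handle = creq.handle;
+ *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
+ *	map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *		   fd, mreq.offset);
+ */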
+
+static void mgag200_bo_unref(struct mgag200_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ *bo = NULL;
+}
+
+void mgag200_gem_free_object(struct drm_gem_object *obj)
+{
+ struct mgag200_bo *mgag200_bo = gem_to_mga_bo(obj);
+
+ mgag200_bo_unref(&mgag200_bo);
+}
+
+static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
+{
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+int
+mgag200_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct mgag200_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_mga_bo(obj);
+ *offset = mgag200_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
new file mode 100644
index 00000000000..a034ed40825
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -0,0 +1,1675 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "mgag200_drv.h"
+
+#define MGAG200_LUT_SIZE 256
+
+/*
+ * This file contains setup code for the CRTC.
+ */
+
+static void mga_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ int i;
+
+ if (!crtc->enabled)
+ return;
+
+ WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+
+ if (fb && fb->bits_per_pixel == 16) {
+ int inc = (fb->depth == 15) ? 8 : 4;
+ u8 r, b;
+ for (i = 0; i < MGAG200_LUT_SIZE; i += inc) {
+ if (fb->depth == 16) {
+ if (i > (MGAG200_LUT_SIZE >> 1)) {
+ r = b = 0;
+ } else {
+ r = mga_crtc->lut_r[i << 1];
+ b = mga_crtc->lut_b[i << 1];
+ }
+ } else {
+ r = mga_crtc->lut_r[i];
+ b = mga_crtc->lut_b[i];
+ }
+ /* VGA registers */
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, r);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, b);
+ }
+ return;
+ }
+ for (i = 0; i < MGAG200_LUT_SIZE; i++) {
+ /* VGA registers */
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_b[i]);
+ }
+}
+
+static inline void mga_wait_vsync(struct mga_device *mdev)
+{
+ unsigned long timeout = jiffies + HZ/10;
+ unsigned int status = 0;
+
+ do {
+ status = RREG32(MGAREG_Status);
+ } while ((status & 0x08) && time_before(jiffies, timeout));
+ timeout = jiffies + HZ/10;
+ status = 0;
+ do {
+ status = RREG32(MGAREG_Status);
+ } while (!(status & 0x08) && time_before(jiffies, timeout));
+}
+
+static inline void mga_wait_busy(struct mga_device *mdev)
+{
+ unsigned long timeout = jiffies + HZ;
+ unsigned int status = 0;
+ do {
+ status = RREG8(MGAREG_Status + 2);
+ } while ((status & 0x01) && time_before(jiffies, timeout));
+}
+
+/*
+ * The core passes the desired mode to the CRTC code to see whether any
+ * CRTC-specific modifications need to be made to it. We're in a position
+ * to just pass that straight through, so this does nothing
+ */
+static bool mga_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+
+ m = n = p = 0;
+ vcomax = 320000;
+ vcomin = 160000;
+ pllreffreq = 25000;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 8; testp > 0; testp /= 2) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testn = 17; testn < 256; testn++) {
+ for (testm = 1; testm < 32; testm++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm - 1;
+ n = testn - 1;
+ p = testp - 1;
+ }
+ }
+ }
+ }
+
+ if (delta > permitteddelta) {
+ printk(KERN_WARNING "PLL delta too large\n");
+ return 1;
+ }
+
+ WREG_DAC(MGA1064_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_PIX_PLLC_P, p);
+ return 0;
+}
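+
+/*
+ * Worked example for the search above: targeting a 65,000 kHz pixel
+ * clock (1024x768@60), testp = 4 keeps the VCO at 260,000 kHz, inside
+ * the 160,000-320,000 window; testn = 52 and testm = 5 then give
+ * 25000 * 52 / (5 * 4) = 65,000 kHz exactly, so the DAC is programmed
+ * with m = 4, n = 51, p = 3 (each divider minus one).
+ */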
+
+static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+ int i, j, tmpcount, vcount;
+ bool pll_locked = false;
+ u8 tmp;
+
+ m = n = p = 0;
+ vcomax = 550000;
+ vcomin = 150000;
+ pllreffreq = 48000;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 1; testp < 9; testp++) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testm = 1; testm < 17; testm++) {
+ for (testn = 1; testn < 151; testn++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = (testm - 1) | ((n >> 1) & 0x80);
+ p = testp - 1;
+ }
+ }
+ }
+ }
+
+ for (i = 0; i <= 32 && pll_locked == false; i++) {
+ if (i > 0) {
+ WREG8(MGAREG_CRTC_INDEX, 0x1e);
+ tmp = RREG8(MGAREG_CRTC_DATA);
+ if (tmp < 0xff)
+ WREG8(MGAREG_CRTC_DATA, tmp+1);
+ }
+
+ /* set pixclkdis to 1 */
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_REMHEADCTL_CLKDIS;
+ WREG8(DAC_DATA, tmp);
+
+ /* select PLL Set C */
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(500);
+
+ /* reset the PLL */
+ WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x04;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(50);
+
+ /* program pixel pll register */
+ WREG_DAC(MGA1064_WB_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_P, p);
+
+ udelay(50);
+
+ /* turn pll on */
+ WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x04;
+ WREG_DAC(MGA1064_VREF_CTL, tmp);
+
+ udelay(500);
+
+ /* select the pixel pll */
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
+ tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
+ WREG8(DAC_DATA, tmp);
+
+ /* reset dotclock rate bit */
+ WREG8(MGAREG_SEQ_INDEX, 1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+ tmp &= ~0x8;
+ WREG8(MGAREG_SEQ_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+
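+		/*
+		 * Lock detection: rather than polling a PLL status bit,
+		 * watch the vertical counter; once it advances by more
+		 * than two ticks the pixel clock is considered locked.
+		 */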
+ vcount = RREG8(MGAREG_VCOUNT);
+
+ for (j = 0; j < 30 && pll_locked == false; j++) {
+ tmpcount = RREG8(MGAREG_VCOUNT);
+ if (tmpcount < vcount)
+ vcount = 0;
+ if ((tmpcount - vcount) > 2)
+ pll_locked = true;
+ else
+ udelay(5);
+ }
+ }
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_REMHEADCTL_CLKDIS;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+ return 0;
+}
+
+static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+ u8 tmp;
+
+ m = n = p = 0;
+ vcomax = 550000;
+ vcomin = 150000;
+ pllreffreq = 50000;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 16; testp > 0; testp--) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testn = 1; testn < 257; testn++) {
+ for (testm = 1; testm < 17; testm++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = testm - 1;
+ p = testp - 1;
+ }
+ }
+ }
+ }
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+ tmp = RREG8(DAC_DATA);
+ WREG8(DAC_DATA, tmp & ~0x40);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG8(DAC_DATA, tmp);
+
+ WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_EV_PIX_PLLC_P, p);
+
+ udelay(50);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(500);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+ tmp = RREG8(DAC_DATA);
+ WREG8(DAC_DATA, tmp | 0x40);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= (0x3 << 2);
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+
+ return 0;
+}
+
+static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+ int i, j, tmpcount, vcount;
+ u8 tmp;
+ bool pll_locked = false;
+
+ m = n = p = 0;
+ vcomax = 800000;
+ vcomin = 400000;
+ pllreffreq = 33333;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 16; testp > 0; testp >>= 1) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testm = 1; testm < 33; testm++) {
+ for (testn = 17; testn < 257; testn++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = (testm - 1);
+ p = testp - 1;
+ }
+ if ((clock * testp) >= 600000)
+ p |= 0x80;
+ }
+ }
+ }
+ for (i = 0; i <= 32 && pll_locked == false; i++) {
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(500);
+
+ WREG_DAC(MGA1064_EH_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_EH_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_EH_PIX_PLLC_P, p);
+
+ udelay(500);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG8(DAC_DATA, tmp);
+
+ vcount = RREG8(MGAREG_VCOUNT);
+
+ for (j = 0; j < 30 && pll_locked == false; j++) {
+ tmpcount = RREG8(MGAREG_VCOUNT);
+ if (tmpcount < vcount)
+ vcount = 0;
+ if ((tmpcount - vcount) > 2)
+ pll_locked = true;
+ else
+ udelay(5);
+ }
+ }
+
+ return 0;
+}
+
+static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta;
+ int testr, testn, testm, testo;
+ unsigned int p, m, n;
+ unsigned int computed, vco;
+ int tmp;
+ const unsigned int m_div_val[] = { 1, 2, 4, 8 };
+
+ m = n = p = 0;
+ vcomax = 1488000;
+ vcomin = 1056000;
+ pllreffreq = 48000;
+
+ delta = 0xffffffff;
+
+ for (testr = 0; testr < 4; testr++) {
+ if (delta == 0)
+ break;
+ for (testn = 5; testn < 129; testn++) {
+ if (delta == 0)
+ break;
+ for (testm = 3; testm >= 0; testm--) {
+ if (delta == 0)
+ break;
+ for (testo = 5; testo < 33; testo++) {
+ vco = pllreffreq * (testn + 1) /
+ (testr + 1);
+ if (vco < vcomin)
+ continue;
+ if (vco > vcomax)
+ continue;
+ computed = vco / (m_div_val[testm] * (testo + 1));
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm | (testo << 3);
+ n = testn;
+ p = testr | (testr << 3);
+ }
+ }
+ }
+ }
+ }
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_REMHEADCTL_CLKDIS;
+ WREG8(DAC_DATA, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= (0x3<<2) | 0xc0;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(500);
+
+ WREG_DAC(MGA1064_ER_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_ER_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_ER_PIX_PLLC_P, p);
+
+ udelay(50);
+
+ return 0;
+}
+
+static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
+{
+	switch (mdev->type) {
+	case G200_SE_A:
+	case G200_SE_B:
+		return mga_g200se_set_plls(mdev, clock);
+	case G200_WB:
+		return mga_g200wb_set_plls(mdev, clock);
+	case G200_EV:
+		return mga_g200ev_set_plls(mdev, clock);
+	case G200_EH:
+		return mga_g200eh_set_plls(mdev, clock);
+	case G200_ER:
+		return mga_g200er_set_plls(mdev, clock);
+	}
+	return 0;
+}
+
+static void mga_g200wb_prepare(struct drm_crtc *crtc)
+{
+ struct mga_device *mdev = crtc->dev->dev_private;
+ u8 tmp;
+ int iter_max;
+
+	/* 1- The first step is to warn the BMC of an upcoming mode change;
+	 * we do this by switching the misc<0> line to output. */
+
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x10;
+ WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
+
+ /* we are putting a 1 on the misc<0> line */
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x10;
+ WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+
+	/* 2- The second step is to mask any further scan request;
+	 * this is done by asserting the remfreqmsk bit (XSPAREREG<7>)
+ */
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x80;
+ WREG_DAC(MGA1064_SPAREREG, tmp);
+
+	/* 3a- The third step is to verify whether there is an active scan;
+	 * we are searching for a 0 on remhsyncsts (XSPAREREG<0>)
+ */
+ iter_max = 300;
+ while (!(tmp & 0x1) && iter_max) {
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ udelay(1000);
+ iter_max--;
+ }
+
+	/* 3b- This step occurs only if the remote is actually scanning;
+	 * we wait for the end of the frame, which is a 1 on
+	 * remvsyncsts (XSPAREREG<1>)
+ */
+ if (iter_max) {
+ iter_max = 300;
+ while ((tmp & 0x2) && iter_max) {
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ udelay(1000);
+ iter_max--;
+ }
+ }
+}
+
+static void mga_g200wb_commit(struct drm_crtc *crtc)
+{
+ u8 tmp;
+ struct mga_device *mdev = crtc->dev->dev_private;
+
+ /* 1- The first step is to ensure that the vrsten and hrsten are set */
+ WREG8(MGAREG_CRTCEXT_INDEX, 1);
+ tmp = RREG8(MGAREG_CRTCEXT_DATA);
+ WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);
+
+ /* 2- second step is to assert the rstlvl2 */
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x8;
+ WREG8(DAC_DATA, tmp);
+
+ /* wait 10 us */
+ udelay(10);
+
+ /* 3- deassert rstlvl2 */
+ tmp &= ~0x08;
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+ WREG8(DAC_DATA, tmp);
+
+ /* 4- remove mask of scan request */
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x80;
+ WREG8(DAC_DATA, tmp);
+
+ /* 5- put back a 0 on the misc<0> line */
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x10;
+ WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+}
+
+/*
+ This is how the framebuffer base address is stored in g200 cards:
+ * Assume @offset is the gpu_addr variable of the framebuffer object
+ * Then addr is the number of _pixels_ (not bytes) from the start of
+ VRAM to the first pixel we want to display. (divided by 2 for 32bit
+ framebuffers)
+ * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers
+ addr<20> -> CRTCEXT0<6>
+ addr<19-16> -> CRTCEXT0<3-0>
+ addr<15-8> -> CRTCC<7-0>
+ addr<7-0> -> CRTCD<7-0>
+ CRTCEXT0 has to be programmed last to trigger an update and make the
+ new addr variable take effect.
+ */
+static void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
+{
+ struct mga_device *mdev = crtc->dev->dev_private;
+ u32 addr;
+ int count;
+ u8 crtcext0;
+
+ while (RREG8(0x1fda) & 0x08);
+ while (!(RREG8(0x1fda) & 0x08));
+
+ count = RREG8(MGAREG_VCOUNT) + 2;
+ while (RREG8(MGAREG_VCOUNT) < count);
+
+ WREG8(MGAREG_CRTCEXT_INDEX, 0);
+ crtcext0 = RREG8(MGAREG_CRTCEXT_DATA);
+ crtcext0 &= 0xB0;
+ addr = offset / 8;
+ /* Can't store addresses any higher than that...
+ but we also don't have more than 16MB of memory, so it should be fine. */
+ WARN_ON(addr > 0x1fffff);
+ crtcext0 |= (!!(addr & (1<<20)))<<6;
+ WREG_CRT(0x0d, (u8)(addr & 0xff));
+ WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
+ WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0);
+}
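+
+/*
+ * Worked example of the packing above: a scanout starting 1 MiB into
+ * VRAM has offset = 0x100000 bytes, so addr = offset / 8 = 0x20000.
+ * That programs CRTCD = 0x00, CRTCC = 0x00, CRTCEXT0<3-0> = 0x2 and
+ * CRTCEXT0<6> = 0.
+ */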
+
+/* push the old framebuffer out of VRAM before pinning the new scanout buffer */
+static int mga_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct mga_device *mdev = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
+ struct mga_framebuffer *mga_fb;
+ struct mgag200_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* push the previous fb to system ram */
+ if (!atomic && fb) {
+ mga_fb = to_mga_framebuffer(fb);
+ obj = mga_fb->obj;
+ bo = gem_to_mga_bo(obj);
+ ret = mgag200_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+ mgag200_bo_push_sysram(bo);
+ mgag200_bo_unreserve(bo);
+ }
+
+ mga_fb = to_mga_framebuffer(crtc->primary->fb);
+ obj = mga_fb->obj;
+ bo = gem_to_mga_bo(obj);
+
+ ret = mgag200_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = mgag200_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ mgag200_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&mdev->mfbdev->mfb == mga_fb) {
+		/* if this is the fbcon framebuffer, keep it kmapped */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret)
+ DRM_ERROR("failed to kmap fbcon\n");
+
+ }
+ mgag200_bo_unreserve(bo);
+
+ mga_set_start_address(crtc, (u32)gpu_addr);
+
+ return 0;
+}
+
+static int mga_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int mga_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ int hdisplay, hsyncstart, hsyncend, htotal;
+ int vdisplay, vsyncstart, vsyncend, vtotal;
+ int pitch;
+ int option = 0, option2 = 0;
+ int i;
+ unsigned char misc = 0;
+ unsigned char ext_vga[6];
+ u8 bppshift;
+
+ static unsigned char dacvalue[] = {
+ /* 0x00: */ 0, 0, 0, 0, 0, 0, 0x00, 0,
+ /* 0x08: */ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x10: */ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x18: */ 0x00, 0, 0xC9, 0xFF, 0xBF, 0x20, 0x1F, 0x20,
+ /* 0x20: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x28: */ 0x00, 0x00, 0x00, 0x00, 0, 0, 0, 0x40,
+ /* 0x30: */ 0x00, 0xB0, 0x00, 0xC2, 0x34, 0x14, 0x02, 0x83,
+ /* 0x38: */ 0x00, 0x93, 0x00, 0x77, 0x00, 0x00, 0x00, 0x3A,
+ /* 0x40: */ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x48: */ 0, 0, 0, 0, 0, 0, 0, 0
+ };
+
+ bppshift = mdev->bpp_shifts[(crtc->primary->fb->bits_per_pixel >> 3) - 1];
+
+ switch (mdev->type) {
+ case G200_SE_A:
+ case G200_SE_B:
+ dacvalue[MGA1064_VREF_CTL] = 0x03;
+ dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
+ dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_DAC_EN |
+ MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS;
+ if (mdev->has_sdram)
+ option = 0x40049120;
+ else
+ option = 0x4004d120;
+ option2 = 0x00008000;
+ break;
+ case G200_WB:
+ dacvalue[MGA1064_VREF_CTL] = 0x07;
+ option = 0x41049120;
+ option2 = 0x0000b000;
+ break;
+ case G200_EV:
+ dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
+ dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS;
+ option = 0x00000120;
+ option2 = 0x0000b000;
+ break;
+ case G200_EH:
+ dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS;
+ option = 0x00000120;
+ option2 = 0x0000b000;
+ break;
+ case G200_ER:
+ break;
+ }
+
+ switch (crtc->primary->fb->bits_per_pixel) {
+ case 8:
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_8bits;
+ break;
+ case 16:
+ if (crtc->primary->fb->depth == 15)
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_15bits;
+ else
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_16bits;
+ break;
+ case 24:
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_24bits;
+ break;
+ case 32:
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_32_24bits;
+ break;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ misc |= 0x40;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ misc |= 0x80;
+
+ for (i = 0; i < sizeof(dacvalue); i++) {
+ if ((i <= 0x17) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i >= 0x30) && (i <= 0x37)))
+ continue;
+ if (IS_G200_SE(mdev) &&
+ ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)))
+ continue;
+ if ((mdev->type == G200_EV || mdev->type == G200_WB || mdev->type == G200_EH) &&
+ (i >= 0x44) && (i <= 0x4e))
+ continue;
+
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ if (mdev->type == G200_ER)
+ WREG_DAC(0x90, 0);
+
+ if (option)
+ pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
+ if (option2)
+ pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2);
+
+ WREG_SEQ(2, 0xf);
+ WREG_SEQ(3, 0);
+ WREG_SEQ(4, 0xe);
+
+ pitch = crtc->primary->fb->pitches[0] / (crtc->primary->fb->bits_per_pixel / 8);
+ if (crtc->primary->fb->bits_per_pixel == 24)
+ pitch = (pitch * 3) >> (4 - bppshift);
+ else
+ pitch = pitch >> (4 - bppshift);
+
+ hdisplay = mode->hdisplay / 8 - 1;
+ hsyncstart = mode->hsync_start / 8 - 1;
+ hsyncend = mode->hsync_end / 8 - 1;
+ htotal = mode->htotal / 8 - 1;
+
+ /* Work around hardware quirk */
+ if ((htotal & 0x07) == 0x06 || (htotal & 0x07) == 0x04)
+ htotal++;
+
+ vdisplay = mode->vdisplay - 1;
+ vsyncstart = mode->vsync_start - 1;
+ vsyncend = mode->vsync_end - 1;
+ vtotal = mode->vtotal - 2;
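+
+	/*
+	 * Worked example (VESA 1024x768@60: h 1024 1048 1184 1344,
+	 * v 768 771 777 806): hdisplay = 127, hsyncstart = 130,
+	 * hsyncend = 147, htotal = 167 (167 & 7 == 7, so the quirk
+	 * above does not fire), vdisplay = 767, vsyncstart = 770,
+	 * vsyncend = 776, vtotal = 804.
+	 */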
+
+ WREG_GFX(0, 0);
+ WREG_GFX(1, 0);
+ WREG_GFX(2, 0);
+ WREG_GFX(3, 0);
+ WREG_GFX(4, 0);
+ WREG_GFX(5, 0x40);
+ WREG_GFX(6, 0x5);
+ WREG_GFX(7, 0xf);
+ WREG_GFX(8, 0xf);
+
+ WREG_CRT(0, htotal - 4);
+ WREG_CRT(1, hdisplay);
+ WREG_CRT(2, hdisplay);
+ WREG_CRT(3, (htotal & 0x1F) | 0x80);
+ WREG_CRT(4, hsyncstart);
+ WREG_CRT(5, ((htotal & 0x20) << 2) | (hsyncend & 0x1F));
+ WREG_CRT(6, vtotal & 0xFF);
+ WREG_CRT(7, ((vtotal & 0x100) >> 8) |
+ ((vdisplay & 0x100) >> 7) |
+ ((vsyncstart & 0x100) >> 6) |
+ ((vdisplay & 0x100) >> 5) |
+ ((vdisplay & 0x100) >> 4) | /* linecomp */
+		((vtotal & 0x200) >> 4) |
+ ((vdisplay & 0x200) >> 3) |
+ ((vsyncstart & 0x200) >> 2));
+ WREG_CRT(9, ((vdisplay & 0x200) >> 4) |
+ ((vdisplay & 0x200) >> 3));
+ WREG_CRT(10, 0);
+ WREG_CRT(11, 0);
+ WREG_CRT(12, 0);
+ WREG_CRT(13, 0);
+ WREG_CRT(14, 0);
+ WREG_CRT(15, 0);
+ WREG_CRT(16, vsyncstart & 0xFF);
+ WREG_CRT(17, (vsyncend & 0x0F) | 0x20);
+ WREG_CRT(18, vdisplay & 0xFF);
+ WREG_CRT(19, pitch & 0xFF);
+ WREG_CRT(20, 0);
+ WREG_CRT(21, vdisplay & 0xFF);
+ WREG_CRT(22, (vtotal + 1) & 0xFF);
+ WREG_CRT(23, 0xc3);
+ WREG_CRT(24, vdisplay & 0xFF);
+
+ ext_vga[0] = 0;
+ ext_vga[5] = 0;
+
+ /* TODO interlace */
+
+ ext_vga[0] |= (pitch & 0x300) >> 4;
+ ext_vga[1] = (((htotal - 4) & 0x100) >> 8) |
+ ((hdisplay & 0x100) >> 7) |
+ ((hsyncstart & 0x100) >> 6) |
+ (htotal & 0x40);
+ ext_vga[2] = ((vtotal & 0xc00) >> 10) |
+ ((vdisplay & 0x400) >> 8) |
+ ((vdisplay & 0xc00) >> 7) |
+ ((vsyncstart & 0xc00) >> 5) |
+ ((vdisplay & 0x400) >> 3);
+ if (crtc->primary->fb->bits_per_pixel == 24)
+ ext_vga[3] = (((1 << bppshift) * 3) - 1) | 0x80;
+ else
+ ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
+ ext_vga[4] = 0;
+ if (mdev->type == G200_WB)
+ ext_vga[1] |= 0x88;
+
+ /* Set pixel clocks */
+ misc = 0x2d;
+ WREG8(MGA_MISC_OUT, misc);
+
+ mga_crtc_set_plls(mdev, mode->clock);
+
+ for (i = 0; i < 6; i++) {
+ WREG_ECRT(i, ext_vga[i]);
+ }
+
+ if (mdev->type == G200_ER)
+ WREG_ECRT(0x24, 0x5);
+
+ if (mdev->type == G200_EV) {
+ WREG_ECRT(6, 0);
+ }
+
+ WREG_ECRT(0, ext_vga[0]);
+ /* Enable mga pixel clock */
+ misc = 0x2d;
+
+ WREG8(MGA_MISC_OUT, misc);
+
+ if (adjusted_mode)
+ memcpy(&mdev->mode, mode, sizeof(struct drm_display_mode));
+
+ mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
+
+ /* reset tagfifo */
+ if (mdev->type == G200_ER) {
+ u32 mem_ctl = RREG32(MGAREG_MEMCTL);
+ u8 seq1;
+
+ /* screen off */
+ WREG8(MGAREG_SEQ_INDEX, 0x01);
+ seq1 = RREG8(MGAREG_SEQ_DATA) | 0x20;
+ WREG8(MGAREG_SEQ_DATA, seq1);
+
+ WREG32(MGAREG_MEMCTL, mem_ctl | 0x00200000);
+ udelay(1000);
+ WREG32(MGAREG_MEMCTL, mem_ctl & ~0x00200000);
+
+ WREG8(MGAREG_SEQ_DATA, seq1 & ~0x20);
+ }
+
+ if (IS_G200_SE(mdev)) {
+ if (mdev->unique_rev_id >= 0x02) {
+ u8 hi_pri_lvl;
+ u32 bpp;
+ u32 mb;
+
+ if (crtc->primary->fb->bits_per_pixel > 16)
+ bpp = 32;
+ else if (crtc->primary->fb->bits_per_pixel > 8)
+ bpp = 16;
+ else
+ bpp = 8;
+
+ mb = (mode->clock * bpp) / 1000;
+ if (mb > 3100)
+ hi_pri_lvl = 0;
+ else if (mb > 2600)
+ hi_pri_lvl = 1;
+ else if (mb > 1900)
+ hi_pri_lvl = 2;
+ else if (mb > 1160)
+ hi_pri_lvl = 3;
+ else if (mb > 440)
+ hi_pri_lvl = 4;
+ else
+ hi_pri_lvl = 5;
+
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+ WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl);
+ } else {
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+ if (mdev->unique_rev_id >= 0x01)
+ WREG8(MGAREG_CRTCEXT_DATA, 0x03);
+ else
+ WREG8(MGAREG_CRTCEXT_DATA, 0x04);
+ }
+ }
+ return 0;
+}
+
+#if 0 /* code from mjg to attempt D3 on crtc dpms off - revisit later */
+static int mga_suspend(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ int option;
+
+ if (mdev->suspended)
+ return 0;
+
+ WREG_SEQ(1, 0x20);
+ WREG_ECRT(1, 0x30);
+ /* Disable the pixel clock */
+ WREG_DAC(0x1a, 0x05);
+ /* Power down the DAC */
+ WREG_DAC(0x1e, 0x18);
+ /* Power down the pixel PLL */
+ WREG_DAC(0x1a, 0x0d);
+
+ /* Disable PLLs and clocks */
+ pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
+ option &= ~(0x1F8024);
+ pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
+ pci_set_power_state(pdev, PCI_D3hot);
+ pci_disable_device(pdev);
+
+ mdev->suspended = true;
+
+ return 0;
+}
+
+static int mga_resume(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ int option;
+
+ if (!mdev->suspended)
+ return 0;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_device(pdev);
+
+ /* Disable sysclk */
+ pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
+ option &= ~(0x4);
+ pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
+
+ mdev->suspended = false;
+
+ return 0;
+}
+
+#endif
+
+static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ u8 seq1 = 0, crtcext1 = 0;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ seq1 = 0;
+ crtcext1 = 0;
+ mga_crtc_load_lut(crtc);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ seq1 = 0x20;
+ crtcext1 = 0x10;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ seq1 = 0x20;
+ crtcext1 = 0x20;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ seq1 = 0x20;
+ crtcext1 = 0x30;
+ break;
+ }
+
+#if 0
+ if (mode == DRM_MODE_DPMS_OFF) {
+ mga_suspend(crtc);
+ }
+#endif
+ WREG8(MGAREG_SEQ_INDEX, 0x01);
+ seq1 |= RREG8(MGAREG_SEQ_DATA) & ~0x20;
+ mga_wait_vsync(mdev);
+ mga_wait_busy(mdev);
+ WREG8(MGAREG_SEQ_DATA, seq1);
+ msleep(20);
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x01);
+ crtcext1 |= RREG8(MGAREG_CRTCEXT_DATA) & ~0x30;
+ WREG8(MGAREG_CRTCEXT_DATA, crtcext1);
+
+#if 0
+ if (mode == DRM_MODE_DPMS_ON && mdev->suspended == true) {
+ mga_resume(crtc);
+ drm_helper_resume_force_mode(dev);
+ }
+#endif
+}
+
+/*
+ * This is called before a mode is programmed. A typical use might be to
+ * enable DPMS during the programming to avoid seeing intermediate stages,
+ * but that's not relevant to us
+ */
+static void mga_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ u8 tmp;
+
+ /* mga_resume(crtc);*/
+
+ WREG8(MGAREG_CRTC_INDEX, 0x11);
+ tmp = RREG8(MGAREG_CRTC_DATA);
+ WREG_CRT(0x11, tmp | 0x80);
+
+ if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
+ WREG_SEQ(0, 1);
+ msleep(50);
+ WREG_SEQ(1, 0x20);
+ msleep(20);
+ } else {
+ WREG8(MGAREG_SEQ_INDEX, 0x1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+
+ /* start sync reset */
+ WREG_SEQ(0, 1);
+ WREG_SEQ(1, tmp | 0x20);
+ }
+
+ if (mdev->type == G200_WB)
+ mga_g200wb_prepare(crtc);
+
+ WREG_CRT(17, 0);
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void mga_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ u8 tmp;
+
+ if (mdev->type == G200_WB)
+ mga_g200wb_commit(crtc);
+
+ if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
+ msleep(50);
+ WREG_SEQ(1, 0x0);
+ msleep(20);
+ WREG_SEQ(0, 0x3);
+ } else {
+ WREG8(MGAREG_SEQ_INDEX, 0x1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+
+ tmp &= ~0x20;
+ WREG_SEQ(0x1, tmp);
+ WREG_SEQ(0, 3);
+ }
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode so can't perform smooth fades on deeper modes,
+ * but it's a requirement that we provide the function
+ */
+static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ int end = (start + size > MGAG200_LUT_SIZE) ? MGAG200_LUT_SIZE : start + size;
+ int i;
+
+ for (i = start; i < end; i++) {
+ mga_crtc->lut_r[i] = red[i] >> 8;
+ mga_crtc->lut_g[i] = green[i] >> 8;
+ mga_crtc->lut_b[i] = blue[i] >> 8;
+ }
+ mga_crtc_load_lut(crtc);
+}
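+
+/*
+ * Example of the truncation above (illustrative only): the DRM core hands
+ * us 16-bit gamma components, and only the top byte is kept for the 8-bit
+ * hardware palette:
+ *
+ *	u16 red = 0x8000;	// mid intensity from the core
+ *	u8 lut_r = red >> 8;	// 0x80 as programmed into the LUT
+ */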
+
+/* Simple cleanup function */
+static void mga_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(mga_crtc);
+}
+
+static void mga_crtc_disable(struct drm_crtc *crtc)
+{
+	int ret;
+
+	DRM_DEBUG_KMS("\n");
+ mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc->primary->fb) {
+ struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->primary->fb);
+ struct drm_gem_object *obj = mga_fb->obj;
+ struct mgag200_bo *bo = gem_to_mga_bo(obj);
+ ret = mgag200_bo_reserve(bo, false);
+ if (ret)
+ return;
+ mgag200_bo_push_sysram(bo);
+ mgag200_bo_unreserve(bo);
+ }
+ crtc->primary->fb = NULL;
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs mga_crtc_funcs = {
+ .cursor_set = mga_crtc_cursor_set,
+ .cursor_move = mga_crtc_cursor_move,
+ .gamma_set = mga_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = mga_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs mga_helper_funcs = {
+ .disable = mga_crtc_disable,
+ .dpms = mga_crtc_dpms,
+ .mode_fixup = mga_crtc_mode_fixup,
+ .mode_set = mga_crtc_mode_set,
+ .mode_set_base = mga_crtc_mode_set_base,
+ .prepare = mga_crtc_prepare,
+ .commit = mga_crtc_commit,
+ .load_lut = mga_crtc_load_lut,
+};
+
+/* CRTC setup */
+static void mga_crtc_init(struct mga_device *mdev)
+{
+ struct mga_crtc *mga_crtc;
+ int i;
+
+ mga_crtc = kzalloc(sizeof(struct mga_crtc) +
+ (MGAG200FB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ GFP_KERNEL);
+
+ if (mga_crtc == NULL)
+ return;
+
+ drm_crtc_init(mdev->dev, &mga_crtc->base, &mga_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
+ mdev->mode_info.crtc = mga_crtc;
+
+ for (i = 0; i < MGAG200_LUT_SIZE; i++) {
+ mga_crtc->lut_r[i] = i;
+ mga_crtc->lut_g[i] = i;
+ mga_crtc->lut_b[i] = i;
+ }
+
+ drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
+}
+
+/* Sets the color ramps on behalf of fbcon */
+void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+ mga_crtc->lut_r[regno] = red >> 8;
+ mga_crtc->lut_g[regno] = green >> 8;
+ mga_crtc->lut_b[regno] = blue >> 8;
+}
+
+/* Gets the color ramps on behalf of fbcon */
+void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+ *red = (u16)mga_crtc->lut_r[regno] << 8;
+ *green = (u16)mga_crtc->lut_g[regno] << 8;
+ *blue = (u16)mga_crtc->lut_b[regno] << 8;
+}
+
+/*
+ * The encoder comes after the CRTC in the output pipeline, but before
+ * the connector. It's responsible for ensuring that the digital
+ * stream is appropriately converted into the output format. Setup is
+ * very simple in this case - the DAC needs no per-mode configuration,
+ * so all of the encoder hooks below are empty stubs
+ */
+
+/*
+ * These functions are analogous to those in the CRTC code, but are intended
+ * to handle any encoder-specific limitations
+ */
+static bool mga_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mga_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void mga_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+}
+
+static void mga_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void mga_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static void mga_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
+
+	drm_encoder_cleanup(encoder);
+ kfree(mga_encoder);
+}
+
+static const struct drm_encoder_helper_funcs mga_encoder_helper_funcs = {
+ .dpms = mga_encoder_dpms,
+ .mode_fixup = mga_encoder_mode_fixup,
+ .mode_set = mga_encoder_mode_set,
+ .prepare = mga_encoder_prepare,
+ .commit = mga_encoder_commit,
+};
+
+static const struct drm_encoder_funcs mga_encoder_encoder_funcs = {
+ .destroy = mga_encoder_destroy,
+};
+
+static struct drm_encoder *mga_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct mga_encoder *mga_encoder;
+
+ mga_encoder = kzalloc(sizeof(struct mga_encoder), GFP_KERNEL);
+ if (!mga_encoder)
+ return NULL;
+
+ encoder = &mga_encoder->base;
+ encoder->possible_crtcs = 0x1;
+
+ drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs);
+
+ return encoder;
+}
+
+static int mga_vga_get_modes(struct drm_connector *connector)
+{
+ struct mga_connector *mga_connector = to_mga_connector(connector);
+ struct edid *edid;
+ int ret = 0;
+
+ edid = drm_get_edid(connector, &mga_connector->i2c->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+ return ret;
+}
+
+static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode,
+ int bits_per_pixel)
+{
+ uint32_t total_area, divisor;
+ int64_t active_area, pixels_per_second, bandwidth;
+ uint64_t bytes_per_pixel = (bits_per_pixel + 7) / 8;
+
+ divisor = 1024;
+
+ if (!mode->htotal || !mode->vtotal || !mode->clock)
+ return 0;
+
+ active_area = mode->hdisplay * mode->vdisplay;
+ total_area = mode->htotal * mode->vtotal;
+
+ pixels_per_second = active_area * mode->clock * 1000;
+ do_div(pixels_per_second, total_area);
+
+ bandwidth = pixels_per_second * bytes_per_pixel * 100;
+ do_div(bandwidth, divisor);
+
+ return (uint32_t)(bandwidth);
+}
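+
+/*
+ * Worked example (illustrative only): a 1024x768@60 mode has a 65000 kHz
+ * pixel clock with htotal 1344 and vtotal 806. At 32 bpp that works out
+ * to roughly 47.2 Mpixel/s of active pixels, i.e. ~188.8 MB/s, and the
+ * function returns about 18.4 million (bandwidth in KiB/s scaled by 100),
+ * comfortably under the G200SE rev 1 cap of 24400 * 1024 used below.
+ */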
+
+#define MODE_BANDWIDTH MODE_BAD
+
+static int mga_vga_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+	struct mga_device *mdev = dev->dev_private;
+ struct mga_fbdev *mfbdev = mdev->mfbdev;
+ struct drm_fb_helper *fb_helper = &mfbdev->helper;
+ struct drm_fb_helper_connector *fb_helper_conn = NULL;
+ int bpp = 32;
+ int i = 0;
+
+ if (IS_G200_SE(mdev)) {
+ if (mdev->unique_rev_id == 0x01) {
+ if (mode->hdisplay > 1600)
+ return MODE_VIRTUAL_X;
+ if (mode->vdisplay > 1200)
+ return MODE_VIRTUAL_Y;
+ if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+ > (24400 * 1024))
+ return MODE_BANDWIDTH;
+ } else if (mdev->unique_rev_id >= 0x02) {
+ if (mode->hdisplay > 1920)
+ return MODE_VIRTUAL_X;
+ if (mode->vdisplay > 1200)
+ return MODE_VIRTUAL_Y;
+ if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+ > (30100 * 1024))
+ return MODE_BANDWIDTH;
+ }
+ } else if (mdev->type == G200_WB) {
+ if (mode->hdisplay > 1280)
+ return MODE_VIRTUAL_X;
+ if (mode->vdisplay > 1024)
+ return MODE_VIRTUAL_Y;
+		if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+		    > (31877 * 1024))
+ return MODE_BANDWIDTH;
+ } else if (mdev->type == G200_EV &&
+ (mga_vga_calculate_mode_bandwidth(mode, bpp)
+ > (32700 * 1024))) {
+ return MODE_BANDWIDTH;
+ } else if (mdev->type == G200_EH &&
+ (mga_vga_calculate_mode_bandwidth(mode, bpp)
+ > (37500 * 1024))) {
+ return MODE_BANDWIDTH;
+ } else if (mdev->type == G200_ER &&
+ (mga_vga_calculate_mode_bandwidth(mode,
+ bpp) > (55000 * 1024))) {
+ return MODE_BANDWIDTH;
+ }
+
+ if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
+ mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
+ mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
+ mode->crtc_vsync_end > 4096 || mode->crtc_vtotal > 4096) {
+ return MODE_BAD;
+ }
+
+ /* Validate the mode input by the user */
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (fb_helper->connector_info[i]->connector == connector) {
+ /* Found the helper for this connector */
+ fb_helper_conn = fb_helper->connector_info[i];
+			if (fb_helper_conn->cmdline_mode.specified &&
+			    fb_helper_conn->cmdline_mode.bpp_specified)
+				bpp = fb_helper_conn->cmdline_mode.bpp;
+ }
+ }
+
+ if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
+ if (fb_helper_conn)
+ fb_helper_conn->cmdline_mode.specified = false;
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
+ *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ /* pick the encoder ids */
+ if (enc_id) {
+		obj = drm_mode_object_find(connector->dev, enc_id,
+					   DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static enum drm_connector_status mga_vga_detect(struct drm_connector
+ *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void mga_connector_destroy(struct drm_connector *connector)
+{
+	struct mga_connector *mga_connector = to_mga_connector(connector);
+
+	mgag200_i2c_destroy(mga_connector->i2c);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
+ .get_modes = mga_vga_get_modes,
+ .mode_valid = mga_vga_mode_valid,
+ .best_encoder = mga_connector_best_encoder,
+};
+
+struct drm_connector_funcs mga_vga_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = mga_vga_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = mga_connector_destroy,
+};
+
+static struct drm_connector *mga_vga_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct mga_connector *mga_connector;
+
+ mga_connector = kzalloc(sizeof(struct mga_connector), GFP_KERNEL);
+ if (!mga_connector)
+ return NULL;
+
+ connector = &mga_connector->base;
+
+ drm_connector_init(dev, connector,
+ &mga_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
+
+ drm_sysfs_connector_add(connector);
+
+ mga_connector->i2c = mgag200_i2c_create(dev);
+ if (!mga_connector->i2c)
+ DRM_ERROR("failed to add ddc bus\n");
+
+ return connector;
+}
+
+int mgag200_modeset_init(struct mga_device *mdev)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ mdev->mode_info.mode_config_initialized = true;
+
+ mdev->dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
+ mdev->dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
+
+ mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
+
+ mga_crtc_init(mdev);
+
+ encoder = mga_encoder_init(mdev->dev);
+ if (!encoder) {
+ DRM_ERROR("mga_encoder_init failed\n");
+		return -ENOMEM;
+ }
+
+ connector = mga_vga_init(mdev->dev);
+ if (!connector) {
+ DRM_ERROR("mga_vga_init failed\n");
+		return -ENOMEM;
+ }
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ ret = mgag200_fbdev_init(mdev);
+ if (ret) {
+ DRM_ERROR("mga_fbdev_init failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void mgag200_modeset_fini(struct mga_device *mdev)
+{
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
new file mode 100644
index 00000000000..3ae442a64bd
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -0,0 +1,665 @@
+/*
+ * MGA Millennium (MGA2064W) functions
+ * MGA Mystique (MGA1064SG) functions
+ *
+ * Copyright 1996 The XFree86 Project, Inc.
+ *
+ * Authors
+ * Dirk Hohndel
+ * hohndel@XFree86.Org
+ * David Dawes
+ * dawes@XFree86.Org
+ * Contributors:
+ * Guy DESBIEF, Aix-en-provence, France
+ * g.desbief@aix.pacwan.net
+ * MGA1064SG Mystique register file
+ */
+
+#ifndef _MGA_REG_H_
+#define _MGA_REG_H_
+
+#define MGAREG_DWGCTL 0x1c00
+#define MGAREG_MACCESS 0x1c04
+/* the following is a mystique only register */
+#define MGAREG_MCTLWTST 0x1c08
+#define MGAREG_ZORG 0x1c0c
+
+#define MGAREG_PAT0 0x1c10
+#define MGAREG_PAT1 0x1c14
+#define MGAREG_PLNWT 0x1c1c
+
+#define MGAREG_BCOL 0x1c20
+#define MGAREG_FCOL 0x1c24
+
+#define MGAREG_SRC0 0x1c30
+#define MGAREG_SRC1 0x1c34
+#define MGAREG_SRC2 0x1c38
+#define MGAREG_SRC3 0x1c3c
+
+#define MGAREG_XYSTRT 0x1c40
+#define MGAREG_XYEND 0x1c44
+
+#define MGAREG_SHIFT 0x1c50
+/* the following is a mystique only register */
+#define MGAREG_DMAPAD 0x1c54
+#define MGAREG_SGN 0x1c58
+#define MGAREG_LEN 0x1c5c
+
+#define MGAREG_AR0 0x1c60
+#define MGAREG_AR1 0x1c64
+#define MGAREG_AR2 0x1c68
+#define MGAREG_AR3 0x1c6c
+#define MGAREG_AR4 0x1c70
+#define MGAREG_AR5 0x1c74
+#define MGAREG_AR6 0x1c78
+
+#define MGAREG_CXBNDRY 0x1c80
+#define MGAREG_FXBNDRY 0x1c84
+#define MGAREG_YDSTLEN 0x1c88
+#define MGAREG_PITCH 0x1c8c
+
+#define MGAREG_YDST 0x1c90
+#define MGAREG_YDSTORG 0x1c94
+#define MGAREG_YTOP 0x1c98
+#define MGAREG_YBOT 0x1c9c
+
+#define MGAREG_CXLEFT 0x1ca0
+#define MGAREG_CXRIGHT 0x1ca4
+#define MGAREG_FXLEFT 0x1ca8
+#define MGAREG_FXRIGHT 0x1cac
+
+#define MGAREG_XDST 0x1cb0
+
+#define MGAREG_DR0 0x1cc0
+#define MGAREG_DR1 0x1cc4
+#define MGAREG_DR2 0x1cc8
+#define MGAREG_DR3 0x1ccc
+
+#define MGAREG_DR4 0x1cd0
+#define MGAREG_DR5 0x1cd4
+#define MGAREG_DR6 0x1cd8
+#define MGAREG_DR7 0x1cdc
+
+#define MGAREG_DR8 0x1ce0
+#define MGAREG_DR9 0x1ce4
+#define MGAREG_DR10 0x1ce8
+#define MGAREG_DR11 0x1cec
+
+#define MGAREG_DR12 0x1cf0
+#define MGAREG_DR13 0x1cf4
+#define MGAREG_DR14 0x1cf8
+#define MGAREG_DR15 0x1cfc
+
+#define MGAREG_SRCORG 0x2cb4
+#define MGAREG_DSTORG 0x2cb8
+
+/* add or OR this to one of the previous "power registers" to start
+ the drawing engine */
+
+#define MGAREG_EXEC 0x0100
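+
+/* Illustrative use (assumes the WREG32() accessor from mgag200_drv.h):
+   writing a drawing register through its +0x0100 alias both loads the
+   value and kicks the engine, e.g.
+
+	WREG32(MGAREG_DWGCTL + MGAREG_EXEC, dwgctl);
+*/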
+
+#define MGAREG_FIFOSTATUS 0x1e10
+#define MGAREG_Status 0x1e14
+#define MGAREG_CACHEFLUSH 0x1fff
+#define MGAREG_ICLEAR 0x1e18
+#define MGAREG_IEN 0x1e1c
+
+#define MGAREG_VCOUNT 0x1e20
+
+#define MGAREG_Reset 0x1e40
+
+#define MGAREG_OPMODE 0x1e54
+
+/* Warp Registers */
+#define MGAREG_WIADDR 0x1dc0
+#define MGAREG_WIADDR2 0x1dd8
+#define MGAREG_WGETMSB 0x1dc8
+#define MGAREG_WVRTXSZ 0x1dcc
+#define MGAREG_WACCEPTSEQ 0x1dd4
+#define MGAREG_WMISC 0x1e70
+
+#define MGAREG_MEMCTL 0x2e08
+
+/* OPMODE register additives */
+
+#define MGAOPM_DMA_GENERAL (0x00 << 2)
+#define MGAOPM_DMA_BLIT (0x01 << 2)
+#define MGAOPM_DMA_VECTOR (0x10 << 2)
+
+/* MACCESS register additives */
+#define MGAMAC_PW8 0x00
+#define MGAMAC_PW16 0x01
+#define MGAMAC_PW24 0x03 /* not a typo */
+#define MGAMAC_PW32 0x02 /* not a typo */
+#define MGAMAC_BYPASS332 0x10000000
+#define MGAMAC_NODITHER 0x40000000
+#define MGAMAC_DIT555 0x80000000
+
+/* DWGCTL register additives */
+
+/* Lines */
+
+#define MGADWG_LINE_OPEN 0x00
+#define MGADWG_AUTOLINE_OPEN 0x01
+#define MGADWG_LINE_CLOSE 0x02
+#define MGADWG_AUTOLINE_CLOSE 0x03
+
+/* Trapezoids */
+#define MGADWG_TRAP 0x04
+#define MGADWG_TEXTURE_TRAP 0x06
+
+/* BitBlts */
+
+#define MGADWG_BITBLT 0x08
+#define MGADWG_FBITBLT 0x0c
+#define MGADWG_ILOAD 0x09
+#define MGADWG_ILOAD_SCALE 0x0d
+#define MGADWG_ILOAD_FILTER 0x0f
+#define MGADWG_ILOAD_HIQH 0x07
+#define MGADWG_ILOAD_HIQHV 0x0e
+#define MGADWG_IDUMP 0x0a
+
+/* atype access to WRAM */
+
+#define MGADWG_RPL ( 0x00 << 4 )
+#define MGADWG_RSTR ( 0x01 << 4 )
+#define MGADWG_ZI ( 0x03 << 4 )
+#define MGADWG_BLK ( 0x04 << 4 )
+#define MGADWG_I ( 0x07 << 4 )
+
+/* specifies whether bit blits are linear or xy */
+#define MGADWG_LINEAR ( 0x01 << 7 )
+
+/* z drawing mode. use MGADWG_NOZCMP for always */
+
+#define MGADWG_NOZCMP ( 0x00 << 8 )
+#define MGADWG_ZE ( 0x02 << 8 )
+#define MGADWG_ZNE ( 0x03 << 8 )
+#define MGADWG_ZLT ( 0x04 << 8 )
+#define MGADWG_ZLTE ( 0x05 << 8 )
+#define MGADWG_GT ( 0x06 << 8 )
+#define MGADWG_GTE ( 0x07 << 8 )
+
+/* use this to force colour expansion circuitry to do its stuff */
+
+#define MGADWG_SOLID ( 0x01 << 11 )
+
+/* ar register at zero */
+
+#define MGADWG_ARZERO ( 0x01 << 12 )
+
+#define MGADWG_SGNZERO ( 0x01 << 13 )
+
+#define MGADWG_SHIFTZERO ( 0x01 << 14 )
+
+/* See table on 4-43 for bop ALU operations */
+
+/* See table on 4-44 for translucidity masks */
+
+#define MGADWG_BMONOLEF ( 0x00 << 25 )
+#define MGADWG_BMONOWF ( 0x04 << 25 )
+#define MGADWG_BPLAN ( 0x01 << 25 )
+
+/* note that if bfcol is specified and you're doing a bitblt, it causes
+ a fbitblt to be performed, so check that you obey the fbitblt rules */
+
+#define MGADWG_BFCOL ( 0x02 << 25 )
+#define MGADWG_BUYUV ( 0x0e << 25 )
+#define MGADWG_BU32BGR ( 0x03 << 25 )
+#define MGADWG_BU32RGB ( 0x07 << 25 )
+#define MGADWG_BU24BGR ( 0x0b << 25 )
+#define MGADWG_BU24RGB ( 0x0f << 25 )
+
+#define MGADWG_PATTERN ( 0x01 << 29 )
+#define MGADWG_TRANSC ( 0x01 << 30 )
+#define MGAREG_MISC_WRITE 0x3c2
+#define MGAREG_MISC_READ 0x3cc
+#define MGAREG_MEM_MISC_WRITE 0x1fc2
+#define MGAREG_MEM_MISC_READ 0x1fcc
+
+#define MGAREG_MISC_IOADSEL (0x1 << 0)
+#define MGAREG_MISC_RAMMAPEN (0x1 << 1)
+#define MGAREG_MISC_CLK_SEL_VGA25 (0x0 << 2)
+#define MGAREG_MISC_CLK_SEL_VGA28 (0x1 << 2)
+#define MGAREG_MISC_CLK_SEL_MGA_PIX (0x2 << 2)
+#define MGAREG_MISC_CLK_SEL_MGA_MSK (0x3 << 2)
+#define MGAREG_MISC_VIDEO_DIS (0x1 << 4)
+#define MGAREG_MISC_HIGH_PG_SEL (0x1 << 5)
+
+/* MMIO VGA registers */
+#define MGAREG_SEQ_INDEX 0x1fc4
+#define MGAREG_SEQ_DATA 0x1fc5
+#define MGAREG_CRTC_INDEX 0x1fd4
+#define MGAREG_CRTC_DATA 0x1fd5
+#define MGAREG_CRTCEXT_INDEX 0x1fde
+#define MGAREG_CRTCEXT_DATA 0x1fdf
+
+/* Cursor X and Y position */
+#define MGA_CURPOSXL 0x3c0c
+#define MGA_CURPOSXH 0x3c0d
+#define MGA_CURPOSYL 0x3c0e
+#define MGA_CURPOSYH 0x3c0f
+
+/* MGA bits for registers PCI_OPTION_REG */
+#define MGA1064_OPT_SYS_CLK_PCI ( 0x00 << 0 )
+#define MGA1064_OPT_SYS_CLK_PLL ( 0x01 << 0 )
+#define MGA1064_OPT_SYS_CLK_EXT ( 0x02 << 0 )
+#define MGA1064_OPT_SYS_CLK_MSK ( 0x03 << 0 )
+
+#define MGA1064_OPT_SYS_CLK_DIS ( 0x01 << 2 )
+#define MGA1064_OPT_G_CLK_DIV_1 ( 0x01 << 3 )
+#define MGA1064_OPT_M_CLK_DIV_1 ( 0x01 << 4 )
+
+#define MGA1064_OPT_SYS_PLL_PDN ( 0x01 << 5 )
+#define MGA1064_OPT_VGA_ION ( 0x01 << 8 )
+
+/* MGA registers in PCI config space */
+#define PCI_MGA_INDEX 0x44
+#define PCI_MGA_DATA 0x48
+#define PCI_MGA_OPTION 0x40
+#define PCI_MGA_OPTION2 0x50
+#define PCI_MGA_OPTION3 0x54
+
+#define RAMDAC_OFFSET 0x3c00
+
+/* TVP3026 direct registers */
+
+#define TVP3026_INDEX 0x00
+#define TVP3026_WADR_PAL 0x00
+#define TVP3026_COL_PAL 0x01
+#define TVP3026_PIX_RD_MSK 0x02
+#define TVP3026_RADR_PAL 0x03
+#define TVP3026_CUR_COL_ADDR 0x04
+#define TVP3026_CUR_COL_DATA 0x05
+#define TVP3026_DATA 0x0a
+#define TVP3026_CUR_RAM 0x0b
+#define TVP3026_CUR_XLOW 0x0c
+#define TVP3026_CUR_XHI 0x0d
+#define TVP3026_CUR_YLOW 0x0e
+#define TVP3026_CUR_YHI 0x0f
+
+/* TVP3026 indirect registers */
+
+#define TVP3026_SILICON_REV 0x01
+#define TVP3026_CURSOR_CTL 0x06
+#define TVP3026_LATCH_CTL 0x0f
+#define TVP3026_TRUE_COLOR_CTL 0x18
+#define TVP3026_MUX_CTL 0x19
+#define TVP3026_CLK_SEL 0x1a
+#define TVP3026_PAL_PAGE 0x1c
+#define TVP3026_GEN_CTL 0x1d
+#define TVP3026_MISC_CTL 0x1e
+#define TVP3026_GEN_IO_CTL 0x2a
+#define TVP3026_GEN_IO_DATA 0x2b
+#define TVP3026_PLL_ADDR 0x2c
+#define TVP3026_PIX_CLK_DATA 0x2d
+#define TVP3026_MEM_CLK_DATA 0x2e
+#define TVP3026_LOAD_CLK_DATA 0x2f
+#define TVP3026_KEY_RED_LOW 0x32
+#define TVP3026_KEY_RED_HI 0x33
+#define TVP3026_KEY_GREEN_LOW 0x34
+#define TVP3026_KEY_GREEN_HI 0x35
+#define TVP3026_KEY_BLUE_LOW 0x36
+#define TVP3026_KEY_BLUE_HI 0x37
+#define TVP3026_KEY_CTL 0x38
+#define TVP3026_MCLK_CTL 0x39
+#define TVP3026_SENSE_TEST 0x3a
+#define TVP3026_TEST_DATA 0x3b
+#define TVP3026_CRC_LSB 0x3c
+#define TVP3026_CRC_MSB 0x3d
+#define TVP3026_CRC_CTL 0x3e
+#define TVP3026_ID 0x3f
+#define TVP3026_RESET 0xff
+
+/* MGA1064 DAC Register file */
+/* MGA1064 direct registers */
+
+#define MGA1064_INDEX 0x00
+#define MGA1064_WADR_PAL 0x00
+#define MGA1064_SPAREREG 0x00
+#define MGA1064_COL_PAL 0x01
+#define MGA1064_PIX_RD_MSK 0x02
+#define MGA1064_RADR_PAL 0x03
+#define MGA1064_DATA 0x0a
+
+#define MGA1064_CUR_XLOW 0x0c
+#define MGA1064_CUR_XHI 0x0d
+#define MGA1064_CUR_YLOW 0x0e
+#define MGA1064_CUR_YHI 0x0f
+
+/* MGA1064 indirect registers */
+#define MGA1064_DVI_PIPE_CTL 0x03
+#define MGA1064_CURSOR_BASE_ADR_LOW 0x04
+#define MGA1064_CURSOR_BASE_ADR_HI 0x05
+#define MGA1064_CURSOR_CTL 0x06
+#define MGA1064_CURSOR_COL0_RED 0x08
+#define MGA1064_CURSOR_COL0_GREEN 0x09
+#define MGA1064_CURSOR_COL0_BLUE 0x0a
+
+#define MGA1064_CURSOR_COL1_RED 0x0c
+#define MGA1064_CURSOR_COL1_GREEN 0x0d
+#define MGA1064_CURSOR_COL1_BLUE 0x0e
+
+#define MGA1064_CURSOR_COL2_RED 0x010
+#define MGA1064_CURSOR_COL2_GREEN 0x011
+#define MGA1064_CURSOR_COL2_BLUE 0x012
+
+#define MGA1064_VREF_CTL 0x018
+
+#define MGA1064_MUL_CTL 0x19
+#define MGA1064_MUL_CTL_8bits 0x0
+#define MGA1064_MUL_CTL_15bits 0x01
+#define MGA1064_MUL_CTL_16bits 0x02
+#define MGA1064_MUL_CTL_24bits 0x03
+#define MGA1064_MUL_CTL_32bits 0x04
+#define MGA1064_MUL_CTL_2G8V16bits 0x05
+#define MGA1064_MUL_CTL_G16V16bits 0x06
+#define MGA1064_MUL_CTL_32_24bits 0x07
+
+#define MGA1064_PIX_CLK_CTL 0x1a
+#define MGA1064_PIX_CLK_CTL_CLK_DIS ( 0x01 << 2 )
+#define MGA1064_PIX_CLK_CTL_CLK_POW_DOWN ( 0x01 << 3 )
+#define MGA1064_PIX_CLK_CTL_SEL_PCI ( 0x00 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_PLL ( 0x01 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_EXT ( 0x02 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_MSK ( 0x03 << 0 )
+
+#define MGA1064_GEN_CTL 0x1d
+#define MGA1064_GEN_CTL_SYNC_ON_GREEN_DIS (0x01 << 5)
+#define MGA1064_MISC_CTL 0x1e
+#define MGA1064_MISC_CTL_DAC_EN ( 0x01 << 0 )
+#define MGA1064_MISC_CTL_VGA ( 0x01 << 1 )
+#define MGA1064_MISC_CTL_DIS_CON ( 0x03 << 1 )
+#define MGA1064_MISC_CTL_MAFC ( 0x02 << 1 )
+#define MGA1064_MISC_CTL_VGA8 ( 0x01 << 3 )
+#define MGA1064_MISC_CTL_DAC_RAM_CS ( 0x01 << 4 )
+
+#define MGA1064_GEN_IO_CTL2 0x29
+#define MGA1064_GEN_IO_CTL 0x2a
+#define MGA1064_GEN_IO_DATA 0x2b
+#define MGA1064_SYS_PLL_M 0x2c
+#define MGA1064_SYS_PLL_N 0x2d
+#define MGA1064_SYS_PLL_P 0x2e
+#define MGA1064_SYS_PLL_STAT 0x2f
+
+#define MGA1064_REMHEADCTL 0x30
+#define MGA1064_REMHEADCTL_CLKDIS ( 0x01 << 0 )
+#define MGA1064_REMHEADCTL_CLKSL_OFF ( 0x00 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_PLL ( 0x01 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_PCI ( 0x02 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_MSK ( 0x03 << 1 )
+
+#define MGA1064_REMHEADCTL2 0x31
+
+#define MGA1064_ZOOM_CTL 0x38
+#define MGA1064_SENSE_TST 0x3a
+
+#define MGA1064_CRC_LSB 0x3c
+#define MGA1064_CRC_MSB 0x3d
+#define MGA1064_CRC_CTL 0x3e
+#define MGA1064_COL_KEY_MSK_LSB 0x40
+#define MGA1064_COL_KEY_MSK_MSB 0x41
+#define MGA1064_COL_KEY_LSB 0x42
+#define MGA1064_COL_KEY_MSB 0x43
+#define MGA1064_PIX_PLLA_M 0x44
+#define MGA1064_PIX_PLLA_N 0x45
+#define MGA1064_PIX_PLLA_P 0x46
+#define MGA1064_PIX_PLLB_M 0x48
+#define MGA1064_PIX_PLLB_N 0x49
+#define MGA1064_PIX_PLLB_P 0x4a
+#define MGA1064_PIX_PLLC_M 0x4c
+#define MGA1064_PIX_PLLC_N 0x4d
+#define MGA1064_PIX_PLLC_P 0x4e
+
+#define MGA1064_PIX_PLL_STAT 0x4f
+
+/*Added for G450 dual head*/
+
+#define MGA1064_VID_PLL_STAT 0x8c
+#define MGA1064_VID_PLL_P 0x8D
+#define MGA1064_VID_PLL_M 0x8E
+#define MGA1064_VID_PLL_N 0x8F
+
+/* Modified PLL for G200 Winbond (G200WB) */
+#define MGA1064_WB_PIX_PLLC_M 0xb7
+#define MGA1064_WB_PIX_PLLC_N 0xb6
+#define MGA1064_WB_PIX_PLLC_P 0xb8
+
+/* Modified PLL for G200 Maxim (G200EV) */
+#define MGA1064_EV_PIX_PLLC_M 0xb6
+#define MGA1064_EV_PIX_PLLC_N 0xb7
+#define MGA1064_EV_PIX_PLLC_P 0xb8
+
+/* Modified PLL for G200 EH */
+#define MGA1064_EH_PIX_PLLC_M 0xb6
+#define MGA1064_EH_PIX_PLLC_N 0xb7
+#define MGA1064_EH_PIX_PLLC_P 0xb8
+
+/* Modified PLL for G200 Maxim (G200ER) */
+#define MGA1064_ER_PIX_PLLC_M 0xb7
+#define MGA1064_ER_PIX_PLLC_N 0xb6
+#define MGA1064_ER_PIX_PLLC_P 0xb8
+
+#define MGA1064_DISP_CTL 0x8a
+#define MGA1064_DISP_CTL_DAC1OUTSEL_MASK 0x01
+#define MGA1064_DISP_CTL_DAC1OUTSEL_DIS 0x00
+#define MGA1064_DISP_CTL_DAC1OUTSEL_EN 0x01
+#define MGA1064_DISP_CTL_DAC2OUTSEL_MASK (0x03 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_DIS 0x00
+#define MGA1064_DISP_CTL_DAC2OUTSEL_CRTC1 (0x01 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_CRTC2 (0x02 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_TVE (0x03 << 2)
+#define MGA1064_DISP_CTL_PANOUTSEL_MASK (0x03 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_DIS 0x00
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC1 (0x01 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC2RGB (0x02 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC2656 (0x03 << 5)
+
+#define MGA1064_SYNC_CTL 0x8b
+
+#define MGA1064_PWR_CTL 0xa0
+#define MGA1064_PWR_CTL_DAC2_EN (0x01 << 0)
+#define MGA1064_PWR_CTL_VID_PLL_EN (0x01 << 1)
+#define MGA1064_PWR_CTL_PANEL_EN (0x01 << 2)
+#define MGA1064_PWR_CTL_RFIFO_EN (0x01 << 3)
+#define MGA1064_PWR_CTL_CFIFO_EN (0x01 << 4)
+
+#define MGA1064_PAN_CTL 0xa2
+
+/* Using crtc2 */
+#define MGAREG2_C2CTL 0x10
+#define MGAREG2_C2HPARAM 0x14
+#define MGAREG2_C2HSYNC 0x18
+#define MGAREG2_C2VPARAM 0x1c
+#define MGAREG2_C2VSYNC 0x20
+#define MGAREG2_C2STARTADD0 0x28
+
+#define MGAREG2_C2OFFSET 0x40
+#define MGAREG2_C2DATACTL 0x4c
+
+#define MGAREG_C2CTL 0x3c10
+#define MGAREG_C2CTL_C2_EN 0x01
+
+#define MGAREG_C2_HIPRILVL_M (0x07 << 4)
+#define MGAREG_C2_MAXHIPRI_M (0x07 << 8)
+
+#define MGAREG_C2CTL_PIXCLKSEL_MASK (0x03 << 1)
+#define MGAREG_C2CTL_PIXCLKSELH_MASK (0x01 << 14)
+#define MGAREG_C2CTL_PIXCLKSEL_PCICLK 0x00
+#define MGAREG_C2CTL_PIXCLKSEL_VDOCLK (0x01 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_PIXELPLL (0x02 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_VIDEOPLL (0x03 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_VDCLK (0x01 << 14)
+
+#define MGAREG_C2CTL_PIXCLKSEL_CRISTAL (0x01 << 1) | (0x01 << 14)
+#define MGAREG_C2CTL_PIXCLKSEL_SYSTEMPLL (0x02 << 1) | (0x01 << 14)
+
+#define MGAREG_C2CTL_PIXCLKDIS_MASK (0x01 << 3)
+#define MGAREG_C2CTL_PIXCLKDIS_DISABLE (0x01 << 3)
+
+#define MGAREG_C2CTL_CRTCDACSEL_MASK (0x01 << 20)
+#define MGAREG_C2CTL_CRTCDACSEL_CRTC1 0x00
+#define MGAREG_C2CTL_CRTCDACSEL_CRTC2 (0x01 << 20)
+
+#define MGAREG_C2HPARAM 0x3c14
+#define MGAREG_C2HSYNC 0x3c18
+#define MGAREG_C2VPARAM 0x3c1c
+#define MGAREG_C2VSYNC 0x3c20
+#define MGAREG_C2STARTADD0 0x3c28
+
+#define MGAREG_C2OFFSET 0x3c40
+#define MGAREG_C2DATACTL 0x3c4c
+
+/* video register */
+
+#define MGAREG_BESA1C3ORG 0x3d60
+#define MGAREG_BESA1CORG 0x3d10
+#define MGAREG_BESA1ORG 0x3d00
+#define MGAREG_BESCTL 0x3d20
+#define MGAREG_BESGLOBCTL 0x3dc0
+#define MGAREG_BESHCOORD 0x3d28
+#define MGAREG_BESHISCAL 0x3d30
+#define MGAREG_BESHSRCEND 0x3d3c
+#define MGAREG_BESHSRCLST 0x3d50
+#define MGAREG_BESHSRCST 0x3d38
+#define MGAREG_BESLUMACTL 0x3d40
+#define MGAREG_BESPITCH 0x3d24
+#define MGAREG_BESV1SRCLST 0x3d54
+#define MGAREG_BESV1WGHT 0x3d48
+#define MGAREG_BESVCOORD 0x3d2c
+#define MGAREG_BESVISCAL 0x3d34
+
+/* texture engine registers */
+
+#define MGAREG_TMR0 0x2c00
+#define MGAREG_TMR1 0x2c04
+#define MGAREG_TMR2 0x2c08
+#define MGAREG_TMR3 0x2c0c
+#define MGAREG_TMR4 0x2c10
+#define MGAREG_TMR5 0x2c14
+#define MGAREG_TMR6 0x2c18
+#define MGAREG_TMR7 0x2c1c
+#define MGAREG_TMR8 0x2c20
+#define MGAREG_TEXORG 0x2c24
+#define MGAREG_TEXWIDTH 0x2c28
+#define MGAREG_TEXHEIGHT 0x2c2c
+#define MGAREG_TEXCTL 0x2c30
+# define MGA_TW4 (0x00000000)
+# define MGA_TW8 (0x00000001)
+# define MGA_TW15 (0x00000002)
+# define MGA_TW16 (0x00000003)
+# define MGA_TW12 (0x00000004)
+# define MGA_TW32 (0x00000006)
+# define MGA_TW8A (0x00000007)
+# define MGA_TW8AL (0x00000008)
+# define MGA_TW422 (0x0000000A)
+# define MGA_TW422UYVY (0x0000000B)
+# define MGA_PITCHLIN (0x00000100)
+# define MGA_NOPERSPECTIVE (0x00200000)
+# define MGA_TAKEY (0x02000000)
+# define MGA_TAMASK (0x04000000)
+# define MGA_CLAMPUV (0x18000000)
+# define MGA_TEXMODULATE (0x20000000)
+#define MGAREG_TEXCTL2 0x2c3c
+# define MGA_G400_TC2_MAGIC (0x00008000)
+# define MGA_TC2_DECALBLEND (0x00000001)
+# define MGA_TC2_IDECAL (0x00000002)
+# define MGA_TC2_DECALDIS (0x00000004)
+# define MGA_TC2_CKSTRANSDIS (0x00000010)
+# define MGA_TC2_BORDEREN (0x00000020)
+# define MGA_TC2_SPECEN (0x00000040)
+# define MGA_TC2_DUALTEX (0x00000080)
+# define MGA_TC2_TABLEFOG (0x00000100)
+# define MGA_TC2_BUMPMAP (0x00000200)
+# define MGA_TC2_SELECT_TMU1 (0x80000000)
+#define MGAREG_TEXTRANS 0x2c34
+#define MGAREG_TEXTRANSHIGH 0x2c38
+#define MGAREG_TEXFILTER 0x2c58
+# define MGA_MIN_NRST (0x00000000)
+# define MGA_MIN_BILIN (0x00000002)
+# define MGA_MIN_ANISO (0x0000000D)
+# define MGA_MAG_NRST (0x00000000)
+# define MGA_MAG_BILIN (0x00000020)
+# define MGA_FILTERALPHA (0x00100000)
+#define MGAREG_ALPHASTART 0x2c70
+#define MGAREG_ALPHAXINC 0x2c74
+#define MGAREG_ALPHAYINC 0x2c78
+#define MGAREG_ALPHACTRL 0x2c7c
+# define MGA_SRC_ZERO (0x00000000)
+# define MGA_SRC_ONE (0x00000001)
+# define MGA_SRC_DST_COLOR (0x00000002)
+# define MGA_SRC_ONE_MINUS_DST_COLOR (0x00000003)
+# define MGA_SRC_ALPHA (0x00000004)
+# define MGA_SRC_ONE_MINUS_SRC_ALPHA (0x00000005)
+# define MGA_SRC_DST_ALPHA (0x00000006)
+# define MGA_SRC_ONE_MINUS_DST_ALPHA (0x00000007)
+# define MGA_SRC_SRC_ALPHA_SATURATE (0x00000008)
+# define MGA_SRC_BLEND_MASK (0x0000000f)
+# define MGA_DST_ZERO (0x00000000)
+# define MGA_DST_ONE (0x00000010)
+# define MGA_DST_SRC_COLOR (0x00000020)
+# define MGA_DST_ONE_MINUS_SRC_COLOR (0x00000030)
+# define MGA_DST_SRC_ALPHA (0x00000040)
+# define MGA_DST_ONE_MINUS_SRC_ALPHA (0x00000050)
+# define MGA_DST_DST_ALPHA (0x00000060)
+# define MGA_DST_ONE_MINUS_DST_ALPHA (0x00000070)
+# define MGA_DST_BLEND_MASK (0x00000070)
+# define MGA_ALPHACHANNEL (0x00000100)
+# define MGA_VIDEOALPHA (0x00000200)
+# define MGA_DIFFUSEDALPHA (0x01000000)
+# define MGA_MODULATEDALPHA (0x02000000)
+#define MGAREG_TDUALSTAGE0 (0x2CF8)
+#define MGAREG_TDUALSTAGE1 (0x2CFC)
+# define MGA_TDS_COLOR_ARG2_DIFFUSE (0x00000000)
+# define MGA_TDS_COLOR_ARG2_SPECULAR (0x00000001)
+# define MGA_TDS_COLOR_ARG2_FCOL (0x00000002)
+# define MGA_TDS_COLOR_ARG2_PREVSTAGE (0x00000003)
+# define MGA_TDS_COLOR_ALPHA_DIFFUSE (0x00000000)
+# define MGA_TDS_COLOR_ALPHA_FCOL (0x00000004)
+# define MGA_TDS_COLOR_ALPHA_CURRTEX (0x00000008)
+# define MGA_TDS_COLOR_ALPHA_PREVTEX (0x0000000c)
+# define MGA_TDS_COLOR_ALPHA_PREVSTAGE (0x00000010)
+# define MGA_TDS_COLOR_ARG1_REPLICATEALPHA (0x00000020)
+# define MGA_TDS_COLOR_ARG1_INV (0x00000040)
+# define MGA_TDS_COLOR_ARG2_REPLICATEALPHA (0x00000080)
+# define MGA_TDS_COLOR_ARG2_INV (0x00000100)
+# define MGA_TDS_COLOR_ALPHA1INV (0x00000200)
+# define MGA_TDS_COLOR_ALPHA2INV (0x00000400)
+# define MGA_TDS_COLOR_ARG1MUL_ALPHA1 (0x00000800)
+# define MGA_TDS_COLOR_ARG2MUL_ALPHA2 (0x00001000)
+# define MGA_TDS_COLOR_ARG1ADD_MULOUT (0x00002000)
+# define MGA_TDS_COLOR_ARG2ADD_MULOUT (0x00004000)
+# define MGA_TDS_COLOR_MODBRIGHT_2X (0x00008000)
+# define MGA_TDS_COLOR_MODBRIGHT_4X (0x00010000)
+# define MGA_TDS_COLOR_ADD_SUB (0x00000000)
+# define MGA_TDS_COLOR_ADD_ADD (0x00020000)
+# define MGA_TDS_COLOR_ADD2X (0x00040000)
+# define MGA_TDS_COLOR_ADDBIAS (0x00080000)
+# define MGA_TDS_COLOR_BLEND (0x00100000)
+# define MGA_TDS_COLOR_SEL_ARG1 (0x00000000)
+# define MGA_TDS_COLOR_SEL_ARG2 (0x00200000)
+# define MGA_TDS_COLOR_SEL_ADD (0x00400000)
+# define MGA_TDS_COLOR_SEL_MUL (0x00600000)
+# define MGA_TDS_ALPHA_ARG1_INV (0x00800000)
+# define MGA_TDS_ALPHA_ARG2_DIFFUSE (0x00000000)
+# define MGA_TDS_ALPHA_ARG2_FCOL (0x01000000)
+# define MGA_TDS_ALPHA_ARG2_PREVTEX (0x02000000)
+# define MGA_TDS_ALPHA_ARG2_PREVSTAGE (0x03000000)
+# define MGA_TDS_ALPHA_ARG2_INV (0x04000000)
+# define MGA_TDS_ALPHA_ADD (0x08000000)
+# define MGA_TDS_ALPHA_ADDBIAS (0x10000000)
+# define MGA_TDS_ALPHA_ADD2X (0x20000000)
+# define MGA_TDS_ALPHA_SEL_ARG1 (0x00000000)
+# define MGA_TDS_ALPHA_SEL_ARG2 (0x40000000)
+# define MGA_TDS_ALPHA_SEL_ADD (0x80000000)
+# define MGA_TDS_ALPHA_SEL_MUL (0xc0000000)
+
+#define MGAREG_DWGSYNC 0x2c4c
+
+#define MGAREG_AGP_PLL 0x1e4c
+#define MGA_AGP2XPLL_ENABLE 0x1
+#define MGA_AGP2XPLL_DISABLE 0x0
+
+#endif
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
new file mode 100644
index 00000000000..5a00e90696d
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -0,0 +1,432 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include "mgag200_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct mga_device *
+mgag200_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct mga_device, ttm.bdev);
+}
+
+static int
+mgag200_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+mgag200_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int mgag200_ttm_global_init(struct mga_device *mdev)
+{
+	struct drm_global_reference *global_ref;
+	int r;
+
+	global_ref = &mdev->ttm.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &mgag200_ttm_mem_global_init;
+	global_ref->release = &mgag200_ttm_mem_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
+		return r;
+	}
+
+	mdev->ttm.bo_global_ref.mem_glob =
+		mdev->ttm.mem_global_ref.object;
+	global_ref = &mdev->ttm.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&mdev->ttm.mem_global_ref);
+		return r;
+	}
+	return 0;
+}
+
+static void
+mgag200_ttm_global_release(struct mga_device *mdev)
+{
+	if (mdev->ttm.mem_global_ref.release == NULL)
+		return;
+
+	drm_global_item_unref(&mdev->ttm.bo_global_ref.ref);
+	drm_global_item_unref(&mdev->ttm.mem_global_ref);
+	mdev->ttm.mem_global_ref.release = NULL;
+}
+
+static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct mgag200_bo *bo;
+
+ bo = container_of(tbo, struct mgag200_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+static bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &mgag200_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int
+mgag200_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct mgag200_bo *mgabo = mgag200_bo(bo);
+
+ if (!mgag200_ttm_bo_is_mgag200_bo(bo))
+ return;
+
+ mgag200_ttm_placement(mgabo, TTM_PL_FLAG_SYSTEM);
+ *pl = mgabo->placement;
+}
+
+static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ struct mgag200_bo *mgabo = mgag200_bo(bo);
+
+ return drm_vma_node_verify_access(&mgabo->gem.vma_node, filp);
+}
+
+static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct mga_device *mdev = mgag200_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(mdev->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int mgag200_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+}
+
+static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func mgag200_tt_backend_func = {
+ .destroy = &mgag200_ttm_backend_destroy,
+};
+
+static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &mgag200_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int mgag200_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver mgag200_bo_driver = {
+ .ttm_tt_create = mgag200_ttm_tt_create,
+ .ttm_tt_populate = mgag200_ttm_tt_populate,
+ .ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
+ .init_mem_type = mgag200_bo_init_mem_type,
+ .evict_flags = mgag200_bo_evict_flags,
+ .move = mgag200_bo_move,
+ .verify_access = mgag200_bo_verify_access,
+ .io_mem_reserve = &mgag200_ttm_io_mem_reserve,
+ .io_mem_free = &mgag200_ttm_io_mem_free,
+};
+
+int mgag200_mm_init(struct mga_device *mdev)
+{
+ int ret;
+ struct drm_device *dev = mdev->dev;
+ struct ttm_bo_device *bdev = &mdev->ttm.bdev;
+
+ ret = mgag200_ttm_global_init(mdev);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&mdev->ttm.bdev,
+ mdev->ttm.bo_global_ref.ref.object,
+ &mgag200_bo_driver,
+ dev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+		DRM_ERROR("Error initialising bo driver: %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, mdev->mc.vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
+ return 0;
+}
+
+void mgag200_mm_fini(struct mga_device *mdev)
+{
+ ttm_bo_device_release(&mdev->ttm.bdev);
+
+ mgag200_ttm_global_release(mdev);
+
+ arch_phys_wc_del(mdev->fb_mtrr);
+ mdev->fb_mtrr = 0;
+}
+
+void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
+{
+ u32 c = 0;
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
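+
+/*
+ * A scanout buffer is typically created with both domains so TTM can fall
+ * back to system RAM under memory pressure, e.g.
+ *
+ *	mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+ *
+ * which is exactly what mgag200_bo_create() below does.
+ */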
+
+int mgag200_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct mgag200_bo **pmgabo)
+{
+ struct mga_device *mdev = dev->dev_private;
+ struct mgag200_bo *mgabo;
+ size_t acc_size;
+ int ret;
+
+ mgabo = kzalloc(sizeof(struct mgag200_bo), GFP_KERNEL);
+ if (!mgabo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &mgabo->gem, size);
+ if (ret) {
+ kfree(mgabo);
+ return ret;
+ }
+
+ mgabo->bo.bdev = &mdev->ttm.bdev;
+
+ mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&mdev->ttm.bdev, size,
+ sizeof(struct mgag200_bo));
+
+ ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
+ ttm_bo_type_device, &mgabo->placement,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
+ NULL, mgag200_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pmgabo = mgabo;
+ return 0;
+}
+
+static inline u64 mgag200_bo_gpu_offset(struct mgag200_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = mgag200_bo_gpu_offset(bo);
+ return 0;
+ }
+
+ mgag200_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = mgag200_bo_gpu_offset(bo);
+ return 0;
+}
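+
+/*
+ * Typical usage sketch (illustrative, error handling elided): pin a BO
+ * into VRAM for scanout and fetch its GPU offset, then drop the pin once
+ * the BO is no longer scanned out:
+ *
+ *	u64 gpu_addr;
+ *	ret = mgag200_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ *	...
+ *	mgag200_bo_unpin(bo);
+ */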
+
+int mgag200_bo_unpin(struct mgag200_bo *bo)
+{
+	int i, ret;
+
+	if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int mgag200_bo_push_sysram(struct mgag200_bo *bo)
+{
+	int i, ret;
+
+	if (!bo->pin_count) {
+		DRM_ERROR("push_sysram on unpinned bo %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret) {
+		DRM_ERROR("pushing to SYSRAM failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+int mgag200_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct mga_device *mdev;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ mdev = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev);
+}
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
new file mode 100644
index 00000000000..f1238896785
--- /dev/null
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -0,0 +1,35 @@
+config DRM_MSM
+ tristate "MSM DRM"
+ depends on DRM
+ depends on MSM_IOMMU
+ depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ select DRM_KMS_HELPER
+ select SHMEM
+ select TMPFS
+ default y
+ help
+ DRM/KMS driver for MSM/snapdragon.
+
+config DRM_MSM_FBDEV
+ bool "Enable legacy fbdev support for MSM modesetting driver"
+ depends on DRM_MSM
+ select DRM_KMS_FB_HELPER
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ default y
+ help
+ Choose this option if you have a need for the legacy fbdev
+	  support. Note that this option also provides Linux console
+	  support on top of the MSM modesetting driver.
+
+config DRM_MSM_REGISTER_LOGGING
+ bool "MSM DRM register logging"
+ depends on DRM_MSM
+ default n
+ help
+ Compile in support for logging register reads/writes in a format
+	  that can be parsed by the envytools demsm tool. If enabled, register
+	  logging can be switched on via the msm.reglog=y module param.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
new file mode 100644
index 00000000000..93ca49c8df4
--- /dev/null
+++ b/drivers/gpu/drm/msm/Makefile
@@ -0,0 +1,43 @@
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
+ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
+ ccflags-y += -Werror
+endif
+
+msm-y := \
+ adreno/adreno_gpu.o \
+ adreno/a3xx_gpu.o \
+ hdmi/hdmi.o \
+ hdmi/hdmi_audio.o \
+ hdmi/hdmi_bridge.o \
+ hdmi/hdmi_connector.o \
+ hdmi/hdmi_i2c.o \
+ hdmi/hdmi_phy_8960.o \
+ hdmi/hdmi_phy_8x60.o \
+ hdmi/hdmi_phy_8x74.o \
+ mdp/mdp_format.o \
+ mdp/mdp_kms.o \
+ mdp/mdp4/mdp4_crtc.o \
+ mdp/mdp4/mdp4_dtv_encoder.o \
+ mdp/mdp4/mdp4_irq.o \
+ mdp/mdp4/mdp4_kms.o \
+ mdp/mdp4/mdp4_plane.o \
+ mdp/mdp5/mdp5_crtc.o \
+ mdp/mdp5/mdp5_encoder.o \
+ mdp/mdp5/mdp5_irq.o \
+ mdp/mdp5/mdp5_kms.o \
+ mdp/mdp5/mdp5_plane.o \
+ mdp/mdp5/mdp5_smp.o \
+ msm_drv.o \
+ msm_fb.o \
+ msm_gem.o \
+ msm_gem_prime.o \
+ msm_gem_submit.o \
+ msm_gpu.o \
+ msm_iommu.o \
+ msm_perf.o \
+ msm_rd.o \
+ msm_ringbuffer.o
+
+msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
+
+obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES
new file mode 100644
index 00000000000..9c4255b9802
--- /dev/null
+++ b/drivers/gpu/drm/msm/NOTES
@@ -0,0 +1,87 @@
+NOTES about msm drm/kms driver:
+
+In the current snapdragon SoCs, we have (at least) 3 different
+display controller blocks at play:
+ + MDP3 - ?? seems to be what is on geeksphone peak device
+ + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
+ + MDP5 - snapdragon 800
+
+(I don't have a completely clear picture on which display controller
+maps to which part #)
+
+Plus a handful of blocks around them for HDMI/DSI/etc output.
+
+And on gpu side of things:
+ + zero, one, or two 2d cores (z180)
+ + and either a2xx or a3xx 3d core.
+
+But, HDMI/DSI/etc blocks seem like they can be shared across multiple
+display controller blocks. And I for sure don't want to have to deal
+with N different kms devices from xf86-video-freedreno. Plus, it
+seems like we can do some clever tricks like use GPU to trigger
+pageflip after rendering completes (ie. have the kms/crtc code build
+up gpu cmdstream to update scanout and write FLUSH register after).
+
+So, the approach is one drm driver, with some modularity. Different
+'struct msm_kms' implementations, depending on display controller.
+And one or more 'struct msm_gpu' for the various different gpu sub-
+modules.
+
+(Second part is not implemented yet. So far this is just basic KMS
+driver, and not exposing any custom ioctls to userspace for now.)
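+
+(As a rough sketch of the intended modularity - member names here are
+illustrative, not final - the kms interface could look something like:
+
+    struct msm_kms_funcs {
+            int (*hw_init)(struct msm_kms *kms);
+            irqreturn_t (*irq)(struct msm_kms *kms);
+            int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+            void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+            void (*destroy)(struct msm_kms *kms);
+    };
+
+with one implementation per display controller generation.)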
+
+The kms module provides the plane, crtc, and encoder objects, and
+loads whatever connectors are appropriate.
+
+For MDP4, the mapping is:
+
+ plane -> PIPE{RGBn,VGn} \
+ crtc -> OVLP{n} + DMA{P,S,E} (??) |-> MDP "device"
+ encoder -> DTV/LCDC/DSI (within MDP4) /
+ connector -> HDMI/DSI/etc --> other device(s)
+
+Since the irq's that drm core mostly cares about are vblank/framedone,
+we'll let msm_mdp4_kms provide the irq install/uninstall/etc functions
+and treat the MDP4 block's irq as "the" irq. Even though the connectors
+may have their own irqs which they install themselves. For this reason
+the display controller is the "master" device.
+
+For MDP5, the mapping is:
+
+ plane -> PIPE{RGBn,VIGn} \
+ crtc -> LM (layer mixer) |-> MDP "device"
+ encoder -> INTF /
+ connector -> HDMI/DSI/eDP/etc --> other device(s)
+
+Unlike MDP4, it appears we can get by with a single encoder, rather
+than needing a different implementation for DTV, DSI, etc. (Ie. the
+register interface is the same, just different bases.)
+
+Also unlike MDP4, with MDP5 all the IRQs for other blocks (HDMI, DSI,
+etc) are routed through MDP.
+
+And finally, MDP5 has this "Shared Memory Pool" (called "SMP"), from
+which blocks need to be allocated to the active pipes based on fetch
+stride.
+
+Each connector probably ends up being a separate device, just for the
+logistics of finding/mapping io region, irq, etc. Ideally we would
+have a better way than just stashing the platform device in a global
+(ie. like DT super-node.. but I don't have any snapdragon hw yet that
+is using DT).
+
+Note that so far I've not been able to get any docs on the hw, and it
+seems that access to such docs would prevent me from working on the
+freedreno gallium driver. So there may be some mistakes in register
+names (I had to invent a few, since no sufficient hint was given in
+the downstream android fbdev driver), bitfield sizes, etc. My current
+state of understanding the registers is given in the envytools rnndb
+files at:
+
+ https://github.com/freedreno/envytools/tree/master/rnndb
+ (the mdp4/hdmi/dsi directories)
+
+These files are used both for a parser tool (in the same tree) to
+parse logged register reads/writes (both from downstream android fbdev
+driver, and this driver with register logging enabled), as well as to
+generate the register level headers.
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
new file mode 100644
index 00000000000..85d615e7d62
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -0,0 +1,1555 @@
+#ifndef A2XX_XML
+#define A2XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+enum a2xx_rb_dither_type {
+ DITHER_PIXEL = 0,
+ DITHER_SUBPIXEL = 1,
+};
+
+enum a2xx_colorformatx {
+ COLORX_4_4_4_4 = 0,
+ COLORX_1_5_5_5 = 1,
+ COLORX_5_6_5 = 2,
+ COLORX_8 = 3,
+ COLORX_8_8 = 4,
+ COLORX_8_8_8_8 = 5,
+ COLORX_S8_8_8_8 = 6,
+ COLORX_16_FLOAT = 7,
+ COLORX_16_16_FLOAT = 8,
+ COLORX_16_16_16_16_FLOAT = 9,
+ COLORX_32_FLOAT = 10,
+ COLORX_32_32_FLOAT = 11,
+ COLORX_32_32_32_32_FLOAT = 12,
+ COLORX_2_3_3 = 13,
+ COLORX_8_8_8 = 14,
+};
+
+enum a2xx_sq_surfaceformat {
+ FMT_1_REVERSE = 0,
+ FMT_1 = 1,
+ FMT_8 = 2,
+ FMT_1_5_5_5 = 3,
+ FMT_5_6_5 = 4,
+ FMT_6_5_5 = 5,
+ FMT_8_8_8_8 = 6,
+ FMT_2_10_10_10 = 7,
+ FMT_8_A = 8,
+ FMT_8_B = 9,
+ FMT_8_8 = 10,
+ FMT_Cr_Y1_Cb_Y0 = 11,
+ FMT_Y1_Cr_Y0_Cb = 12,
+ FMT_5_5_5_1 = 13,
+ FMT_8_8_8_8_A = 14,
+ FMT_4_4_4_4 = 15,
+ FMT_10_11_11 = 16,
+ FMT_11_11_10 = 17,
+ FMT_DXT1 = 18,
+ FMT_DXT2_3 = 19,
+ FMT_DXT4_5 = 20,
+ FMT_24_8 = 22,
+ FMT_24_8_FLOAT = 23,
+ FMT_16 = 24,
+ FMT_16_16 = 25,
+ FMT_16_16_16_16 = 26,
+ FMT_16_EXPAND = 27,
+ FMT_16_16_EXPAND = 28,
+ FMT_16_16_16_16_EXPAND = 29,
+ FMT_16_FLOAT = 30,
+ FMT_16_16_FLOAT = 31,
+ FMT_16_16_16_16_FLOAT = 32,
+ FMT_32 = 33,
+ FMT_32_32 = 34,
+ FMT_32_32_32_32 = 35,
+ FMT_32_FLOAT = 36,
+ FMT_32_32_FLOAT = 37,
+ FMT_32_32_32_32_FLOAT = 38,
+ FMT_32_AS_8 = 39,
+ FMT_32_AS_8_8 = 40,
+ FMT_16_MPEG = 41,
+ FMT_16_16_MPEG = 42,
+ FMT_8_INTERLACED = 43,
+ FMT_32_AS_8_INTERLACED = 44,
+ FMT_32_AS_8_8_INTERLACED = 45,
+ FMT_16_INTERLACED = 46,
+ FMT_16_MPEG_INTERLACED = 47,
+ FMT_16_16_MPEG_INTERLACED = 48,
+ FMT_DXN = 49,
+ FMT_8_8_8_8_AS_16_16_16_16 = 50,
+ FMT_DXT1_AS_16_16_16_16 = 51,
+ FMT_DXT2_3_AS_16_16_16_16 = 52,
+ FMT_DXT4_5_AS_16_16_16_16 = 53,
+ FMT_2_10_10_10_AS_16_16_16_16 = 54,
+ FMT_10_11_11_AS_16_16_16_16 = 55,
+ FMT_11_11_10_AS_16_16_16_16 = 56,
+ FMT_32_32_32_FLOAT = 57,
+ FMT_DXT3A = 58,
+ FMT_DXT5A = 59,
+ FMT_CTX1 = 60,
+ FMT_DXT3A_AS_1_1_1_1 = 61,
+};
+
+enum a2xx_sq_ps_vtx_mode {
+ POSITION_1_VECTOR = 0,
+ POSITION_2_VECTORS_UNUSED = 1,
+ POSITION_2_VECTORS_SPRITE = 2,
+ POSITION_2_VECTORS_EDGE = 3,
+ POSITION_2_VECTORS_KILL = 4,
+ POSITION_2_VECTORS_SPRITE_KILL = 5,
+ POSITION_2_VECTORS_EDGE_KILL = 6,
+ MULTIPASS = 7,
+};
+
+enum a2xx_sq_sample_cntl {
+ CENTROIDS_ONLY = 0,
+ CENTERS_ONLY = 1,
+ CENTROIDS_AND_CENTERS = 2,
+};
+
+enum a2xx_dx_clip_space {
+ DXCLIP_OPENGL = 0,
+ DXCLIP_DIRECTX = 1,
+};
+
+enum a2xx_pa_su_sc_polymode {
+ POLY_DISABLED = 0,
+ POLY_DUALMODE = 1,
+};
+
+enum a2xx_rb_edram_mode {
+ EDRAM_NOP = 0,
+ COLOR_DEPTH = 4,
+ DEPTH_ONLY = 5,
+ EDRAM_COPY = 6,
+};
+
+enum a2xx_pa_sc_pattern_bit_order {
+ LITTLE = 0,
+ BIG = 1,
+};
+
+enum a2xx_pa_sc_auto_reset_cntl {
+ NEVER = 0,
+ EACH_PRIMITIVE = 1,
+ EACH_PACKET = 2,
+};
+
+enum a2xx_pa_pixcenter {
+ PIXCENTER_D3D = 0,
+ PIXCENTER_OGL = 1,
+};
+
+enum a2xx_pa_roundmode {
+ TRUNCATE = 0,
+ ROUND = 1,
+ ROUNDTOEVEN = 2,
+ ROUNDTOODD = 3,
+};
+
+enum a2xx_pa_quantmode {
+ ONE_SIXTEENTH = 0,
+ ONE_EIGTH = 1,
+ ONE_QUARTER = 2,
+ ONE_HALF = 3,
+ ONE = 4,
+};
+
+enum a2xx_rb_copy_sample_select {
+ SAMPLE_0 = 0,
+ SAMPLE_1 = 1,
+ SAMPLE_2 = 2,
+ SAMPLE_3 = 3,
+ SAMPLE_01 = 4,
+ SAMPLE_23 = 5,
+ SAMPLE_0123 = 6,
+};
+
+enum adreno_mmu_clnt_beh {
+ BEH_NEVR = 0,
+ BEH_TRAN_RNG = 1,
+ BEH_TRAN_FLT = 2,
+};
+
+enum sq_tex_clamp {
+ SQ_TEX_WRAP = 0,
+ SQ_TEX_MIRROR = 1,
+ SQ_TEX_CLAMP_LAST_TEXEL = 2,
+ SQ_TEX_MIRROR_ONCE_LAST_TEXEL = 3,
+ SQ_TEX_CLAMP_HALF_BORDER = 4,
+ SQ_TEX_MIRROR_ONCE_HALF_BORDER = 5,
+ SQ_TEX_CLAMP_BORDER = 6,
+ SQ_TEX_MIRROR_ONCE_BORDER = 7,
+};
+
+enum sq_tex_swiz {
+ SQ_TEX_X = 0,
+ SQ_TEX_Y = 1,
+ SQ_TEX_Z = 2,
+ SQ_TEX_W = 3,
+ SQ_TEX_ZERO = 4,
+ SQ_TEX_ONE = 5,
+};
+
+enum sq_tex_filter {
+ SQ_TEX_FILTER_POINT = 0,
+ SQ_TEX_FILTER_BILINEAR = 1,
+ SQ_TEX_FILTER_BICUBIC = 2,
+};
+
+#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001
+
+#define REG_A2XX_RBBM_CNTL 0x0000003b
+
+#define REG_A2XX_RBBM_SOFT_RESET 0x0000003c
+
+#define REG_A2XX_CP_PFP_UCODE_ADDR 0x000000c0
+
+#define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1
+
+#define REG_A2XX_MH_MMU_CONFIG 0x00000040
+#define A2XX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001
+#define A2XX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002
+#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030
+#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4
+static inline uint32_t A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0
+#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300
+#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00
+#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000
+#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000
+#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000
+#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000
+#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18
+static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000
+#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20
+static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000
+#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22
+static inline uint32_t A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000
+#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24
+static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
+}
+
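The inline helpers above follow the file's uniform rules-ng-ng pattern: each one shifts a value into its field and masks the result, so a complete register word is just the bitwise OR of the individual pieces. A minimal usage sketch built only from the definitions above (values shown for illustration, not part of the generated file):

	uint32_t cfg;

	cfg = A2XX_MH_MMU_CONFIG_MMU_ENABLE |
	      A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
	      A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
	      A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(BEH_NEVR);
	/* cfg == 0x00000051: the enable bit, plus BEH_TRAN_RNG (1) in
	 * bits 5:4 and 7:6; BEH_NEVR (0) contributes nothing. */
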
+#define REG_A2XX_MH_MMU_VA_RANGE 0x00000041
+
+#define REG_A2XX_MH_MMU_PT_BASE 0x00000042
+
+#define REG_A2XX_MH_MMU_PAGE_FAULT 0x00000043
+
+#define REG_A2XX_MH_MMU_TRAN_ERROR 0x00000044
+
+#define REG_A2XX_MH_MMU_INVALIDATE 0x00000045
+
+#define REG_A2XX_MH_MMU_MPU_BASE 0x00000046
+
+#define REG_A2XX_MH_MMU_MPU_END 0x00000047
+
+#define REG_A2XX_NQWAIT_UNTIL 0x00000394
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_HI 0x00000398
+
+#define REG_A2XX_RBBM_DEBUG 0x0000039b
+
+#define REG_A2XX_RBBM_PM_OVERRIDE1 0x0000039c
+
+#define REG_A2XX_RBBM_PM_OVERRIDE2 0x0000039d
+
+#define REG_A2XX_RBBM_DEBUG_OUT 0x000003a0
+
+#define REG_A2XX_RBBM_DEBUG_CNTL 0x000003a1
+
+#define REG_A2XX_RBBM_READ_ERROR 0x000003b3
+
+#define REG_A2XX_RBBM_INT_CNTL 0x000003b4
+
+#define REG_A2XX_RBBM_INT_STATUS 0x000003b5
+
+#define REG_A2XX_RBBM_INT_ACK 0x000003b6
+
+#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7
+
+#define REG_A2XX_RBBM_PERIPHID1 0x000003f9
+
+#define REG_A2XX_RBBM_PERIPHID2 0x000003fa
+
+#define REG_A2XX_CP_PERFMON_CNTL 0x00000444
+
+#define REG_A2XX_CP_PERFCOUNTER_SELECT 0x00000445
+
+#define REG_A2XX_CP_PERFCOUNTER_LO 0x00000446
+
+#define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447
+
+#define REG_A2XX_RBBM_STATUS 0x000005d0
+#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f
+#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0
+static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val)
+{
+ return ((val) << A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT) & A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK;
+}
+#define A2XX_RBBM_STATUS_TC_BUSY 0x00000020
+#define A2XX_RBBM_STATUS_HIRQ_PENDING 0x00000100
+#define A2XX_RBBM_STATUS_CPRQ_PENDING 0x00000200
+#define A2XX_RBBM_STATUS_CFRQ_PENDING 0x00000400
+#define A2XX_RBBM_STATUS_PFRQ_PENDING 0x00000800
+#define A2XX_RBBM_STATUS_VGT_BUSY_NO_DMA 0x00001000
+#define A2XX_RBBM_STATUS_RBBM_WU_BUSY 0x00004000
+#define A2XX_RBBM_STATUS_CP_NRT_BUSY 0x00010000
+#define A2XX_RBBM_STATUS_MH_BUSY 0x00040000
+#define A2XX_RBBM_STATUS_MH_COHERENCY_BUSY 0x00080000
+#define A2XX_RBBM_STATUS_SX_BUSY 0x00200000
+#define A2XX_RBBM_STATUS_TPC_BUSY 0x00400000
+#define A2XX_RBBM_STATUS_SC_CNTX_BUSY 0x01000000
+#define A2XX_RBBM_STATUS_PA_BUSY 0x02000000
+#define A2XX_RBBM_STATUS_VGT_BUSY 0x04000000
+#define A2XX_RBBM_STATUS_SQ_CNTX17_BUSY 0x08000000
+#define A2XX_RBBM_STATUS_SQ_CNTX0_BUSY 0x10000000
+#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000
+#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000
+
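RBBM_STATUS is a status word that is read back rather than composed, so the single-bit defines above are used to test a value returned from the register. A hedged sketch, where gpu_read() stands in for whatever register accessor the driver provides (it is not part of this header) and GUI_ACTIVE is treated as the overall busy indicator:

	uint32_t status = gpu_read(gpu, REG_A2XX_RBBM_STATUS); /* hypothetical accessor */
	bool idle = !(status & A2XX_RBBM_STATUS_GUI_ACTIVE);
	/* CMDFIFO_AVAIL sits at shift 0, so the mask alone extracts it: */
	uint32_t fifo_free = status & A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK;
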
+#define REG_A2XX_MH_ARBITER_CONFIG 0x00000a40
+#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK 0x0000003f
+#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT 0
+static inline uint32_t A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(uint32_t val)
+{
+ return ((val) << A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK;
+}
+#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_GRANULARITY 0x00000040
+#define A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE 0x00000080
+#define A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE 0x00000100
+#define A2XX_MH_ARBITER_CONFIG_L2_ARB_CONTROL 0x00000200
+#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK 0x00001c00
+#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT 10
+static inline uint32_t A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT) & A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK;
+}
+#define A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE 0x00002000
+#define A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE 0x00004000
+#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE 0x00008000
+#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK 0x003f0000
+#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT 16
+static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
+{
+ return ((val) << A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK;
+}
+#define A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE 0x00400000
+#define A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE 0x00800000
+#define A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE 0x01000000
+#define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE 0x02000000
+#define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE 0x04000000
+
+#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
+#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
+#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A2XX_A220_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
+#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT 5
+static inline uint32_t A2XX_A220_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+static inline uint32_t REG_A2XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
+
+#define REG_A2XX_PC_DEBUG_CNTL 0x00000c38
+
+#define REG_A2XX_PC_DEBUG_DATA 0x00000c39
+
+#define REG_A2XX_PA_SC_VIZ_QUERY_STATUS 0x00000c44
+
+#define REG_A2XX_GRAS_DEBUG_CNTL 0x00000c80
+
+#define REG_A2XX_PA_SU_DEBUG_CNTL 0x00000c80
+
+#define REG_A2XX_GRAS_DEBUG_DATA 0x00000c81
+
+#define REG_A2XX_PA_SU_DEBUG_DATA 0x00000c81
+
+#define REG_A2XX_PA_SU_FACE_DATA 0x00000c86
+
+#define REG_A2XX_SQ_GPR_MANAGEMENT 0x00000d00
+
+#define REG_A2XX_SQ_FLOW_CONTROL 0x00000d01
+
+#define REG_A2XX_SQ_INST_STORE_MANAGMENT 0x00000d02
+
+#define REG_A2XX_SQ_DEBUG_MISC 0x00000d05
+
+#define REG_A2XX_SQ_INT_CNTL 0x00000d34
+
+#define REG_A2XX_SQ_INT_STATUS 0x00000d35
+
+#define REG_A2XX_SQ_INT_ACK 0x00000d36
+
+#define REG_A2XX_SQ_DEBUG_INPUT_FSM 0x00000dae
+
+#define REG_A2XX_SQ_DEBUG_CONST_MGR_FSM 0x00000daf
+
+#define REG_A2XX_SQ_DEBUG_TP_FSM 0x00000db0
+
+#define REG_A2XX_SQ_DEBUG_FSM_ALU_0 0x00000db1
+
+#define REG_A2XX_SQ_DEBUG_FSM_ALU_1 0x00000db2
+
+#define REG_A2XX_SQ_DEBUG_EXP_ALLOC 0x00000db3
+
+#define REG_A2XX_SQ_DEBUG_PTR_BUFF 0x00000db4
+
+#define REG_A2XX_SQ_DEBUG_GPR_VTX 0x00000db5
+
+#define REG_A2XX_SQ_DEBUG_GPR_PIX 0x00000db6
+
+#define REG_A2XX_SQ_DEBUG_TB_STATUS_SEL 0x00000db7
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_0 0x00000db8
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_1 0x00000db9
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_STATUS_REG 0x00000dba
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_STATE_MEM 0x00000dbb
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_0 0x00000dbc
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x00000dbd
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x00000dbe
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x00000dbf
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x00000dc0
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATE_MEM 0x00000dc1
+
+#define REG_A2XX_TC_CNTL_STATUS 0x00000e00
+#define A2XX_TC_CNTL_STATUS_L2_INVALIDATE 0x00000001
+
+#define REG_A2XX_TP0_CHICKEN 0x00000e1e
+
+#define REG_A2XX_RB_BC_CONTROL 0x00000f01
+#define A2XX_RB_BC_CONTROL_ACCUM_LINEAR_MODE_ENABLE 0x00000001
+#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK 0x00000006
+#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT 1
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_DISABLE_EDRAM_CAM 0x00000008
+#define A2XX_RB_BC_CONTROL_DISABLE_EZ_FAST_CONTEXT_SWITCH 0x00000010
+#define A2XX_RB_BC_CONTROL_DISABLE_EZ_NULL_ZCMD_DROP 0x00000020
+#define A2XX_RB_BC_CONTROL_DISABLE_LZ_NULL_ZCMD_DROP 0x00000040
+#define A2XX_RB_BC_CONTROL_ENABLE_AZ_THROTTLE 0x00000080
+#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK 0x00001f00
+#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT 8
+static inline uint32_t A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT) & A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_ENABLE_CRC_UPDATE 0x00004000
+#define A2XX_RB_BC_CONTROL_CRC_MODE 0x00008000
+#define A2XX_RB_BC_CONTROL_DISABLE_SAMPLE_COUNTERS 0x00010000
+#define A2XX_RB_BC_CONTROL_DISABLE_ACCUM 0x00020000
+#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK 0x003c0000
+#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT 18
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK;
+}
+#define A2XX_RB_BC_CONTROL_LINEAR_PERFORMANCE_ENABLE 0x00400000
+#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK 0x07800000
+#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT 23
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK 0x18000000
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT 27
+static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_LINEAR_MODE_ENABLE 0x20000000
+#define A2XX_RB_BC_CONTROL_CRC_SYSTEM 0x40000000
+#define A2XX_RB_BC_CONTROL_RESERVED6 0x80000000
+
+#define REG_A2XX_RB_EDRAM_INFO 0x00000f02
+
+#define REG_A2XX_RB_DEBUG_CNTL 0x00000f26
+
+#define REG_A2XX_RB_DEBUG_DATA 0x00000f27
+
+#define REG_A2XX_RB_SURFACE_INFO 0x00002000
+
+#define REG_A2XX_RB_COLOR_INFO 0x00002001
+#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f
+#define A2XX_RB_COLOR_INFO_FORMAT__SHIFT 0
+static inline uint32_t A2XX_RB_COLOR_INFO_FORMAT(enum a2xx_colorformatx val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_FORMAT__SHIFT) & A2XX_RB_COLOR_INFO_FORMAT__MASK;
+}
+#define A2XX_RB_COLOR_INFO_ROUND_MODE__MASK 0x00000030
+#define A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT 4
+static inline uint32_t A2XX_RB_COLOR_INFO_ROUND_MODE(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT) & A2XX_RB_COLOR_INFO_ROUND_MODE__MASK;
+}
+#define A2XX_RB_COLOR_INFO_LINEAR 0x00000040
+#define A2XX_RB_COLOR_INFO_ENDIAN__MASK 0x00000180
+#define A2XX_RB_COLOR_INFO_ENDIAN__SHIFT 7
+static inline uint32_t A2XX_RB_COLOR_INFO_ENDIAN(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_ENDIAN__SHIFT) & A2XX_RB_COLOR_INFO_ENDIAN__MASK;
+}
+#define A2XX_RB_COLOR_INFO_SWAP__MASK 0x00000600
+#define A2XX_RB_COLOR_INFO_SWAP__SHIFT 9
+static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_SWAP__SHIFT) & A2XX_RB_COLOR_INFO_SWAP__MASK;
+}
+#define A2XX_RB_COLOR_INFO_BASE__MASK 0xfffff000
+#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
+static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
+{
+ return ((val >> 10) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
+}
+
+#define REG_A2XX_RB_DEPTH_INFO 0x00002002
+#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
+#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
+{
+ return ((val) << A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
+}
+#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
+#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
+static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
+{
+ return ((val >> 10) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+}
+
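Unlike most helpers in this file, A2XX_RB_COLOR_INFO_BASE and A2XX_RB_DEPTH_INFO_DEPTH_BASE shift the incoming value right by 10 before placing it in bits 31:12, so the caller passes a raw base that must be 1024-aligned rather than a pre-scaled field value. A worked example using only definitions from this header:

	/* a 1024-aligned base of 0x1800 becomes field value 6 (0x1800 >> 10): */
	uint32_t info = A2XX_RB_COLOR_INFO_LINEAR |
			A2XX_RB_COLOR_INFO_BASE(0x1800);
	/* info == 0x00006040: (6 << 12) | the LINEAR bit */
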
+#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
+
+#define REG_A2XX_COHER_DEST_BASE_0 0x00002006
+
+#define REG_A2XX_PA_SC_SCREEN_SCISSOR_TL 0x0000200e
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A2XX_PA_SC_SCREEN_SCISSOR_BR 0x0000200f
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A2XX_PA_SC_WINDOW_OFFSET 0x00002080
+#define A2XX_PA_SC_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_X(int32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_X__MASK;
+}
+#define A2XX_PA_SC_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_Y(int32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_Y__MASK;
+}
+#define A2XX_PA_SC_WINDOW_OFFSET_DISABLE 0x80000000
+
+#define REG_A2XX_PA_SC_WINDOW_SCISSOR_TL 0x00002081
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A2XX_PA_SC_WINDOW_SCISSOR_BR 0x00002082
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A2XX_UNKNOWN_2010 0x00002010
+
+#define REG_A2XX_VGT_MAX_VTX_INDX 0x00002100
+
+#define REG_A2XX_VGT_MIN_VTX_INDX 0x00002101
+
+#define REG_A2XX_VGT_INDX_OFFSET 0x00002102
+
+#define REG_A2XX_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x00002103
+
+#define REG_A2XX_RB_COLOR_MASK 0x00002104
+#define A2XX_RB_COLOR_MASK_WRITE_RED 0x00000001
+#define A2XX_RB_COLOR_MASK_WRITE_GREEN 0x00000002
+#define A2XX_RB_COLOR_MASK_WRITE_BLUE 0x00000004
+#define A2XX_RB_COLOR_MASK_WRITE_ALPHA 0x00000008
+
+#define REG_A2XX_RB_BLEND_RED 0x00002105
+
+#define REG_A2XX_RB_BLEND_GREEN 0x00002106
+
+#define REG_A2XX_RB_BLEND_BLUE 0x00002107
+
+#define REG_A2XX_RB_BLEND_ALPHA 0x00002108
+
+#define REG_A2XX_RB_FOG_COLOR 0x00002109
+
+#define REG_A2XX_RB_STENCILREFMASK_BF 0x0000210c
+#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A2XX_RB_STENCILREFMASK 0x0000210d
+#define A2XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A2XX_RB_ALPHA_REF 0x0000210e
+
+#define REG_A2XX_PA_CL_VPORT_XSCALE 0x0000210f
+#define A2XX_PA_CL_VPORT_XSCALE__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_XSCALE__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_XSCALE(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_XSCALE__SHIFT) & A2XX_PA_CL_VPORT_XSCALE__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_XOFFSET 0x00002110
+#define A2XX_PA_CL_VPORT_XOFFSET__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_XOFFSET__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_XOFFSET(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_XOFFSET__SHIFT) & A2XX_PA_CL_VPORT_XOFFSET__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_YSCALE 0x00002111
+#define A2XX_PA_CL_VPORT_YSCALE__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_YSCALE__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_YSCALE(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_YSCALE__SHIFT) & A2XX_PA_CL_VPORT_YSCALE__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_YOFFSET 0x00002112
+#define A2XX_PA_CL_VPORT_YOFFSET__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_YOFFSET__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_YOFFSET(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_YOFFSET__SHIFT) & A2XX_PA_CL_VPORT_YOFFSET__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_ZSCALE 0x00002113
+#define A2XX_PA_CL_VPORT_ZSCALE__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_ZSCALE__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_ZSCALE(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_ZSCALE__SHIFT) & A2XX_PA_CL_VPORT_ZSCALE__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_ZOFFSET 0x00002114
+#define A2XX_PA_CL_VPORT_ZOFFSET__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_ZOFFSET__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_ZOFFSET(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_ZOFFSET__SHIFT) & A2XX_PA_CL_VPORT_ZOFFSET__MASK;
+}
+
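The six viewport scale/offset registers store raw IEEE-754 single-precision bit patterns; the full-word mask and zero shift exist only to keep the generated pattern uniform. fui() itself is not defined in this header; it is assumed to be the usual float-to-uint32 bit cast, along these lines:

	#include <stdint.h>
	#include <string.h>

	/* assumed helper: reinterpret a float's bits as a uint32_t */
	static inline uint32_t fui(float f)
	{
		uint32_t u;
		memcpy(&u, &f, sizeof(u));
		return u;
	}

	uint32_t xscale = A2XX_PA_CL_VPORT_XSCALE(1.0f); /* 1.0f == 0x3f800000 */
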
+#define REG_A2XX_SQ_PROGRAM_CNTL 0x00002180
+#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK 0x000000ff
+#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT 0
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_REGS(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK 0x0000ff00
+#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT 8
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_REGS(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_VS_RESOURCE 0x00010000
+#define A2XX_SQ_PROGRAM_CNTL_PS_RESOURCE 0x00020000
+#define A2XX_SQ_PROGRAM_CNTL_PARAM_GEN 0x00040000
+#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_PIX 0x00080000
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK 0x00f00000
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT 20
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK 0x07000000
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT 24
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE(enum a2xx_sq_ps_vtx_mode val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK 0x78000000
+#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT 27
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_VTX 0x80000000
+
+#define REG_A2XX_SQ_CONTEXT_MISC 0x00002181
+#define A2XX_SQ_CONTEXT_MISC_INST_PRED_OPTIMIZE 0x00000001
+#define A2XX_SQ_CONTEXT_MISC_SC_OUTPUT_SCREEN_XY 0x00000002
+#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK 0x0000000c
+#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT 2
+static inline uint32_t A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL(enum a2xx_sq_sample_cntl val)
+{
+ return ((val) << A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT) & A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK;
+}
+#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK 0x0000ff00
+#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT 8
+static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
+{
+ return ((val) << A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT) & A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK;
+}
+#define A2XX_SQ_CONTEXT_MISC_PERFCOUNTER_REF 0x00010000
+#define A2XX_SQ_CONTEXT_MISC_YEILD_OPTIMIZE 0x00020000
+#define A2XX_SQ_CONTEXT_MISC_TX_CACHE_SEL 0x00040000
+
+#define REG_A2XX_SQ_INTERPOLATOR_CNTL 0x00002182
+
+#define REG_A2XX_SQ_WRAPPING_0 0x00002183
+
+#define REG_A2XX_SQ_WRAPPING_1 0x00002184
+
+#define REG_A2XX_SQ_PS_PROGRAM 0x000021f6
+
+#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7
+
+#define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9
+
+#define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc
+
+#define REG_A2XX_VGT_IMMED_DATA 0x000021fd
+
+#define REG_A2XX_RB_DEPTHCONTROL 0x00002200
+#define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001
+#define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002
+#define A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE 0x00000004
+#define A2XX_RB_DEPTHCONTROL_EARLY_Z_ENABLE 0x00000008
+#define A2XX_RB_DEPTHCONTROL_ZFUNC__MASK 0x00000070
+#define A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT 4
+static inline uint32_t A2XX_RB_DEPTHCONTROL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_ZFUNC__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_BACKFACE_ENABLE 0x00000080
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK 0x00000700
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT 8
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK 0x00003800
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT 11
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK 0x0001c000
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT 14
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK 0x000e0000
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT 17
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK 0x00700000
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT 20
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK 0x03800000
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT 23
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK 0x1c000000
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT 26
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK 0xe0000000
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT 29
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK;
+}
+
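The ZFUNC and STENCIL* fields take their values from enum adreno_compare_func and enum adreno_stencil_op, which live in the shared adreno_common.xml.h rather than in this file. Assuming the FUNC_LESS name from that header (value 1), a conventional less-than depth test with depth writes enabled would be packed as:

	uint32_t depthcontrol = A2XX_RB_DEPTHCONTROL_Z_ENABLE |
				A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE |
				A2XX_RB_DEPTHCONTROL_ZFUNC(FUNC_LESS);
	/* == 0x00000016 if FUNC_LESS is 1 */
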
+#define REG_A2XX_RB_BLEND_CONTROL 0x00002201
+#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK 0x0000001f
+#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT 0
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0
+#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum adreno_rb_blend_opcode val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK 0x00001f00
+#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT 8
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK 0x001f0000
+#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT 16
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000
+#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum adreno_rb_blend_opcode val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK 0x1f000000
+#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT 24
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE_ENABLE 0x20000000
+#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE 0x40000000
+
+#define REG_A2XX_RB_COLORCONTROL 0x00002202
+#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK 0x00000007
+#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT 0
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TEST_ENABLE 0x00000008
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_ENABLE 0x00000010
+#define A2XX_RB_COLORCONTROL_BLEND_DISABLE 0x00000020
+#define A2XX_RB_COLORCONTROL_VOB_ENABLE 0x00000040
+#define A2XX_RB_COLORCONTROL_VS_EXPORTS_FOG 0x00000080
+#define A2XX_RB_COLORCONTROL_ROP_CODE__MASK 0x00000f00
+#define A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT 8
+static inline uint32_t A2XX_RB_COLORCONTROL_ROP_CODE(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT) & A2XX_RB_COLORCONTROL_ROP_CODE__MASK;
+}
+#define A2XX_RB_COLORCONTROL_DITHER_MODE__MASK 0x00003000
+#define A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT 12
+static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_MODE__MASK;
+}
+#define A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK 0x0000c000
+#define A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT 14
+static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_TYPE(enum a2xx_rb_dither_type val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK;
+}
+#define A2XX_RB_COLORCONTROL_PIXEL_FOG 0x00010000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK 0x03000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT 24
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK 0x0c000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT 26
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK 0x30000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT 28
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK 0xc0000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT 30
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK;
+}
+
+#define REG_A2XX_VGT_CURRENT_BIN_ID_MAX 0x00002203
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK 0x00000007
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT 0
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK 0x00000038
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT 3
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_ROW(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK 0x000001c0
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT 6
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK;
+}
+
+#define REG_A2XX_PA_CL_CLIP_CNTL 0x00002204
+#define A2XX_PA_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
+#define A2XX_PA_CL_CLIP_CNTL_BOUNDARY_EDGE_FLAG_ENA 0x00040000
+#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK 0x00080000
+#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT 19
+static inline uint32_t A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF(enum a2xx_dx_clip_space val)
+{
+ return ((val) << A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT) & A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK;
+}
+#define A2XX_PA_CL_CLIP_CNTL_DIS_CLIP_ERR_DETECT 0x00100000
+#define A2XX_PA_CL_CLIP_CNTL_VTX_KILL_OR 0x00200000
+#define A2XX_PA_CL_CLIP_CNTL_XY_NAN_RETAIN 0x00400000
+#define A2XX_PA_CL_CLIP_CNTL_Z_NAN_RETAIN 0x00800000
+#define A2XX_PA_CL_CLIP_CNTL_W_NAN_RETAIN 0x01000000
+
+#define REG_A2XX_PA_SU_SC_MODE_CNTL 0x00002205
+#define A2XX_PA_SU_SC_MODE_CNTL_CULL_FRONT 0x00000001
+#define A2XX_PA_SU_SC_MODE_CNTL_CULL_BACK 0x00000002
+#define A2XX_PA_SU_SC_MODE_CNTL_FACE 0x00000004
+#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK 0x00000018
+#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT 3
+static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_POLYMODE(enum a2xx_pa_su_sc_polymode val)
+{
+ return ((val) << A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK;
+}
+#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK 0x000000e0
+#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT 5
+static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK;
+}
+#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK 0x00000700
+#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT 8
+static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK;
+}
+#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_FRONT_ENABLE 0x00000800
+#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_BACK_ENABLE 0x00001000
+#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_PARA_ENABLE 0x00002000
+#define A2XX_PA_SU_SC_MODE_CNTL_MSAA_ENABLE 0x00008000
+#define A2XX_PA_SU_SC_MODE_CNTL_VTX_WINDOW_OFFSET_ENABLE 0x00010000
+#define A2XX_PA_SU_SC_MODE_CNTL_LINE_STIPPLE_ENABLE 0x00040000
+#define A2XX_PA_SU_SC_MODE_CNTL_PROVOKING_VTX_LAST 0x00080000
+#define A2XX_PA_SU_SC_MODE_CNTL_PERSP_CORR_DIS 0x00100000
+#define A2XX_PA_SU_SC_MODE_CNTL_MULTI_PRIM_IB_ENA 0x00200000
+#define A2XX_PA_SU_SC_MODE_CNTL_QUAD_ORDER_ENABLE 0x00800000
+#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_ALL_TRI 0x02000000
+#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_FIRST_TRI_NEW_STATE 0x04000000
+#define A2XX_PA_SU_SC_MODE_CNTL_CLAMPED_FACENESS 0x10000000
+#define A2XX_PA_SU_SC_MODE_CNTL_ZERO_AREA_FACENESS 0x20000000
+#define A2XX_PA_SU_SC_MODE_CNTL_FACE_KILL_ENABLE 0x40000000
+#define A2XX_PA_SU_SC_MODE_CNTL_FACE_WRITE_ENABLE 0x80000000
+
+#define REG_A2XX_PA_CL_VTE_CNTL 0x00002206
+#define A2XX_PA_CL_VTE_CNTL_VPORT_X_SCALE_ENA 0x00000001
+#define A2XX_PA_CL_VTE_CNTL_VPORT_X_OFFSET_ENA 0x00000002
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_SCALE_ENA 0x00000004
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_OFFSET_ENA 0x00000008
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_SCALE_ENA 0x00000010
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_OFFSET_ENA 0x00000020
+#define A2XX_PA_CL_VTE_CNTL_VTX_XY_FMT 0x00000100
+#define A2XX_PA_CL_VTE_CNTL_VTX_Z_FMT 0x00000200
+#define A2XX_PA_CL_VTE_CNTL_VTX_W0_FMT 0x00000400
+#define A2XX_PA_CL_VTE_CNTL_PERFCOUNTER_REF 0x00000800
+
+#define REG_A2XX_VGT_CURRENT_BIN_ID_MIN 0x00002207
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK 0x00000007
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT 0
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK 0x00000038
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT 3
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_ROW(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK 0x000001c0
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT 6
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK;
+}
+
+#define REG_A2XX_RB_MODECONTROL 0x00002208
+#define A2XX_RB_MODECONTROL_EDRAM_MODE__MASK 0x00000007
+#define A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT 0
+static inline uint32_t A2XX_RB_MODECONTROL_EDRAM_MODE(enum a2xx_rb_edram_mode val)
+{
+ return ((val) << A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT) & A2XX_RB_MODECONTROL_EDRAM_MODE__MASK;
+}
+
+#define REG_A2XX_A220_RB_LRZ_VSC_CONTROL 0x00002209
+
+#define REG_A2XX_RB_SAMPLE_POS 0x0000220a
+
+#define REG_A2XX_CLEAR_COLOR 0x0000220b
+#define A2XX_CLEAR_COLOR_RED__MASK 0x000000ff
+#define A2XX_CLEAR_COLOR_RED__SHIFT 0
+static inline uint32_t A2XX_CLEAR_COLOR_RED(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_RED__SHIFT) & A2XX_CLEAR_COLOR_RED__MASK;
+}
+#define A2XX_CLEAR_COLOR_GREEN__MASK 0x0000ff00
+#define A2XX_CLEAR_COLOR_GREEN__SHIFT 8
+static inline uint32_t A2XX_CLEAR_COLOR_GREEN(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_GREEN__SHIFT) & A2XX_CLEAR_COLOR_GREEN__MASK;
+}
+#define A2XX_CLEAR_COLOR_BLUE__MASK 0x00ff0000
+#define A2XX_CLEAR_COLOR_BLUE__SHIFT 16
+static inline uint32_t A2XX_CLEAR_COLOR_BLUE(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_BLUE__SHIFT) & A2XX_CLEAR_COLOR_BLUE__MASK;
+}
+#define A2XX_CLEAR_COLOR_ALPHA__MASK 0xff000000
+#define A2XX_CLEAR_COLOR_ALPHA__SHIFT 24
+static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_ALPHA__SHIFT) & A2XX_CLEAR_COLOR_ALPHA__MASK;
+}
+
+#define REG_A2XX_A220_GRAS_CONTROL 0x00002210
+
+#define REG_A2XX_PA_SU_POINT_SIZE 0x00002280
+#define A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK 0x0000ffff
+#define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0
+static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
+}
+#define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000
+#define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16
+static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
+}
+
+#define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281
+#define A2XX_PA_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A2XX_PA_SU_LINE_CNTL 0x00002282
+#define A2XX_PA_SU_LINE_CNTL_WIDTH__MASK 0x0000ffff
+#define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0
+static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
+}
+
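PA_SU_POINT_SIZE, PA_SU_POINT_MINMAX and PA_SU_LINE_CNTL all encode dimensions the same way: the float argument is multiplied by 8 and truncated, i.e. stored as unsigned fixed point with three fractional bits. For example:

	/* 2.5 pixels: 2.5 * 8 == 20 == 0x14 */
	uint32_t psize = A2XX_PA_SU_POINT_SIZE_WIDTH(2.5f) |
			 A2XX_PA_SU_POINT_SIZE_HEIGHT(2.5f);
	/* psize == 0x00140014 */
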
+#define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283
+#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK 0x0000ffff
+#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT 0
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK;
+}
+#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK 0x00ff0000
+#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT 16
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK;
+}
+#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK 0x10000000
+#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT 28
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER(enum a2xx_pa_sc_pattern_bit_order val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK;
+}
+#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK 0x60000000
+#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT 29
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL(enum a2xx_pa_sc_auto_reset_cntl val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK;
+}
+
+#define REG_A2XX_PA_SC_VIZ_QUERY 0x00002293
+
+#define REG_A2XX_VGT_ENHANCE 0x00002294
+
+#define REG_A2XX_PA_SC_LINE_CNTL 0x00002300
+#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK 0x0000ffff
+#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT 0
+static inline uint32_t A2XX_PA_SC_LINE_CNTL_BRES_CNTL(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT) & A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK;
+}
+#define A2XX_PA_SC_LINE_CNTL_USE_BRES_CNTL 0x00000100
+#define A2XX_PA_SC_LINE_CNTL_EXPAND_LINE_WIDTH 0x00000200
+#define A2XX_PA_SC_LINE_CNTL_LAST_PIXEL 0x00000400
+
+#define REG_A2XX_PA_SC_AA_CONFIG 0x00002301
+
+#define REG_A2XX_PA_SU_VTX_CNTL 0x00002302
+#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK 0x00000001
+#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT 0
+static inline uint32_t A2XX_PA_SU_VTX_CNTL_PIX_CENTER(enum a2xx_pa_pixcenter val)
+{
+ return ((val) << A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT) & A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK;
+}
+#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK 0x00000006
+#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT 1
+static inline uint32_t A2XX_PA_SU_VTX_CNTL_ROUND_MODE(enum a2xx_pa_roundmode val)
+{
+ return ((val) << A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK;
+}
+#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK 0x00000380
+#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT 7
+static inline uint32_t A2XX_PA_SU_VTX_CNTL_QUANT_MODE(enum a2xx_pa_quantmode val)
+{
+ return ((val) << A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_VERT_CLIP_ADJ 0x00002303
+#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_VERT_CLIP_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_VERT_DISC_ADJ 0x00002304
+#define A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_VERT_DISC_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_HORZ_CLIP_ADJ 0x00002305
+#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_HORZ_CLIP_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_HORZ_DISC_ADJ 0x00002306
+#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_HORZ_DISC_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK;
+}
+
+#define REG_A2XX_SQ_VS_CONST 0x00002307
+#define A2XX_SQ_VS_CONST_BASE__MASK 0x000001ff
+#define A2XX_SQ_VS_CONST_BASE__SHIFT 0
+static inline uint32_t A2XX_SQ_VS_CONST_BASE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_VS_CONST_BASE__SHIFT) & A2XX_SQ_VS_CONST_BASE__MASK;
+}
+#define A2XX_SQ_VS_CONST_SIZE__MASK 0x001ff000
+#define A2XX_SQ_VS_CONST_SIZE__SHIFT 12
+static inline uint32_t A2XX_SQ_VS_CONST_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_VS_CONST_SIZE__SHIFT) & A2XX_SQ_VS_CONST_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_PS_CONST 0x00002308
+#define A2XX_SQ_PS_CONST_BASE__MASK 0x000001ff
+#define A2XX_SQ_PS_CONST_BASE__SHIFT 0
+static inline uint32_t A2XX_SQ_PS_CONST_BASE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PS_CONST_BASE__SHIFT) & A2XX_SQ_PS_CONST_BASE__MASK;
+}
+#define A2XX_SQ_PS_CONST_SIZE__MASK 0x001ff000
+#define A2XX_SQ_PS_CONST_SIZE__SHIFT 12
+static inline uint32_t A2XX_SQ_PS_CONST_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PS_CONST_SIZE__SHIFT) & A2XX_SQ_PS_CONST_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_DEBUG_MISC_0 0x00002309
+
+#define REG_A2XX_SQ_DEBUG_MISC_1 0x0000230a
+
+#define REG_A2XX_PA_SC_AA_MASK 0x00002312
+
+#define REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL 0x00002316
+
+#define REG_A2XX_VGT_OUT_DEALLOC_CNTL 0x00002317
+
+#define REG_A2XX_RB_COPY_CONTROL 0x00002318
+#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK 0x00000007
+#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT(enum a2xx_rb_copy_sample_select val)
+{
+ return ((val) << A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT) & A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK;
+}
+#define A2XX_RB_COPY_CONTROL_DEPTH_CLEAR_ENABLE 0x00000008
+#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK 0x000000f0
+#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT 4
+static inline uint32_t A2XX_RB_COPY_CONTROL_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT) & A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK;
+}
+
+#define REG_A2XX_RB_COPY_DEST_BASE 0x00002319
+
+#define REG_A2XX_RB_COPY_DEST_PITCH 0x0000231a
+#define A2XX_RB_COPY_DEST_PITCH__MASK 0xffffffff
+#define A2XX_RB_COPY_DEST_PITCH__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_DEST_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK;
+}
+
+#define REG_A2XX_RB_COPY_DEST_INFO 0x0000231b
+#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK 0x00000007
+#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN(enum adreno_rb_surface_endian val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT) & A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_LINEAR 0x00000008
+#define A2XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000f0
+#define A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 4
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_FORMAT(enum a2xx_colorformatx val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A2XX_RB_COPY_DEST_INFO_FORMAT__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
+#define A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_SWAP(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A2XX_RB_COPY_DEST_INFO_SWAP__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
+#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK 0x00003000
+#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT 12
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_TYPE(enum a2xx_rb_dither_type val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_WRITE_RED 0x00004000
+#define A2XX_RB_COPY_DEST_INFO_WRITE_GREEN 0x00008000
+#define A2XX_RB_COPY_DEST_INFO_WRITE_BLUE 0x00010000
+#define A2XX_RB_COPY_DEST_INFO_WRITE_ALPHA 0x00020000
+
+#define REG_A2XX_RB_COPY_DEST_OFFSET 0x0000231c
+#define A2XX_RB_COPY_DEST_OFFSET_X__MASK 0x00001fff
+#define A2XX_RB_COPY_DEST_OFFSET_X__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_X(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_OFFSET_X__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_X__MASK;
+}
+#define A2XX_RB_COPY_DEST_OFFSET_Y__MASK 0x03ffe000
+#define A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT 13
+static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_Y__MASK;
+}
+
+#define REG_A2XX_RB_DEPTH_CLEAR 0x0000231d
+
+#define REG_A2XX_RB_SAMPLE_COUNT_CTL 0x00002324
+
+#define REG_A2XX_RB_COLOR_DEST_MASK 0x00002326
+
+#define REG_A2XX_A225_GRAS_UCP0X 0x00002340
+
+#define REG_A2XX_A225_GRAS_UCP5W 0x00002357
+
+#define REG_A2XX_A225_GRAS_UCP_ENABLED 0x00002360
+
+#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE 0x00002380
+
+#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_OFFSET 0x00002383
+
+#define REG_A2XX_SQ_CONSTANT_0 0x00004000
+
+#define REG_A2XX_SQ_FETCH_0 0x00004800
+
+#define REG_A2XX_SQ_CF_BOOLEANS 0x00004900
+
+#define REG_A2XX_SQ_CF_LOOP 0x00004908
+
+#define REG_A2XX_COHER_SIZE_PM4 0x00000a29
+
+#define REG_A2XX_COHER_BASE_PM4 0x00000a2a
+
+#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b
+
+#define REG_A2XX_SQ_TEX_0 0x00000000
+#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00
+#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10
+static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
+{
+ return ((val) << A2XX_SQ_TEX_0_CLAMP_X__SHIFT) & A2XX_SQ_TEX_0_CLAMP_X__MASK;
+}
+#define A2XX_SQ_TEX_0_CLAMP_Y__MASK 0x0000e000
+#define A2XX_SQ_TEX_0_CLAMP_Y__SHIFT 13
+static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Y(enum sq_tex_clamp val)
+{
+ return ((val) << A2XX_SQ_TEX_0_CLAMP_Y__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Y__MASK;
+}
+#define A2XX_SQ_TEX_0_CLAMP_Z__MASK 0x00070000
+#define A2XX_SQ_TEX_0_CLAMP_Z__SHIFT 16
+static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
+{
+ return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
+}
+#define A2XX_SQ_TEX_0_PITCH__MASK 0xffc00000
+#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
+static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
+}
+
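As with A2XX_RB_COPY_DEST_PITCH above, A2XX_SQ_TEX_0_PITCH drops the low five bits of its argument, so the pitch is passed in bytes and is expected to be a multiple of 32. A sketch packing the first texture-descriptor dword from definitions in this file:

	/* 256-byte pitch -> field value 8 (256 >> 5), clamp-to-last-texel in X and Y: */
	uint32_t tex0 = A2XX_SQ_TEX_0_CLAMP_X(SQ_TEX_CLAMP_LAST_TEXEL) |
			A2XX_SQ_TEX_0_CLAMP_Y(SQ_TEX_CLAMP_LAST_TEXEL) |
			A2XX_SQ_TEX_0_PITCH(256);
	/* tex0 == 0x02004800 */
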
+#define REG_A2XX_SQ_TEX_1 0x00000001
+
+#define REG_A2XX_SQ_TEX_2 0x00000002
+#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff
+#define A2XX_SQ_TEX_2_WIDTH__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_2_WIDTH(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_2_WIDTH__SHIFT) & A2XX_SQ_TEX_2_WIDTH__MASK;
+}
+#define A2XX_SQ_TEX_2_HEIGHT__MASK 0x03ffe000
+#define A2XX_SQ_TEX_2_HEIGHT__SHIFT 13
+static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_3 0x00000003
+#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e
+#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_X__SHIFT) & A2XX_SQ_TEX_3_SWIZ_X__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_Y__MASK 0x00000070
+#define A2XX_SQ_TEX_3_SWIZ_Y__SHIFT 4
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Y(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_Y__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Y__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_Z__MASK 0x00000380
+#define A2XX_SQ_TEX_3_SWIZ_Z__SHIFT 7
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Z(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_Z__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Z__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_W__MASK 0x00001c00
+#define A2XX_SQ_TEX_3_SWIZ_W__SHIFT 10
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
+}
+#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000
+#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19
+static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK 0x00600000
+#define A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT 21
+static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
+}
+
+
+#endif /* A2XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
new file mode 100644
index 00000000000..a7be56163d2
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -0,0 +1,2259 @@
+#ifndef A3XX_XML
+#define A3XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a3xx_render_mode {
+ RB_RENDERING_PASS = 0,
+ RB_TILING_PASS = 1,
+ RB_RESOLVE_PASS = 2,
+};
+
+enum a3xx_tile_mode {
+ LINEAR = 0,
+ TILE_32X32 = 2,
+};
+
+enum a3xx_threadmode {
+ MULTI = 0,
+ SINGLE = 1,
+};
+
+enum a3xx_instrbuffermode {
+ BUFFER = 1,
+};
+
+enum a3xx_threadsize {
+ TWO_QUADS = 0,
+ FOUR_QUADS = 1,
+};
+
+enum a3xx_state_block_id {
+ HLSQ_BLOCK_ID_TP_TEX = 2,
+ HLSQ_BLOCK_ID_TP_MIPMAP = 3,
+ HLSQ_BLOCK_ID_SP_VS = 4,
+ HLSQ_BLOCK_ID_SP_FS = 6,
+};
+
+enum a3xx_cache_opcode {
+ INVALIDATE = 1,
+};
+
+enum a3xx_vtx_fmt {
+ VFMT_FLOAT_32 = 0,
+ VFMT_FLOAT_32_32 = 1,
+ VFMT_FLOAT_32_32_32 = 2,
+ VFMT_FLOAT_32_32_32_32 = 3,
+ VFMT_FLOAT_16 = 4,
+ VFMT_FLOAT_16_16 = 5,
+ VFMT_FLOAT_16_16_16 = 6,
+ VFMT_FLOAT_16_16_16_16 = 7,
+ VFMT_FIXED_32 = 8,
+ VFMT_FIXED_32_32 = 9,
+ VFMT_FIXED_32_32_32 = 10,
+ VFMT_FIXED_32_32_32_32 = 11,
+ VFMT_SHORT_16 = 16,
+ VFMT_SHORT_16_16 = 17,
+ VFMT_SHORT_16_16_16 = 18,
+ VFMT_SHORT_16_16_16_16 = 19,
+ VFMT_USHORT_16 = 20,
+ VFMT_USHORT_16_16 = 21,
+ VFMT_USHORT_16_16_16 = 22,
+ VFMT_USHORT_16_16_16_16 = 23,
+ VFMT_NORM_SHORT_16 = 24,
+ VFMT_NORM_SHORT_16_16 = 25,
+ VFMT_NORM_SHORT_16_16_16 = 26,
+ VFMT_NORM_SHORT_16_16_16_16 = 27,
+ VFMT_NORM_USHORT_16 = 28,
+ VFMT_NORM_USHORT_16_16 = 29,
+ VFMT_NORM_USHORT_16_16_16 = 30,
+ VFMT_NORM_USHORT_16_16_16_16 = 31,
+ VFMT_UBYTE_8 = 40,
+ VFMT_UBYTE_8_8 = 41,
+ VFMT_UBYTE_8_8_8 = 42,
+ VFMT_UBYTE_8_8_8_8 = 43,
+ VFMT_NORM_UBYTE_8 = 44,
+ VFMT_NORM_UBYTE_8_8 = 45,
+ VFMT_NORM_UBYTE_8_8_8 = 46,
+ VFMT_NORM_UBYTE_8_8_8_8 = 47,
+ VFMT_BYTE_8 = 48,
+ VFMT_BYTE_8_8 = 49,
+ VFMT_BYTE_8_8_8 = 50,
+ VFMT_BYTE_8_8_8_8 = 51,
+ VFMT_NORM_BYTE_8 = 52,
+ VFMT_NORM_BYTE_8_8 = 53,
+ VFMT_NORM_BYTE_8_8_8 = 54,
+ VFMT_NORM_BYTE_8_8_8_8 = 55,
+ VFMT_UINT_10_10_10_2 = 60,
+ VFMT_NORM_UINT_10_10_10_2 = 61,
+ VFMT_INT_10_10_10_2 = 62,
+ VFMT_NORM_INT_10_10_10_2 = 63,
+};
+
+enum a3xx_tex_fmt {
+ TFMT_NORM_USHORT_565 = 4,
+ TFMT_NORM_USHORT_5551 = 6,
+ TFMT_NORM_USHORT_4444 = 7,
+ TFMT_NORM_UINT_X8Z24 = 10,
+ TFMT_NORM_UINT_NV12_UV_TILED = 17,
+ TFMT_NORM_UINT_NV12_Y_TILED = 19,
+ TFMT_NORM_UINT_NV12_UV = 21,
+ TFMT_NORM_UINT_NV12_Y = 23,
+ TFMT_NORM_UINT_I420_Y = 24,
+ TFMT_NORM_UINT_I420_U = 26,
+ TFMT_NORM_UINT_I420_V = 27,
+ TFMT_NORM_UINT_2_10_10_10 = 41,
+ TFMT_NORM_UINT_A8 = 44,
+ TFMT_NORM_UINT_L8_A8 = 47,
+ TFMT_NORM_UINT_8 = 48,
+ TFMT_NORM_UINT_8_8 = 49,
+ TFMT_NORM_UINT_8_8_8 = 50,
+ TFMT_NORM_UINT_8_8_8_8 = 51,
+ TFMT_FLOAT_16 = 64,
+ TFMT_FLOAT_16_16 = 65,
+ TFMT_FLOAT_16_16_16_16 = 67,
+ TFMT_FLOAT_32 = 84,
+ TFMT_FLOAT_32_32 = 85,
+ TFMT_FLOAT_32_32_32_32 = 87,
+};
+
+enum a3xx_tex_fetchsize {
+ TFETCH_DISABLE = 0,
+ TFETCH_1_BYTE = 1,
+ TFETCH_2_BYTE = 2,
+ TFETCH_4_BYTE = 3,
+ TFETCH_8_BYTE = 4,
+ TFETCH_16_BYTE = 5,
+};
+
+enum a3xx_color_fmt {
+ RB_R8G8B8_UNORM = 4,
+ RB_R8G8B8A8_UNORM = 8,
+ RB_Z16_UNORM = 12,
+ RB_A8_UNORM = 20,
+};
+
+enum a3xx_color_swap {
+ WZYX = 0,
+ WXYZ = 1,
+ ZYXW = 2,
+ XYZW = 3,
+};
+
+enum a3xx_msaa_samples {
+ MSAA_ONE = 0,
+ MSAA_TWO = 1,
+ MSAA_FOUR = 2,
+};
+
+enum a3xx_sp_perfcounter_select {
+ SP_FS_CFLOW_INSTRUCTIONS = 12,
+ SP_FS_FULL_ALU_INSTRUCTIONS = 14,
+ SP0_ICL1_MISSES = 26,
+ SP_ALU_ACTIVE_CYCLES = 29,
+};
+
+enum adreno_rb_copy_control_mode {
+ RB_COPY_RESOLVE = 1,
+ RB_COPY_DEPTH_STENCIL = 5,
+};
+
+enum a3xx_tex_filter {
+ A3XX_TEX_NEAREST = 0,
+ A3XX_TEX_LINEAR = 1,
+};
+
+enum a3xx_tex_clamp {
+ A3XX_TEX_REPEAT = 0,
+ A3XX_TEX_CLAMP_TO_EDGE = 1,
+ A3XX_TEX_MIRROR_REPEAT = 2,
+ A3XX_TEX_CLAMP_NONE = 3,
+};
+
+enum a3xx_tex_swiz {
+ A3XX_TEX_X = 0,
+ A3XX_TEX_Y = 1,
+ A3XX_TEX_Z = 2,
+ A3XX_TEX_W = 3,
+ A3XX_TEX_ZERO = 4,
+ A3XX_TEX_ONE = 5,
+};
+
+enum a3xx_tex_type {
+ A3XX_TEX_1D = 0,
+ A3XX_TEX_2D = 1,
+ A3XX_TEX_CUBE = 2,
+ A3XX_TEX_3D = 3,
+};
+
+#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004
+#define A3XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A3XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A3XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
+#define A3XX_INT0_VFD_ERROR 0x00000040
+#define A3XX_INT0_CP_SW_INT 0x00000080
+#define A3XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
+#define A3XX_INT0_CP_OPCODE_ERROR 0x00000200
+#define A3XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
+#define A3XX_INT0_CP_HW_FAULT 0x00000800
+#define A3XX_INT0_CP_DMA 0x00001000
+#define A3XX_INT0_CP_IB2_INT 0x00002000
+#define A3XX_INT0_CP_IB1_INT 0x00004000
+#define A3XX_INT0_CP_RB_INT 0x00008000
+#define A3XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
+#define A3XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A3XX_INT0_CP_VS_DONE_TS 0x00040000
+#define A3XX_INT0_CP_PS_DONE_TS 0x00080000
+#define A3XX_INT0_CACHE_FLUSH_TS 0x00100000
+#define A3XX_INT0_CP_AHB_ERROR_HALT 0x00200000
+#define A3XX_INT0_MISC_HANG_DETECT 0x01000000
+#define A3XX_INT0_UCHE_OOB_ACCESS 0x02000000
+#define REG_A3XX_RBBM_HW_VERSION 0x00000000
+
+#define REG_A3XX_RBBM_HW_RELEASE 0x00000001
+
+#define REG_A3XX_RBBM_HW_CONFIGURATION 0x00000002
+
+#define REG_A3XX_RBBM_CLOCK_CTL 0x00000010
+
+#define REG_A3XX_RBBM_SP_HYST_CNT 0x00000012
+
+#define REG_A3XX_RBBM_SW_RESET_CMD 0x00000018
+
+#define REG_A3XX_RBBM_AHB_CTL0 0x00000020
+
+#define REG_A3XX_RBBM_AHB_CTL1 0x00000021
+
+#define REG_A3XX_RBBM_AHB_CMD 0x00000022
+
+#define REG_A3XX_RBBM_AHB_ERROR_STATUS 0x00000027
+
+#define REG_A3XX_RBBM_GPR0_CTL 0x0000002e
+
+#define REG_A3XX_RBBM_STATUS 0x00000030
+#define A3XX_RBBM_STATUS_HI_BUSY 0x00000001
+#define A3XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A3XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A3XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
+#define A3XX_RBBM_STATUS_VBIF_BUSY 0x00008000
+#define A3XX_RBBM_STATUS_TSE_BUSY 0x00010000
+#define A3XX_RBBM_STATUS_RAS_BUSY 0x00020000
+#define A3XX_RBBM_STATUS_RB_BUSY 0x00040000
+#define A3XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A3XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A3XX_RBBM_STATUS_VFD_BUSY 0x00200000
+#define A3XX_RBBM_STATUS_VPC_BUSY 0x00400000
+#define A3XX_RBBM_STATUS_UCHE_BUSY 0x00800000
+#define A3XX_RBBM_STATUS_SP_BUSY 0x01000000
+#define A3XX_RBBM_STATUS_TPL1_BUSY 0x02000000
+#define A3XX_RBBM_STATUS_MARB_BUSY 0x04000000
+#define A3XX_RBBM_STATUS_VSC_BUSY 0x08000000
+#define A3XX_RBBM_STATUS_ARB_BUSY 0x10000000
+#define A3XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
+#define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000
+
+#define REG_A3XX_RBBM_NQWAIT_UNTIL 0x00000040
+
+#define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x00000051
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x00000054
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x00000057
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a
+
+#define REG_A3XX_RBBM_INT_SET_CMD 0x00000060
+
+#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061
+
+#define REG_A3XX_RBBM_INT_0_MASK 0x00000063
+
+#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064
+
+#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD1 0x00000082
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000084
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000085
+
+#define REG_A3XX_RBBM_PERFCOUNTER0_SELECT 0x00000086
+
+#define REG_A3XX_RBBM_PERFCOUNTER1_SELECT 0x00000087
+
+#define REG_A3XX_RBBM_GPU_BUSY_MASKED 0x00000088
+
+#define REG_A3XX_RBBM_PERFCTR_CP_0_LO 0x00000090
+
+#define REG_A3XX_RBBM_PERFCTR_CP_0_HI 0x00000091
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_0_LO 0x00000092
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_0_HI 0x00000093
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_1_LO 0x00000094
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_1_HI 0x00000095
+
+#define REG_A3XX_RBBM_PERFCTR_PC_0_LO 0x00000096
+
+#define REG_A3XX_RBBM_PERFCTR_PC_0_HI 0x00000097
+
+#define REG_A3XX_RBBM_PERFCTR_PC_1_LO 0x00000098
+
+#define REG_A3XX_RBBM_PERFCTR_PC_1_HI 0x00000099
+
+#define REG_A3XX_RBBM_PERFCTR_PC_2_LO 0x0000009a
+
+#define REG_A3XX_RBBM_PERFCTR_PC_2_HI 0x0000009b
+
+#define REG_A3XX_RBBM_PERFCTR_PC_3_LO 0x0000009c
+
+#define REG_A3XX_RBBM_PERFCTR_PC_3_HI 0x0000009d
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_0_LO 0x0000009e
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_0_HI 0x0000009f
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_1_LO 0x000000a0
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_1_HI 0x000000a1
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000a2
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000a3
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000a4
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000a5
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000a6
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000a7
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000a8
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000a9
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000aa
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000ab
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000ac
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000ad
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_0_LO 0x000000ae
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_0_HI 0x000000af
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_1_LO 0x000000b0
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_1_HI 0x000000b1
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_0_LO 0x000000b2
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_0_HI 0x000000b3
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_1_LO 0x000000b4
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_1_HI 0x000000b5
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_0_LO 0x000000b6
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_0_HI 0x000000b7
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_1_LO 0x000000b8
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_1_HI 0x000000b9
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_0_LO 0x000000ba
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_0_HI 0x000000bb
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_1_LO 0x000000bc
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_1_HI 0x000000bd
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_2_LO 0x000000be
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_2_HI 0x000000bf
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_3_LO 0x000000c0
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_3_HI 0x000000c1
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_4_LO 0x000000c2
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_4_HI 0x000000c3
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_5_LO 0x000000c4
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_5_HI 0x000000c5
+
+#define REG_A3XX_RBBM_PERFCTR_TP_0_LO 0x000000c6
+
+#define REG_A3XX_RBBM_PERFCTR_TP_0_HI 0x000000c7
+
+#define REG_A3XX_RBBM_PERFCTR_TP_1_LO 0x000000c8
+
+#define REG_A3XX_RBBM_PERFCTR_TP_1_HI 0x000000c9
+
+#define REG_A3XX_RBBM_PERFCTR_TP_2_LO 0x000000ca
+
+#define REG_A3XX_RBBM_PERFCTR_TP_2_HI 0x000000cb
+
+#define REG_A3XX_RBBM_PERFCTR_TP_3_LO 0x000000cc
+
+#define REG_A3XX_RBBM_PERFCTR_TP_3_HI 0x000000cd
+
+#define REG_A3XX_RBBM_PERFCTR_TP_4_LO 0x000000ce
+
+#define REG_A3XX_RBBM_PERFCTR_TP_4_HI 0x000000cf
+
+#define REG_A3XX_RBBM_PERFCTR_TP_5_LO 0x000000d0
+
+#define REG_A3XX_RBBM_PERFCTR_TP_5_HI 0x000000d1
+
+#define REG_A3XX_RBBM_PERFCTR_SP_0_LO 0x000000d2
+
+#define REG_A3XX_RBBM_PERFCTR_SP_0_HI 0x000000d3
+
+#define REG_A3XX_RBBM_PERFCTR_SP_1_LO 0x000000d4
+
+#define REG_A3XX_RBBM_PERFCTR_SP_1_HI 0x000000d5
+
+#define REG_A3XX_RBBM_PERFCTR_SP_2_LO 0x000000d6
+
+#define REG_A3XX_RBBM_PERFCTR_SP_2_HI 0x000000d7
+
+#define REG_A3XX_RBBM_PERFCTR_SP_3_LO 0x000000d8
+
+#define REG_A3XX_RBBM_PERFCTR_SP_3_HI 0x000000d9
+
+#define REG_A3XX_RBBM_PERFCTR_SP_4_LO 0x000000da
+
+#define REG_A3XX_RBBM_PERFCTR_SP_4_HI 0x000000db
+
+#define REG_A3XX_RBBM_PERFCTR_SP_5_LO 0x000000dc
+
+#define REG_A3XX_RBBM_PERFCTR_SP_5_HI 0x000000dd
+
+#define REG_A3XX_RBBM_PERFCTR_SP_6_LO 0x000000de
+
+#define REG_A3XX_RBBM_PERFCTR_SP_6_HI 0x000000df
+
+#define REG_A3XX_RBBM_PERFCTR_SP_7_LO 0x000000e0
+
+#define REG_A3XX_RBBM_PERFCTR_SP_7_HI 0x000000e1
+
+#define REG_A3XX_RBBM_PERFCTR_RB_0_LO 0x000000e2
+
+#define REG_A3XX_RBBM_PERFCTR_RB_0_HI 0x000000e3
+
+#define REG_A3XX_RBBM_PERFCTR_RB_1_LO 0x000000e4
+
+#define REG_A3XX_RBBM_PERFCTR_RB_1_HI 0x000000e5
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_0_LO 0x000000ea
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_0_HI 0x000000eb
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_1_LO 0x000000ec
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_1_HI 0x000000ed
+
+#define REG_A3XX_RBBM_RBBM_CTL 0x00000100
+
+#define REG_A3XX_RBBM_DEBUG_BUS_CTL 0x00000111
+
+#define REG_A3XX_RBBM_DEBUG_BUS_DATA_STATUS 0x00000112
+
+#define REG_A3XX_CP_PFP_UCODE_ADDR 0x000001c9
+
+#define REG_A3XX_CP_PFP_UCODE_DATA 0x000001ca
+
+#define REG_A3XX_CP_ROQ_ADDR 0x000001cc
+
+#define REG_A3XX_CP_ROQ_DATA 0x000001cd
+
+#define REG_A3XX_CP_MERCIU_ADDR 0x000001d1
+
+#define REG_A3XX_CP_MERCIU_DATA 0x000001d2
+
+#define REG_A3XX_CP_MERCIU_DATA2 0x000001d3
+
+#define REG_A3XX_CP_MEQ_ADDR 0x000001da
+
+#define REG_A3XX_CP_MEQ_DATA 0x000001db
+
+#define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445
+
+#define REG_A3XX_CP_HW_FAULT 0x0000045c
+
+#define REG_A3XX_CP_PROTECT_CTRL 0x0000045e
+
+#define REG_A3XX_CP_PROTECT_STATUS 0x0000045f
+
+static inline uint32_t REG_A3XX_CP_PROTECT(uint32_t i0) { return 0x00000460 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 + 0x1*i0; }
+
+#define REG_A3XX_CP_AHB_FAULT 0x0000054d
+
+#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000
+#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
+#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
+#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000
+#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000
+
+#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
+}
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_XOFFSET 0x00002048
+#define A3XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_XOFFSET(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_XOFFSET__MASK;
+}
+
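/* Editor's note, not part of the patch: the viewport helpers here pass the
 * float through fui(), which in freedreno convention is a float-to-uint32
 * bit cast -- the register holds the raw IEEE-754 bit pattern, not an
 * integer conversion of the value. A minimal sketch of such a helper,
 * assuming that convention:
 *
 *	static inline uint32_t fui(float f)
 *	{
 *		union { float f; uint32_t u; } cvt = { .f = f };
 *		return cvt.u;	// e.g. fui(1.0f) == 0x3f800000
 *	}
 */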
+#define REG_A3XX_GRAS_CL_VPORT_XSCALE 0x00002049
+#define A3XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_XSCALE__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_XSCALE(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_XSCALE__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_YOFFSET 0x0000204a
+#define A3XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_YOFFSET(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_YOFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_YSCALE 0x0000204b
+#define A3XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_YSCALE__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_YSCALE(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_YSCALE__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_ZOFFSET 0x0000204c
+#define A3XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_ZOFFSET(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_ZOFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_ZSCALE 0x0000204d
+#define A3XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_ZSCALE__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068
+
+#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
+
+#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
+#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff
+#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
+{
+ return ((((uint32_t)(val * 40.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d
+#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((((uint32_t)(val * 44.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
+#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
+#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
+#define A3XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
+#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
+#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
+{
+ return ((((uint32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+}
+#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
+
+#define REG_A3XX_GRAS_SC_CONTROL 0x00002072
+#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x000000f0
+#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 4
+static inline uint32_t A3XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+ return ((val) << A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
+}
+#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000f00
+#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 8
+static inline uint32_t A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
+}
+#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
+#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
+static inline uint32_t A3XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x00002074
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x00002075
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x00002079
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000207a
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A3XX_RB_MODE_CONTROL 0x000020c0
+#define A3XX_RB_MODE_CONTROL_GMEM_BYPASS 0x00000080
+#define A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK 0x00000700
+#define A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT 8
+static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+ return ((val) << A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT) & A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK;
+}
+#define A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE 0x00008000
+#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000
+
+#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1
+#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0
+#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
+static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK;
+}
+#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
+#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
+static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
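/* Editor's note, not part of the patch: A3XX_RB_RENDER_CONTROL_BIN_WIDTH()
 * shifts its argument right by 5 before packing, i.e. the field apparently
 * holds the bin width in 32-pixel units while callers pass pixels.
 * Illustrative use:
 *
 *	uint32_t rb_render_control =
 *		A3XX_RB_RENDER_CONTROL_BIN_WIDTH(512) |	// encodes 512 >> 5 = 16
 *		A3XX_RB_RENDER_CONTROL_ENABLE_GMEM;
 */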
+#define REG_A3XX_RB_MSAA_CONTROL 0x000020c2
+#define A3XX_RB_MSAA_CONTROL_DISABLE 0x00000400
+#define A3XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000f000
+#define A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 12
+static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLES__MASK;
+}
+#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK 0xffff0000
+#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A3XX_RB_ALPHA_REF 0x000020c3
+#define A3XX_RB_ALPHA_REF_UINT__MASK 0x0000ff00
+#define A3XX_RB_ALPHA_REF_UINT__SHIFT 8
+static inline uint32_t A3XX_RB_ALPHA_REF_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_ALPHA_REF_UINT__SHIFT) & A3XX_RB_ALPHA_REF_UINT__MASK;
+}
+#define A3XX_RB_ALPHA_REF_FLOAT__MASK 0xffff0000
+#define A3XX_RB_ALPHA_REF_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_ALPHA_REF_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A3XX_RB_ALPHA_REF_FLOAT__SHIFT) & A3XX_RB_ALPHA_REF_FLOAT__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
+#define A3XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
+#define A3XX_RB_MRT_CONTROL_BLEND 0x00000010
+#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020
+#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
+#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
+static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(uint32_t val)
+{
+ return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
+#define A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK 0x00003000
+#define A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT 12
+static inline uint32_t A3XX_RB_MRT_CONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT) & A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK;
+}
+#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
+#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
+static inline uint32_t A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020c5 + 0x4*i0; }
+#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
+#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a3xx_color_fmt val)
+{
+ return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0
+#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a3xx_tile_mode val)
+{
+ return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000
+#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6 + 0x4*i0; }
+#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK 0xfffffff0
+#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT 4
+static inline uint32_t A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020c7 + 0x4*i0; }
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE 0x20000000
+
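/* Editor's note, not part of the patch: one RB_MRT_BLEND_CONTROL dword packs
 * the source/destination factors and blend opcode separately for RGB and
 * alpha. A hedged sketch of classic src-alpha blending on MRT slot 0; the
 * FACTOR_* and BLEND_* enumerator names are assumed from adreno_common.xml.h:
 *
 *	uint32_t blend =
 *		A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(FACTOR_SRC_ALPHA) |
 *		A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
 *		A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(FACTOR_ONE_MINUS_SRC_ALPHA) |
 *		A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(FACTOR_SRC_ALPHA) |
 *		A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
 *		A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(FACTOR_ONE_MINUS_SRC_ALPHA);
 *
 * REG_A3XX_RB_MRT_BLEND_CONTROL(0) above yields the register offset for slot 0.
 */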
+#define REG_A3XX_RB_BLEND_RED 0x000020e4
+#define A3XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_RED_UINT__SHIFT) & A3XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A3XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A3XX_RB_BLEND_RED_FLOAT__SHIFT) & A3XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_BLEND_GREEN 0x000020e5
+#define A3XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_GREEN_UINT__SHIFT) & A3XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A3XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A3XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A3XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_BLEND_BLUE 0x000020e6
+#define A3XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_BLUE_UINT__SHIFT) & A3XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A3XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A3XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A3XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_BLEND_ALPHA 0x000020e7
+#define A3XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_ALPHA_UINT__SHIFT) & A3XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A3XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_CLEAR_COLOR_DW0 0x000020e8
+
+#define REG_A3XX_RB_CLEAR_COLOR_DW1 0x000020e9
+
+#define REG_A3XX_RB_CLEAR_COLOR_DW2 0x000020ea
+
+#define REG_A3XX_RB_CLEAR_COLOR_DW3 0x000020eb
+
+#define REG_A3XX_RB_COPY_CONTROL 0x000020ec
+#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
+#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
+static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
+{
+ return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
+}
+#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
+#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4
+static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
+{
+ return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
+}
+#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xfffffc00
+#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 10
+static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
+{
+ return ((val >> 10) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
+}
+
+#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed
+#define A3XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0
+#define A3XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
+static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A3XX_RB_COPY_DEST_BASE_BASE__MASK;
+}
+
+#define REG_A3XX_RB_COPY_DEST_PITCH 0x000020ee
+#define A3XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
+#define A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
+static inline uint32_t A3XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK;
+}
+
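/* Editor's note, not part of the patch: the pitch helper above stores its
 * byte argument divided by 32 (val >> 5), which suggests the field is
 * programmed in 32-byte units and destination pitches must be 32-byte
 * aligned. Illustrative use, for a hypothetical 1024-pixel-wide RGBA8
 * destination:
 *
 *	uint32_t pitch_reg = A3XX_RB_COPY_DEST_PITCH_PITCH(1024 * 4);
 */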
+#define REG_A3XX_RB_COPY_DEST_INFO 0x000020ef
+#define A3XX_RB_COPY_DEST_INFO_TILE__MASK 0x00000003
+#define A3XX_RB_COPY_DEST_INFO_TILE__SHIFT 0
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_TILE(enum a3xx_tile_mode val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A3XX_RB_COPY_DEST_INFO_TILE__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
+#define A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_FORMAT(enum a3xx_color_fmt val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A3XX_RB_COPY_DEST_INFO_FORMAT__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
+#define A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
+#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
+#define A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
+}
+
+#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
+#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
+#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
+#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00000008
+#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
+#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
+static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
+}
+#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
+
+#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
+
+#define REG_A3XX_RB_DEPTH_INFO 0x00002102
+#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
+#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
+{
+ return ((val) << A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
+}
+#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff800
+#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11
+static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
+{
+ return ((val >> 10) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+}
+
+#define REG_A3XX_RB_DEPTH_PITCH 0x00002103
+#define A3XX_RB_DEPTH_PITCH__MASK 0xffffffff
+#define A3XX_RB_DEPTH_PITCH__SHIFT 0
+static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val)
+{
+ return ((val >> 3) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK;
+}
+
+#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A3XX_RB_STENCIL_CLEAR 0x00002105
+
+#define REG_A3XX_RB_STENCIL_BUF_INFO 0x00002106
+
+#define REG_A3XX_RB_STENCIL_BUF_PITCH 0x00002107
+
+#define REG_A3XX_RB_STENCILREFMASK 0x00002108
+#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A3XX_RB_STENCILREFMASK_BF 0x00002109
+#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
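/* Editor's note, not part of the patch: RB_STENCILREFMASK packs the stencil
 * reference value, compare mask and write mask into a single dword (the _BF
 * variant above holds the equivalent back-facing state). Illustrative use:
 *
 *	uint32_t refmask = A3XX_RB_STENCILREFMASK_STENCILREF(0x01) |
 *			   A3XX_RB_STENCILREFMASK_STENCILMASK(0xff) |
 *			   A3XX_RB_STENCILREFMASK_STENCILWRITEMASK(0xff);
 */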
+#define REG_A3XX_RB_LRZ_VSC_CONTROL 0x0000210c
+#define A3XX_RB_LRZ_VSC_CONTROL_BINNING_ENABLE 0x00000002
+
+#define REG_A3XX_RB_WINDOW_OFFSET 0x0000210e
+#define A3XX_RB_WINDOW_OFFSET_X__MASK 0x0000ffff
+#define A3XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A3XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A3XX_RB_WINDOW_OFFSET_X__SHIFT) & A3XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A3XX_RB_WINDOW_OFFSET_Y__MASK 0xffff0000
+#define A3XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A3XX_RB_WINDOW_OFFSET_Y__SHIFT) & A3XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110
+
+#define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111
+
+#define REG_A3XX_RB_Z_CLAMP_MIN 0x00002114
+
+#define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115
+
+#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4
+
+#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
+
+#define REG_A3XX_PC_PRIM_VTX_CNTL 0x000021ec
+#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK 0x0000001f
+#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x000000e0
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 5
+static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000700
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT 8
+static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
+
+#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
+
+#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
+#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
+#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
+#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTSWITCHMODE 0x08000000
+#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
+#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
+#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
+#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
+
+#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
+}
+#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
+#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
+
+#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
+#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
+#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
+
+#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
+}
+#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
+}
+#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
+}
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
+}
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK;
+}
+
+#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a
+
+#define REG_A3XX_HLSQ_CL_NDRANGE_1_REG 0x0000220b
+
+#define REG_A3XX_HLSQ_CL_NDRANGE_2_REG 0x0000220c
+
+#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211
+
+#define REG_A3XX_HLSQ_CL_CONTROL_1_REG 0x00002212
+
+#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214
+
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215
+
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216
+
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217
+
+#define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a
+
+#define REG_A3XX_VFD_CONTROL_0 0x00002240
+#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x0003ffff
+#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
+static inline uint32_t A3XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
+}
+#define A3XX_VFD_CONTROL_0_PACKETSIZE__MASK 0x003c0000
+#define A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT 18
+static inline uint32_t A3XX_VFD_CONTROL_0_PACKETSIZE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT) & A3XX_VFD_CONTROL_0_PACKETSIZE__MASK;
+}
+#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x07c00000
+#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 22
+static inline uint32_t A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xf8000000
+#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 27
+static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
+}
+
+#define REG_A3XX_VFD_CONTROL_1 0x00002241
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
+static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
+}
+#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A3XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A3XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
+#define A3XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
+static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A3XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+
+#define REG_A3XX_VFD_INDEX_MIN 0x00002242
+
+#define REG_A3XX_VFD_INDEX_MAX 0x00002243
+
+#define REG_A3XX_VFD_INSTANCEID_OFFSET 0x00002244
+
+#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
+
+static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
+#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
+#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80
+#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00020000
+#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK 0x00fc0000
+#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT 18
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_INDEXCODE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000
+#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
+}
+
+static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x00002247 + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_VFD_DECODE(uint32_t i0) { return 0x00002266 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x00002266 + 0x1*i0; }
+#define A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
+#define A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A3XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
+#define A3XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
+#define A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
+static inline uint32_t A3XX_VFD_DECODE_INSTR_FORMAT(enum a3xx_vtx_fmt val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A3XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
+#define A3XX_VFD_DECODE_INSTR_REGID__SHIFT 12
+static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
+#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
+static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
+#define A3XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
+
+#define REG_A3XX_VFD_VS_THREADING_THRESHOLD 0x0000227e
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK 0x0000000f
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT 0
+static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK;
+}
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK 0x0000ff00
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT 8
+static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK;
+}
+
+#define REG_A3XX_VPC_ATTR 0x00002280
+#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x00000fff
+#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0
+static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val)
+{
+ return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK;
+}
+#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000
+#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12
+static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val)
+{
+ return ((val) << A3XX_VPC_ATTR_THRDASSIGN__SHIFT) & A3XX_VPC_ATTR_THRDASSIGN__MASK;
+}
+#define A3XX_VPC_ATTR_LMSIZE__MASK 0xf0000000
+#define A3XX_VPC_ATTR_LMSIZE__SHIFT 28
+static inline uint32_t A3XX_VPC_ATTR_LMSIZE(uint32_t val)
+{
+ return ((val) << A3XX_VPC_ATTR_LMSIZE__SHIFT) & A3XX_VPC_ATTR_LMSIZE__MASK;
+}
+
+#define REG_A3XX_VPC_PACK 0x00002281
+#define A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
+#define A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
+static inline uint32_t A3XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
+{
+ return ((val) << A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
+}
+#define A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
+#define A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
+static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
+{
+ return ((val) << A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
+}
+
+static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; }
+
+#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0 0x0000228a
+
+#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x0000228b
+
+#define REG_A3XX_SP_SP_CTRL_REG 0x000022c0
+#define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000
+#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x00040000
+#define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18
+static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val)
+{
+ return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK;
+}
+#define A3XX_SP_SP_CTRL_REG_BINNING 0x00080000
+#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000
+#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20
+static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
+{
+ return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK;
+}
+#define A3XX_SP_SP_CTRL_REG_LOMODE__MASK 0x00c00000
+#define A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT 22
+static inline uint32_t A3XX_SP_SP_CTRL_REG_LOMODE(uint32_t val)
+{
+ return ((val) << A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_LOMODE__MASK;
+}
+
+#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4
+#define A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
+#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
+#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
+#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG0_LENGTH__MASK;
+}
+
+#define REG_A3XX_SP_VS_CTRL_REG1 0x000022c5
+#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
+#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x3f000000
+#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
+
+#define REG_A3XX_SP_VS_PARAM_REG 0x000022c6
+#define A3XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_POSREGID__MASK;
+}
+#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
+#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
+static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
+}
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
+static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4
+#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
+
+#define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG 0x000022d6
+
+#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
+
+#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8
+
+#define REG_A3XX_SP_VS_LENGTH_REG 0x000022df
+#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
+#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_VS_LENGTH_REG_SHADERLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK;
+}
+
+#define REG_A3XX_SP_FS_CTRL_REG0 0x000022e0
+#define A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
+#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
+#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000
+#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG0_LENGTH__MASK;
+}
+
+#define REG_A3XX_SP_FS_CTRL_REG1 0x000022e1
+#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
+#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x00f00000
+#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 20
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x3f000000
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT) & A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK;
+}
+
+#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2
+#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
+
+#define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG 0x000022e4
+
+#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
+
+#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6
+
+#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x000022e8
+
+#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9
+
+#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec
+
+static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
+#define A3XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
+#define A3XX_SP_FS_MRT_REG_REGID__SHIFT 0
+static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
+}
+#define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
+
+static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT_REG(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK 0x0000003f
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT 0
+static inline uint32_t A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT(enum a3xx_color_fmt val)
+{
+ return ((val) << A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT) & A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK;
+}
+
+#define REG_A3XX_SP_FS_LENGTH_REG 0x000022ff
+#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
+#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
+}
+
+#define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK;
+}
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK;
+}
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK;
+}
+
+#define REG_A3XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002341
+
+#define REG_A3XX_TPL1_TP_FS_TEX_OFFSET 0x00002342
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK;
+}
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK;
+}
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK;
+}
+
+#define REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x00002343
+
+#define REG_A3XX_VBIF_CLKON 0x00003001
+
+#define REG_A3XX_VBIF_FIXED_SORT_EN 0x0000300c
+
+#define REG_A3XX_VBIF_FIXED_SORT_SEL0 0x0000300d
+
+#define REG_A3XX_VBIF_FIXED_SORT_SEL1 0x0000300e
+
+#define REG_A3XX_VBIF_ABIT_SORT 0x0000301c
+
+#define REG_A3XX_VBIF_ABIT_SORT_CONF 0x0000301d
+
+#define REG_A3XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A3XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A3XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A3XX_VBIF_IN_WR_LIM_CONF0 0x00003030
+
+#define REG_A3XX_VBIF_IN_WR_LIM_CONF1 0x00003031
+
+#define REG_A3XX_VBIF_OUT_RD_LIM_CONF0 0x00003034
+
+#define REG_A3XX_VBIF_OUT_WR_LIM_CONF0 0x00003035
+
+#define REG_A3XX_VBIF_DDR_OUT_MAX_BURST 0x00003036
+
+#define REG_A3XX_VBIF_ARB_CTL 0x0000303c
+
+#define REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x00003058
+
+#define REG_A3XX_VBIF_OUT_AXI_AOOO_EN 0x0000305e
+
+#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f
+
+#define REG_A3XX_VSC_BIN_SIZE 0x00000c01
+#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
+#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A3XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A3XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
+#define A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
+static inline uint32_t A3XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
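+
+/*
+ * Note the "val >> 5" above, unlike the plain shift-and-mask helpers:
+ * these fields apparently take a dimension in pixels and store it in
+ * units of 32 pixels.  Illustrative use (values made up):
+ *
+ *   gpu_write(gpu, REG_A3XX_VSC_BIN_SIZE,
+ *           A3XX_VSC_BIN_SIZE_WIDTH(64) | A3XX_VSC_BIN_SIZE_HEIGHT(64));
+ */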
+
+#define REG_A3XX_VSC_SIZE_ADDRESS 0x00000c02
+
+static inline uint32_t REG_A3XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+
+static inline uint32_t REG_A3XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+#define A3XX_VSC_PIPE_CONFIG_X__MASK 0x000003ff
+#define A3XX_VSC_PIPE_CONFIG_X__SHIFT 0
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_X(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_X__SHIFT) & A3XX_VSC_PIPE_CONFIG_X__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_Y__MASK 0x000ffc00
+#define A3XX_VSC_PIPE_CONFIG_Y__SHIFT 10
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_Y(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_Y__SHIFT) & A3XX_VSC_PIPE_CONFIG_Y__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_W__MASK 0x00f00000
+#define A3XX_VSC_PIPE_CONFIG_W__SHIFT 20
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_W(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_W__SHIFT) & A3XX_VSC_PIPE_CONFIG_W__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_H__MASK 0x0f000000
+#define A3XX_VSC_PIPE_CONFIG_H__SHIFT 24
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_H(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_H__SHIFT) & A3XX_VSC_PIPE_CONFIG_H__MASK;
+}
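+
+/*
+ * Illustrative sketch (field meanings assumed from the names): a
+ * binning pipe is placed by X/Y and sized by W/H, with its per-pipe
+ * buffer described by the DATA_ADDRESS/DATA_LENGTH registers below:
+ *
+ *   gpu_write(gpu, REG_A3XX_VSC_PIPE_CONFIG(p),
+ *           A3XX_VSC_PIPE_CONFIG_X(x) | A3XX_VSC_PIPE_CONFIG_Y(y) |
+ *           A3XX_VSC_PIPE_CONFIG_W(w) | A3XX_VSC_PIPE_CONFIG_H(h));
+ */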
+
+static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
+
+static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
+
+#define REG_A3XX_VSC_BIN_CONTROL 0x00000c3c
+#define A3XX_VSC_BIN_CONTROL_BINNING_ENABLE 0x00000001
+
+#define REG_A3XX_UNKNOWN_0C3D 0x00000c3d
+
+#define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48
+
+#define REG_A3XX_PC_PERFCOUNTER1_SELECT 0x00000c49
+
+#define REG_A3XX_PC_PERFCOUNTER2_SELECT 0x00000c4a
+
+#define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b
+
+#define REG_A3XX_GRAS_TSE_DEBUG_ECO 0x00000c81
+
+#define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88
+
+#define REG_A3XX_GRAS_PERFCOUNTER1_SELECT 0x00000c89
+
+#define REG_A3XX_GRAS_PERFCOUNTER2_SELECT 0x00000c8a
+
+#define REG_A3XX_GRAS_PERFCOUNTER3_SELECT 0x00000c8b
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_X(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Y(uint32_t i0) { return 0x00000ca1 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Z(uint32_t i0) { return 0x00000ca2 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x00000ca3 + 0x4*i0; }
+
+#define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0
+
+#define REG_A3XX_RB_DEBUG_ECO_CONTROLS_ADDR 0x00000cc1
+
+#define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6
+
+#define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7
+
+#define REG_A3XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0
+static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
+{
+ return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
+}
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x0fffc000
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 14
+static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
+{
+ return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
+}
+
+#define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00
+
+#define REG_A3XX_HLSQ_PERFCOUNTER1_SELECT 0x00000e01
+
+#define REG_A3XX_HLSQ_PERFCOUNTER2_SELECT 0x00000e02
+
+#define REG_A3XX_HLSQ_PERFCOUNTER3_SELECT 0x00000e03
+
+#define REG_A3XX_HLSQ_PERFCOUNTER4_SELECT 0x00000e04
+
+#define REG_A3XX_HLSQ_PERFCOUNTER5_SELECT 0x00000e05
+
+#define REG_A3XX_UNKNOWN_0E43 0x00000e43
+
+#define REG_A3XX_VFD_PERFCOUNTER0_SELECT 0x00000e44
+
+#define REG_A3XX_VFD_PERFCOUNTER1_SELECT 0x00000e45
+
+#define REG_A3XX_VPC_VPC_DEBUG_RAM_SEL 0x00000e61
+
+#define REG_A3XX_VPC_VPC_DEBUG_RAM_READ 0x00000e62
+
+#define REG_A3XX_VPC_PERFCOUNTER0_SELECT 0x00000e64
+
+#define REG_A3XX_VPC_PERFCOUNTER1_SELECT 0x00000e65
+
+#define REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG 0x00000e82
+
+#define REG_A3XX_UCHE_PERFCOUNTER0_SELECT 0x00000e84
+
+#define REG_A3XX_UCHE_PERFCOUNTER1_SELECT 0x00000e85
+
+#define REG_A3XX_UCHE_PERFCOUNTER2_SELECT 0x00000e86
+
+#define REG_A3XX_UCHE_PERFCOUNTER3_SELECT 0x00000e87
+
+#define REG_A3XX_UCHE_PERFCOUNTER4_SELECT 0x00000e88
+
+#define REG_A3XX_UCHE_PERFCOUNTER5_SELECT 0x00000e89
+
+#define REG_A3XX_UCHE_CACHE_INVALIDATE0_REG 0x00000ea0
+#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK 0x0fffffff
+#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT 0
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(uint32_t val)
+{
+ return ((val) << A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK;
+}
+
+#define REG_A3XX_UCHE_CACHE_INVALIDATE1_REG 0x00000ea1
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK 0x0fffffff
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT 0
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(uint32_t val)
+{
+ return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK;
+}
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK 0x30000000
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT 28
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_opcode val)
+{
+ return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK;
+}
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000
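+
+/*
+ * Illustrative sketch (usage assumed from the definitions above): the
+ * invalidate address range is split across the two registers, with the
+ * opcode (enum a3xx_cache_opcode) and the ENTIRE_CACHE bit living in
+ * the second one:
+ *
+ *   gpu_write(gpu, REG_A3XX_UCHE_CACHE_INVALIDATE0_REG,
+ *           A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(0));
+ *   gpu_write(gpu, REG_A3XX_UCHE_CACHE_INVALIDATE1_REG,
+ *           A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE);
+ */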
+
+#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4
+
+#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5
+
+#define REG_A3XX_SP_PERFCOUNTER2_SELECT 0x00000ec6
+
+#define REG_A3XX_SP_PERFCOUNTER3_SELECT 0x00000ec7
+
+#define REG_A3XX_SP_PERFCOUNTER4_SELECT 0x00000ec8
+
+#define REG_A3XX_SP_PERFCOUNTER5_SELECT 0x00000ec9
+
+#define REG_A3XX_SP_PERFCOUNTER6_SELECT 0x00000eca
+
+#define REG_A3XX_SP_PERFCOUNTER7_SELECT 0x00000ecb
+
+#define REG_A3XX_UNKNOWN_0EE0 0x00000ee0
+
+#define REG_A3XX_UNKNOWN_0F03 0x00000f03
+
+#define REG_A3XX_TP_PERFCOUNTER0_SELECT 0x00000f04
+
+#define REG_A3XX_TP_PERFCOUNTER1_SELECT 0x00000f05
+
+#define REG_A3XX_TP_PERFCOUNTER2_SELECT 0x00000f06
+
+#define REG_A3XX_TP_PERFCOUNTER3_SELECT 0x00000f07
+
+#define REG_A3XX_TP_PERFCOUNTER4_SELECT 0x00000f08
+
+#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09
+
+#define REG_A3XX_VGT_CL_INITIATOR 0x000021f0
+
+#define REG_A3XX_VGT_EVENT_INITIATOR 0x000021f9
+
+#define REG_A3XX_VGT_DRAW_INITIATOR 0x000021fc
+
+#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
+
+#define REG_A3XX_TEX_SAMP_0 0x00000000
+#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002
+#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
+#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
+static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_XY_MAG__SHIFT) & A3XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A3XX_TEX_SAMP_0_XY_MIN__MASK 0x00000030
+#define A3XX_TEX_SAMP_0_XY_MIN__SHIFT 4
+static inline uint32_t A3XX_TEX_SAMP_0_XY_MIN(enum a3xx_tex_filter val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_XY_MIN__SHIFT) & A3XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A3XX_TEX_SAMP_0_WRAP_S__MASK 0x000001c0
+#define A3XX_TEX_SAMP_0_WRAP_S__SHIFT 6
+static inline uint32_t A3XX_TEX_SAMP_0_WRAP_S(enum a3xx_tex_clamp val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_WRAP_S__SHIFT) & A3XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A3XX_TEX_SAMP_0_WRAP_T__MASK 0x00000e00
+#define A3XX_TEX_SAMP_0_WRAP_T__SHIFT 9
+static inline uint32_t A3XX_TEX_SAMP_0_WRAP_T(enum a3xx_tex_clamp val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_WRAP_T__SHIFT) & A3XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A3XX_TEX_SAMP_0_WRAP_R__MASK 0x00007000
+#define A3XX_TEX_SAMP_0_WRAP_R__SHIFT 12
+static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
+
+#define REG_A3XX_TEX_SAMP_1 0x00000001
+#define A3XX_TEX_SAMP_1_MAX_LOD__MASK 0x003ff000
+#define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT 12
+static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A3XX_TEX_SAMP_1_MIN_LOD__MASK 0xffc00000
+#define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT 22
+static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
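+
+/*
+ * The LOD helpers above take a float and convert it to the sampler's
+ * fixed-point encoding (the 12.0 scale factor comes from the register
+ * database this header was generated from).  Illustrative only:
+ *
+ *   uint32_t samp1 = A3XX_TEX_SAMP_1_MIN_LOD(0.0f) |
+ *           A3XX_TEX_SAMP_1_MAX_LOD((float)(miplevels - 1));
+ */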
+
+#define REG_A3XX_TEX_CONST_0 0x00000000
+#define A3XX_TEX_CONST_0_TILED 0x00000001
+#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_X__SHIFT) & A3XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A3XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A3XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Y(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A3XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A3XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Z(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A3XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A3XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A3XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
+#define A3XX_TEX_CONST_0_MIPLVLS__SHIFT 16
+static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK;
+}
+#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
+#define A3XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
+{
+ return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK;
+}
+#define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000
+#define A3XX_TEX_CONST_0_TYPE__SHIFT 30
+static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val)
+{
+ return ((val) << A3XX_TEX_CONST_0_TYPE__SHIFT) & A3XX_TEX_CONST_0_TYPE__MASK;
+}
+
+#define REG_A3XX_TEX_CONST_1 0x00000001
+#define A3XX_TEX_CONST_1_HEIGHT__MASK 0x00003fff
+#define A3XX_TEX_CONST_1_HEIGHT__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_1_HEIGHT__SHIFT) & A3XX_TEX_CONST_1_HEIGHT__MASK;
+}
+#define A3XX_TEX_CONST_1_WIDTH__MASK 0x0fffc000
+#define A3XX_TEX_CONST_1_WIDTH__SHIFT 14
+static inline uint32_t A3XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_1_WIDTH__SHIFT) & A3XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A3XX_TEX_CONST_1_FETCHSIZE__MASK 0xf0000000
+#define A3XX_TEX_CONST_1_FETCHSIZE__SHIFT 28
+static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val)
+{
+ return ((val) << A3XX_TEX_CONST_1_FETCHSIZE__SHIFT) & A3XX_TEX_CONST_1_FETCHSIZE__MASK;
+}
+
+#define REG_A3XX_TEX_CONST_2 0x00000002
+#define A3XX_TEX_CONST_2_INDX__MASK 0x000000ff
+#define A3XX_TEX_CONST_2_INDX__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_2_INDX__SHIFT) & A3XX_TEX_CONST_2_INDX__MASK;
+}
+#define A3XX_TEX_CONST_2_PITCH__MASK 0x3ffff000
+#define A3XX_TEX_CONST_2_PITCH__SHIFT 12
+static inline uint32_t A3XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_2_PITCH__SHIFT) & A3XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A3XX_TEX_CONST_2_SWAP__MASK 0xc0000000
+#define A3XX_TEX_CONST_2_SWAP__SHIFT 30
+static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_TEX_CONST_2_SWAP__SHIFT) & A3XX_TEX_CONST_2_SWAP__MASK;
+}
+
+#define REG_A3XX_TEX_CONST_3 0x00000003
+
+#endif /* A3XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
new file mode 100644
index 00000000000..942e09d898a
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -0,0 +1,709 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifdef CONFIG_MSM_OCMEM
+# include <mach/ocmem.h>
+#endif
+
+#include "a3xx_gpu.h"
+
+#define A3XX_INT0_MASK \
+ (A3XX_INT0_RBBM_AHB_ERROR | \
+ A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
+ A3XX_INT0_CP_T0_PACKET_IN_IB | \
+ A3XX_INT0_CP_OPCODE_ERROR | \
+ A3XX_INT0_CP_RESERVED_BIT_ERROR | \
+ A3XX_INT0_CP_HW_FAULT | \
+ A3XX_INT0_CP_IB1_INT | \
+ A3XX_INT0_CP_IB2_INT | \
+ A3XX_INT0_CP_RB_INT | \
+ A3XX_INT0_CP_REG_PROTECT_FAULT | \
+ A3XX_INT0_CP_AHB_ERROR_HALT | \
+ A3XX_INT0_UCHE_OOB_ACCESS)
+
+
+static bool hang_debug = false;
+MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
+module_param_named(hang_debug, hang_debug, bool, 0600);
+static void a3xx_dump(struct msm_gpu *gpu);
+
+static void a3xx_me_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb;
+
+ OUT_PKT3(ring, CP_ME_INIT, 17);
+ OUT_RING(ring, 0x000003f7);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000080);
+ OUT_RING(ring, 0x00000100);
+ OUT_RING(ring, 0x00000180);
+ OUT_RING(ring, 0x00006600);
+ OUT_RING(ring, 0x00000150);
+ OUT_RING(ring, 0x0000014e);
+ OUT_RING(ring, 0x00000154);
+ OUT_RING(ring, 0x00000001);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ gpu->funcs->flush(gpu);
+ gpu->funcs->idle(gpu);
+}
+
+static int a3xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
+ uint32_t *ptr, len;
+ int i, ret;
+
+ DBG("%s", gpu->name);
+
+ if (adreno_is_a305(adreno_gpu)) {
+ /* Set up 16 deep read/write request queues: */
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
+ /* Set up round robin arbitration between both AXI ports: */
+ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up AOOO: */
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
+
+ } else if (adreno_is_a320(adreno_gpu)) {
+ /* Set up 16 deep read/write request queues: */
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
+ /* Set up round robin arbitration between both AXI ports: */
+ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up AOOO: */
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
+ /* Enable 1K sort: */
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+
+ } else if (adreno_is_a330v2(adreno_gpu)) {
+		/*
+		 * Most of the VBIF registers on 8974v2 have the correct
+		 * values at power on, so we won't modify those if we
+		 * don't need to.
+		 */
+ /* Enable 1k sort: */
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
+ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
+
+ } else if (adreno_is_a330(adreno_gpu)) {
+ /* Set up 16 deep read/write request queues: */
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
+ /* Set up round robin arbitration between both AXI ports: */
+ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
+ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
+ /* Set up AOOO: */
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f);
+ /* Enable 1K sort: */
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+	/* Disable VBIF clock gating, so that the AXI bus can run at
+	 * a higher frequency than the GPU:
+	 */
+ gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001);
+
+ } else {
+ BUG();
+ }
+
+ /* Make all blocks contribute to the GPU BUSY perf counter: */
+ gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
+
+	/* Tune the hysteresis counters for SP and CP idle detection: */
+ gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10);
+ gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+
+ /* Enable the RBBM error reporting bits. This lets us get
+ * useful information on failure:
+ */
+ gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001);
+
+ /* Enable AHB error reporting: */
+ gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff);
+
+ /* Turn on the power counters: */
+ gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000);
+
+ /* Turn on hang detection - this spews a lot of useful information
+ * into the RBBM registers on a hang:
+ */
+ gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff);
+
+	/* Enable 64-byte cacheline size. HW default is 32 bytes (0x000000E0): */
+ gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
+
+ /* Enable Clock gating: */
+ if (adreno_is_a320(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
+ else if (adreno_is_a330v2(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
+ else if (adreno_is_a330(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff);
+
+ if (adreno_is_a330v2(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455);
+ else if (adreno_is_a330(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);
+
+ /* Set the OCMEM base address for A330, etc */
+ if (a3xx_gpu->ocmem_hdl) {
+ gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
+ (unsigned int)(a3xx_gpu->ocmem_base >> 14));
+ }
+
+ /* Turn on performance counters: */
+ gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
+
+	/* Enable the perfcntrs that we use: */
+ for (i = 0; i < gpu->num_perfcntrs; i++) {
+ const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
+ gpu_write(gpu, perfcntr->select_reg, perfcntr->select_val);
+ }
+
+ gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ /* setup access protection: */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
+
+ /* RBBM registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400);
+
+ /* CP registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180);
+
+ /* RB registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300);
+
+ /* VBIF registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000);
+
+	/* NOTE: the PM4/micro-engine firmware registers look to be the same
+	 * for a2xx and a3xx, so we could possibly push that part down to
+	 * the adreno_gpu base class.  Or push both PM4 and PFP, but
+	 * parameterize the PFP ucode addr/data registers.
+	 */
+
+ /* Load PM4: */
+ ptr = (uint32_t *)(adreno_gpu->pm4->data);
+ len = adreno_gpu->pm4->size / 4;
+ DBG("loading PM4 ucode version: %x", ptr[1]);
+
+ gpu_write(gpu, REG_AXXX_CP_DEBUG,
+ AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
+ AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
+
+ /* Load PFP: */
+ ptr = (uint32_t *)(adreno_gpu->pfp->data);
+ len = adreno_gpu->pfp->size / 4;
+ DBG("loading PFP ucode version: %x", ptr[5]);
+
+ gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+ /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
+ if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu)) {
+ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
+ AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
+ AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
+ AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
+ } else if (adreno_is_a330(adreno_gpu)) {
+		/* NOTE: this value (taken from the downstream Android
+		 * driver) includes some bits outside of the known
+		 * bitfields.  But the A330 also has this "MERCIU queue"
+		 * thing, which might explain a new bitfield or a
+		 * reshuffling:
+		 */
+ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008);
+ }
+
+ /* clear ME_HALT to start micro engine */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
+
+ a3xx_me_init(gpu);
+
+ return 0;
+}
+
+static void a3xx_recover(struct msm_gpu *gpu)
+{
+ /* dump registers before resetting gpu, if enabled: */
+ if (hang_debug)
+ a3xx_dump(gpu);
+ gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1);
+ gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0);
+ adreno_recover(gpu);
+}
+
+static void a3xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ adreno_gpu_cleanup(adreno_gpu);
+
+#ifdef CONFIG_MSM_OCMEM
+ if (a3xx_gpu->ocmem_base)
+ ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
+#endif
+
+ kfree(a3xx_gpu);
+}
+
+static void a3xx_idle(struct msm_gpu *gpu)
+{
+ /* wait for ringbuffer to drain: */
+ adreno_idle(gpu);
+
+ /* then wait for GPU to finish: */
+ if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
+ A3XX_RBBM_STATUS_GPU_BUSY)))
+ DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+}
+
+static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+
+ status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS);
+ DBG("%s: %08x", gpu->name, status);
+
+ // TODO
+
+ gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status);
+
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
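+/* Inclusive [start, end] pairs of register ranges, walked two entries
+ * at a time by the dump loops in a3xx_show()/a3xx_dump() below:
+ */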
+static const unsigned int a3xx_registers[] = {
+ 0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
+ 0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
+ 0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
+ 0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
+ 0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
+ 0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
+ 0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
+ 0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
+ 0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
+ 0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
+ 0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
+ 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05,
+ 0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65,
+ 0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
+ 0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
+ 0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
+ 0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
+ 0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
+ 0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
+ 0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
+ 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
+ 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
+ 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
+ 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
+ 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
+ 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
+ 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
+ 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
+ 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
+ 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
+ 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
+ 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
+ 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
+ 0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
+ 0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
+ 0x303c, 0x303c, 0x305e, 0x305f,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+ struct drm_device *dev = gpu->dev;
+ int i;
+
+ adreno_show(gpu, m);
+
+ mutex_lock(&dev->struct_mutex);
+
+ gpu->funcs->pm_resume(gpu);
+
+ seq_printf(m, "status: %08x\n",
+ gpu_read(gpu, REG_A3XX_RBBM_STATUS));
+
+ /* dump these out in a form that can be parsed by demsm: */
+ seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
+ for (i = 0; i < ARRAY_SIZE(a3xx_registers); i += 2) {
+ uint32_t start = a3xx_registers[i];
+ uint32_t end = a3xx_registers[i+1];
+ uint32_t addr;
+
+ for (addr = start; addr <= end; addr++) {
+ uint32_t val = gpu_read(gpu, addr);
+ seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
+ }
+ }
+
+ gpu->funcs->pm_suspend(gpu);
+
+ mutex_unlock(&dev->struct_mutex);
+}
+#endif
+
+/* It would be nice not to have to duplicate the _show() stuff with printk(): */
+static void a3xx_dump(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump(gpu);
+ printk("status: %08x\n",
+ gpu_read(gpu, REG_A3XX_RBBM_STATUS));
+
+ /* dump these out in a form that can be parsed by demsm: */
+ printk("IO:region %s 00000000 00020000\n", gpu->name);
+ for (i = 0; i < ARRAY_SIZE(a3xx_registers); i += 2) {
+ uint32_t start = a3xx_registers[i];
+ uint32_t end = a3xx_registers[i+1];
+ uint32_t addr;
+
+ for (addr = start; addr <= end; addr++) {
+ uint32_t val = gpu_read(gpu, addr);
+ printk("IO:R %08x %08x\n", addr<<2, val);
+ }
+ }
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a3xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a3xx_recover,
+ .last_fence = adreno_last_fence,
+ .submit = adreno_submit,
+ .flush = adreno_flush,
+ .idle = a3xx_idle,
+ .irq = a3xx_irq,
+ .destroy = a3xx_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .show = a3xx_show,
+#endif
+ },
+};
+
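+/* Each entry pairs a counter-select register with the low word of the
+ * RBBM counter it feeds: a3xx_hw_init() programs select_val into
+ * select_reg, leaving the msm_gpu core to sample the *_LO register
+ * (inferred from how gpu->perfcntrs is wired up above).
+ */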
+static const struct msm_gpu_perfcntr perfcntrs[] = {
+ { REG_A3XX_SP_PERFCOUNTER6_SELECT, REG_A3XX_RBBM_PERFCTR_SP_6_LO,
+ SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" },
+ { REG_A3XX_SP_PERFCOUNTER7_SELECT, REG_A3XX_RBBM_PERFCTR_SP_7_LO,
+ SP_FS_FULL_ALU_INSTRUCTIONS, "ALUFULL" },
+};
+
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
+{
+ struct a3xx_gpu *a3xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "no a3xx device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ config = pdev->dev.platform_data;
+
+ a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL);
+ if (!a3xx_gpu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ adreno_gpu = &a3xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ a3xx_gpu->pdev = pdev;
+
+ gpu->fast_rate = config->fast_rate;
+ gpu->slow_rate = config->slow_rate;
+ gpu->bus_freq = config->bus_freq;
+#ifdef CONFIG_MSM_BUS_SCALING
+ gpu->bus_scale_table = config->bus_scale_table;
+#endif
+
+ DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
+ gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
+
+ gpu->perfcntrs = perfcntrs;
+ gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, config->rev);
+ if (ret)
+ goto fail;
+
+ /* if needed, allocate gmem: */
+ if (adreno_is_a330(adreno_gpu)) {
+#ifdef CONFIG_MSM_OCMEM
+ /* TODO this is different/missing upstream: */
+ struct ocmem_buf *ocmem_hdl =
+ ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
+
+ a3xx_gpu->ocmem_hdl = ocmem_hdl;
+ a3xx_gpu->ocmem_base = ocmem_hdl->addr;
+ adreno_gpu->gmem = ocmem_hdl->len;
+ DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
+ a3xx_gpu->ocmem_base);
+#endif
+ }
+
+ if (!gpu->mmu) {
+ /* TODO we think it is possible to configure the GPU to
+ * restrict access to VRAM carveout. But the required
+ * registers are unknown. For now just bail out and
+ * limp along with just modesetting. If it turns out
+ * to not be possible to restrict access, then we must
+ * implement a cmdstream validator.
+ */
+ dev_err(dev->dev, "No memory protection without IOMMU\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ return gpu;
+
+fail:
+ if (a3xx_gpu)
+ a3xx_destroy(&a3xx_gpu->base.base);
+
+ return ERR_PTR(ret);
+}
+
+/*
+ * The a3xx device:
+ */
+
+#if defined(CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
+# include <mach/kgsl.h>
+#endif
+
+static void set_gpu_pdev(struct drm_device *dev,
+ struct platform_device *pdev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ priv->gpu_pdev = pdev;
+}
+
+static int a3xx_bind(struct device *dev, struct device *master, void *data)
+{
+ static struct adreno_platform_config config = {};
+#ifdef CONFIG_OF
+ struct device_node *child, *node = dev->of_node;
+ u32 val;
+ int ret;
+
+ ret = of_property_read_u32(node, "qcom,chipid", &val);
+ if (ret) {
+ dev_err(dev, "could not find chipid: %d\n", ret);
+ return ret;
+ }
+
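+	/* qcom,chipid packs core, major, minor and patchid one byte each,
+	 * most significant byte first:
+	 */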
+ config.rev = ADRENO_REV((val >> 24) & 0xff,
+ (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
+
+ /* find clock rates: */
+ config.fast_rate = 0;
+ config.slow_rate = ~0;
+ for_each_child_of_node(node, child) {
+ if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
+ struct device_node *pwrlvl;
+ for_each_child_of_node(child, pwrlvl) {
+ ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
+ if (ret) {
+ dev_err(dev, "could not find gpu-freq: %d\n", ret);
+ return ret;
+ }
+ config.fast_rate = max(config.fast_rate, val);
+ config.slow_rate = min(config.slow_rate, val);
+ }
+ }
+ }
+
+ if (!config.fast_rate) {
+ dev_err(dev, "could not find clk rates\n");
+ return -ENXIO;
+ }
+
+#else
+ struct kgsl_device_platform_data *pdata = dev->platform_data;
+ uint32_t version = socinfo_get_version();
+ if (cpu_is_apq8064ab()) {
+ config.fast_rate = 450000000;
+ config.slow_rate = 27000000;
+ config.bus_freq = 4;
+ config.rev = ADRENO_REV(3, 2, 1, 0);
+ } else if (cpu_is_apq8064()) {
+ config.fast_rate = 400000000;
+ config.slow_rate = 27000000;
+ config.bus_freq = 4;
+
+ if (SOCINFO_VERSION_MAJOR(version) == 2)
+ config.rev = ADRENO_REV(3, 2, 0, 2);
+ else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
+ (SOCINFO_VERSION_MINOR(version) == 1))
+ config.rev = ADRENO_REV(3, 2, 0, 1);
+ else
+ config.rev = ADRENO_REV(3, 2, 0, 0);
+
+ } else if (cpu_is_msm8960ab()) {
+ config.fast_rate = 400000000;
+ config.slow_rate = 320000000;
+ config.bus_freq = 4;
+
+ if (SOCINFO_VERSION_MINOR(version) == 0)
+ config.rev = ADRENO_REV(3, 2, 1, 0);
+ else
+ config.rev = ADRENO_REV(3, 2, 1, 1);
+
+ } else if (cpu_is_msm8930()) {
+ config.fast_rate = 400000000;
+ config.slow_rate = 27000000;
+ config.bus_freq = 3;
+
+ if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
+ (SOCINFO_VERSION_MINOR(version) == 2))
+ config.rev = ADRENO_REV(3, 0, 5, 2);
+ else
+ config.rev = ADRENO_REV(3, 0, 5, 0);
+
+ }
+# ifdef CONFIG_MSM_BUS_SCALING
+ config.bus_scale_table = pdata->bus_scale_table;
+# endif
+#endif
+ dev->platform_data = &config;
+ set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
+ return 0;
+}
+
+static void a3xx_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ set_gpu_pdev(dev_get_drvdata(master), NULL);
+}
+
+static const struct component_ops a3xx_ops = {
+ .bind = a3xx_bind,
+ .unbind = a3xx_unbind,
+};
+
+static int a3xx_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &a3xx_ops);
+}
+
+static int a3xx_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &a3xx_ops);
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,kgsl-3d0" },
+ {}
+};
+
+static struct platform_driver a3xx_driver = {
+ .probe = a3xx_probe,
+ .remove = a3xx_remove,
+ .driver = {
+ .name = "kgsl-3d0",
+ .of_match_table = dt_match,
+ },
+};
+
+void __init a3xx_register(void)
+{
+ platform_driver_register(&a3xx_driver);
+}
+
+void __exit a3xx_unregister(void)
+{
+ platform_driver_unregister(&a3xx_driver);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
new file mode 100644
index 00000000000..bb9a8ca0507
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __A3XX_GPU_H__
+#define __A3XX_GPU_H__
+
+#include "adreno_gpu.h"
+#include "a3xx.xml.h"
+
+struct a3xx_gpu {
+ struct adreno_gpu base;
+ struct platform_device *pdev;
+
+ /* if OCMEM is used for GMEM: */
+ uint32_t ocmem_base;
+ void *ocmem_hdl;
+};
+#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
+
+#endif /* __A3XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
new file mode 100644
index 00000000000..d6e6ce2d1ab
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -0,0 +1,411 @@
+#ifndef ADRENO_COMMON_XML
+#define ADRENO_COMMON_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum adreno_pa_su_sc_draw {
+ PC_DRAW_POINTS = 0,
+ PC_DRAW_LINES = 1,
+ PC_DRAW_TRIANGLES = 2,
+};
+
+enum adreno_compare_func {
+ FUNC_NEVER = 0,
+ FUNC_LESS = 1,
+ FUNC_EQUAL = 2,
+ FUNC_LEQUAL = 3,
+ FUNC_GREATER = 4,
+ FUNC_NOTEQUAL = 5,
+ FUNC_GEQUAL = 6,
+ FUNC_ALWAYS = 7,
+};
+
+enum adreno_stencil_op {
+ STENCIL_KEEP = 0,
+ STENCIL_ZERO = 1,
+ STENCIL_REPLACE = 2,
+ STENCIL_INCR_CLAMP = 3,
+ STENCIL_DECR_CLAMP = 4,
+ STENCIL_INVERT = 5,
+ STENCIL_INCR_WRAP = 6,
+ STENCIL_DECR_WRAP = 7,
+};
+
+enum adreno_rb_blend_factor {
+ FACTOR_ZERO = 0,
+ FACTOR_ONE = 1,
+ FACTOR_SRC_COLOR = 4,
+ FACTOR_ONE_MINUS_SRC_COLOR = 5,
+ FACTOR_SRC_ALPHA = 6,
+ FACTOR_ONE_MINUS_SRC_ALPHA = 7,
+ FACTOR_DST_COLOR = 8,
+ FACTOR_ONE_MINUS_DST_COLOR = 9,
+ FACTOR_DST_ALPHA = 10,
+ FACTOR_ONE_MINUS_DST_ALPHA = 11,
+ FACTOR_CONSTANT_COLOR = 12,
+ FACTOR_ONE_MINUS_CONSTANT_COLOR = 13,
+ FACTOR_CONSTANT_ALPHA = 14,
+ FACTOR_ONE_MINUS_CONSTANT_ALPHA = 15,
+ FACTOR_SRC_ALPHA_SATURATE = 16,
+};
+
+enum adreno_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_MIN_DST_SRC = 2,
+ BLEND_MAX_DST_SRC = 3,
+ BLEND_DST_MINUS_SRC = 4,
+ BLEND_DST_PLUS_SRC_BIAS = 5,
+};
+
+enum adreno_rb_surface_endian {
+ ENDIAN_NONE = 0,
+ ENDIAN_8IN16 = 1,
+ ENDIAN_8IN32 = 2,
+ ENDIAN_16IN32 = 3,
+ ENDIAN_8IN64 = 4,
+ ENDIAN_8IN128 = 5,
+};
+
+enum adreno_rb_dither_mode {
+ DITHER_DISABLE = 0,
+ DITHER_ALWAYS = 1,
+ DITHER_IF_ALPHA_OFF = 2,
+};
+
+enum adreno_rb_depth_format {
+ DEPTHX_16 = 0,
+ DEPTHX_24_8 = 1,
+};
+
+#define REG_AXXX_CP_RB_BASE 0x000001c0
+
+#define REG_AXXX_CP_RB_CNTL 0x000001c1
+#define AXXX_CP_RB_CNTL_BUFSZ__MASK 0x0000003f
+#define AXXX_CP_RB_CNTL_BUFSZ__SHIFT 0
+static inline uint32_t AXXX_CP_RB_CNTL_BUFSZ(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_CNTL_BUFSZ__SHIFT) & AXXX_CP_RB_CNTL_BUFSZ__MASK;
+}
+#define AXXX_CP_RB_CNTL_BLKSZ__MASK 0x00003f00
+#define AXXX_CP_RB_CNTL_BLKSZ__SHIFT 8
+static inline uint32_t AXXX_CP_RB_CNTL_BLKSZ(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_CNTL_BLKSZ__SHIFT) & AXXX_CP_RB_CNTL_BLKSZ__MASK;
+}
+#define AXXX_CP_RB_CNTL_BUF_SWAP__MASK 0x00030000
+#define AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT 16
+static inline uint32_t AXXX_CP_RB_CNTL_BUF_SWAP(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT) & AXXX_CP_RB_CNTL_BUF_SWAP__MASK;
+}
+#define AXXX_CP_RB_CNTL_POLL_EN 0x00100000
+#define AXXX_CP_RB_CNTL_NO_UPDATE 0x08000000
+#define AXXX_CP_RB_CNTL_RPTR_WR_EN 0x80000000
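Every multi-bit field in these generated headers follows the same triple: a __MASK, a __SHIFT, and an inline packer that shifts then masks. Unpacking is the mirror image. A small sketch using the BUFSZ field above (values illustrative):

/* pack: a ring of 2^12 quad-words into bits [5:0] of CP_RB_CNTL */
uint32_t cntl = AXXX_CP_RB_CNTL_BUFSZ(12);	/* == 0x0000000c */

/* unpack: mask first, then shift back down */
uint32_t bufsz = (cntl & AXXX_CP_RB_CNTL_BUFSZ__MASK)
		>> AXXX_CP_RB_CNTL_BUFSZ__SHIFT;	/* == 12 */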
+
+#define REG_AXXX_CP_RB_RPTR_ADDR 0x000001c3
+#define AXXX_CP_RB_RPTR_ADDR_SWAP__MASK 0x00000003
+#define AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT 0
+static inline uint32_t AXXX_CP_RB_RPTR_ADDR_SWAP(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT) & AXXX_CP_RB_RPTR_ADDR_SWAP__MASK;
+}
+#define AXXX_CP_RB_RPTR_ADDR_ADDR__MASK 0xfffffffc
+#define AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT 2
+static inline uint32_t AXXX_CP_RB_RPTR_ADDR_ADDR(uint32_t val)
+{
+ return ((val >> 2) << AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT) & AXXX_CP_RB_RPTR_ADDR_ADDR__MASK;
+}
+
+#define REG_AXXX_CP_RB_RPTR 0x000001c4
+
+#define REG_AXXX_CP_RB_WPTR 0x000001c5
+
+#define REG_AXXX_CP_RB_WPTR_DELAY 0x000001c6
+
+#define REG_AXXX_CP_RB_RPTR_WR 0x000001c7
+
+#define REG_AXXX_CP_RB_WPTR_BASE 0x000001c8
+
+#define REG_AXXX_CP_QUEUE_THRESHOLDS 0x000001d5
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK 0x0000000f
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT 0
+static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(uint32_t val)
+{
+ return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK;
+}
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK 0x00000f00
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT 8
+static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(uint32_t val)
+{
+ return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK;
+}
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK 0x000f0000
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT 16
+static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val)
+{
+ return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK;
+}
+
+#define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6
+#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK 0x001f0000
+#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT 16
+static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_MEQ_END(uint32_t val)
+{
+ return ((val) << AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK;
+}
+#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK 0x1f000000
+#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT 24
+static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_ROQ_END(uint32_t val)
+{
+ return ((val) << AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK;
+}
+
+#define REG_AXXX_CP_CSQ_AVAIL 0x000001d7
+#define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f
+#define AXXX_CP_CSQ_AVAIL_RING__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_AVAIL_RING(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_AVAIL_RING__SHIFT) & AXXX_CP_CSQ_AVAIL_RING__MASK;
+}
+#define AXXX_CP_CSQ_AVAIL_IB1__MASK 0x00007f00
+#define AXXX_CP_CSQ_AVAIL_IB1__SHIFT 8
+static inline uint32_t AXXX_CP_CSQ_AVAIL_IB1(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_AVAIL_IB1__SHIFT) & AXXX_CP_CSQ_AVAIL_IB1__MASK;
+}
+#define AXXX_CP_CSQ_AVAIL_IB2__MASK 0x007f0000
+#define AXXX_CP_CSQ_AVAIL_IB2__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_AVAIL_IB2(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_AVAIL_IB2__SHIFT) & AXXX_CP_CSQ_AVAIL_IB2__MASK;
+}
+
+#define REG_AXXX_CP_STQ_AVAIL 0x000001d8
+#define AXXX_CP_STQ_AVAIL_ST__MASK 0x0000007f
+#define AXXX_CP_STQ_AVAIL_ST__SHIFT 0
+static inline uint32_t AXXX_CP_STQ_AVAIL_ST(uint32_t val)
+{
+ return ((val) << AXXX_CP_STQ_AVAIL_ST__SHIFT) & AXXX_CP_STQ_AVAIL_ST__MASK;
+}
+
+#define REG_AXXX_CP_MEQ_AVAIL 0x000001d9
+#define AXXX_CP_MEQ_AVAIL_MEQ__MASK 0x0000001f
+#define AXXX_CP_MEQ_AVAIL_MEQ__SHIFT 0
+static inline uint32_t AXXX_CP_MEQ_AVAIL_MEQ(uint32_t val)
+{
+ return ((val) << AXXX_CP_MEQ_AVAIL_MEQ__SHIFT) & AXXX_CP_MEQ_AVAIL_MEQ__MASK;
+}
+
+#define REG_AXXX_SCRATCH_UMSK 0x000001dc
+#define AXXX_SCRATCH_UMSK_UMSK__MASK 0x000000ff
+#define AXXX_SCRATCH_UMSK_UMSK__SHIFT 0
+static inline uint32_t AXXX_SCRATCH_UMSK_UMSK(uint32_t val)
+{
+ return ((val) << AXXX_SCRATCH_UMSK_UMSK__SHIFT) & AXXX_SCRATCH_UMSK_UMSK__MASK;
+}
+#define AXXX_SCRATCH_UMSK_SWAP__MASK 0x00030000
+#define AXXX_SCRATCH_UMSK_SWAP__SHIFT 16
+static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
+{
+ return ((val) << AXXX_SCRATCH_UMSK_SWAP__SHIFT) & AXXX_SCRATCH_UMSK_SWAP__MASK;
+}
+
+#define REG_AXXX_SCRATCH_ADDR 0x000001dd
+
+#define REG_AXXX_CP_ME_RDADDR 0x000001ea
+
+#define REG_AXXX_CP_STATE_DEBUG_INDEX 0x000001ec
+
+#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed
+
+#define REG_AXXX_CP_INT_CNTL 0x000001f2
+
+#define REG_AXXX_CP_INT_STATUS 0x000001f3
+
+#define REG_AXXX_CP_INT_ACK 0x000001f4
+
+#define REG_AXXX_CP_ME_CNTL 0x000001f6
+
+#define REG_AXXX_CP_ME_STATUS 0x000001f7
+
+#define REG_AXXX_CP_ME_RAM_WADDR 0x000001f8
+
+#define REG_AXXX_CP_ME_RAM_RADDR 0x000001f9
+
+#define REG_AXXX_CP_ME_RAM_DATA 0x000001fa
+
+#define REG_AXXX_CP_DEBUG 0x000001fc
+#define AXXX_CP_DEBUG_PREDICATE_DISABLE 0x00800000
+#define AXXX_CP_DEBUG_PROG_END_PTR_ENABLE 0x01000000
+#define AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE 0x02000000
+#define AXXX_CP_DEBUG_PREFETCH_PASS_NOPS 0x04000000
+#define AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE 0x08000000
+#define AXXX_CP_DEBUG_PREFETCH_MATCH_DISABLE 0x10000000
+#define AXXX_CP_DEBUG_SIMPLE_ME_FLOW_CONTROL 0x40000000
+#define AXXX_CP_DEBUG_MIU_WRITE_PACK_DISABLE 0x80000000
+
+#define REG_AXXX_CP_CSQ_RB_STAT 0x000001fd
+#define AXXX_CP_CSQ_RB_STAT_RPTR__MASK 0x0000007f
+#define AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_RB_STAT_RPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_RPTR__MASK;
+}
+#define AXXX_CP_CSQ_RB_STAT_WPTR__MASK 0x007f0000
+#define AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_RB_STAT_WPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_WPTR__MASK;
+}
+
+#define REG_AXXX_CP_CSQ_IB1_STAT 0x000001fe
+#define AXXX_CP_CSQ_IB1_STAT_RPTR__MASK 0x0000007f
+#define AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_IB1_STAT_RPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_RPTR__MASK;
+}
+#define AXXX_CP_CSQ_IB1_STAT_WPTR__MASK 0x007f0000
+#define AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_IB1_STAT_WPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_WPTR__MASK;
+}
+
+#define REG_AXXX_CP_CSQ_IB2_STAT 0x000001ff
+#define AXXX_CP_CSQ_IB2_STAT_RPTR__MASK 0x0000007f
+#define AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_IB2_STAT_RPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_RPTR__MASK;
+}
+#define AXXX_CP_CSQ_IB2_STAT_WPTR__MASK 0x007f0000
+#define AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK;
+}
+
+#define REG_AXXX_CP_NON_PREFETCH_CNTRS 0x00000440
+
+#define REG_AXXX_CP_STQ_ST_STAT 0x00000443
+
+#define REG_AXXX_CP_ST_BASE 0x0000044d
+
+#define REG_AXXX_CP_ST_BUFSZ 0x0000044e
+
+#define REG_AXXX_CP_MEQ_STAT 0x0000044f
+
+#define REG_AXXX_CP_MIU_TAG_STAT 0x00000452
+
+#define REG_AXXX_CP_BIN_MASK_LO 0x00000454
+
+#define REG_AXXX_CP_BIN_MASK_HI 0x00000455
+
+#define REG_AXXX_CP_BIN_SELECT_LO 0x00000456
+
+#define REG_AXXX_CP_BIN_SELECT_HI 0x00000457
+
+#define REG_AXXX_CP_IB1_BASE 0x00000458
+
+#define REG_AXXX_CP_IB1_BUFSZ 0x00000459
+
+#define REG_AXXX_CP_IB2_BASE 0x0000045a
+
+#define REG_AXXX_CP_IB2_BUFSZ 0x0000045b
+
+#define REG_AXXX_CP_STAT 0x0000047f
+
+#define REG_AXXX_CP_SCRATCH_REG0 0x00000578
+
+#define REG_AXXX_CP_SCRATCH_REG1 0x00000579
+
+#define REG_AXXX_CP_SCRATCH_REG2 0x0000057a
+
+#define REG_AXXX_CP_SCRATCH_REG3 0x0000057b
+
+#define REG_AXXX_CP_SCRATCH_REG4 0x0000057c
+
+#define REG_AXXX_CP_SCRATCH_REG5 0x0000057d
+
+#define REG_AXXX_CP_SCRATCH_REG6 0x0000057e
+
+#define REG_AXXX_CP_SCRATCH_REG7 0x0000057f
+
+#define REG_AXXX_CP_ME_VS_EVENT_SRC 0x00000600
+
+#define REG_AXXX_CP_ME_VS_EVENT_ADDR 0x00000601
+
+#define REG_AXXX_CP_ME_VS_EVENT_DATA 0x00000602
+
+#define REG_AXXX_CP_ME_VS_EVENT_ADDR_SWM 0x00000603
+
+#define REG_AXXX_CP_ME_VS_EVENT_DATA_SWM 0x00000604
+
+#define REG_AXXX_CP_ME_PS_EVENT_SRC 0x00000605
+
+#define REG_AXXX_CP_ME_PS_EVENT_ADDR 0x00000606
+
+#define REG_AXXX_CP_ME_PS_EVENT_DATA 0x00000607
+
+#define REG_AXXX_CP_ME_PS_EVENT_ADDR_SWM 0x00000608
+
+#define REG_AXXX_CP_ME_PS_EVENT_DATA_SWM 0x00000609
+
+#define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a
+
+#define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b
+
+#define REG_AXXX_CP_ME_CF_EVENT_DATA 0x0000060c
+
+#define REG_AXXX_CP_ME_NRT_ADDR 0x0000060d
+
+#define REG_AXXX_CP_ME_NRT_DATA 0x0000060e
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_SRC 0x00000612
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_ADDR 0x00000613
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_DATA 0x00000614
+
+
+#endif /* ADRENO_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
new file mode 100644
index 00000000000..28ca8cd8b09
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -0,0 +1,402 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "adreno_gpu.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+struct adreno_info {
+ struct adreno_rev rev;
+ uint32_t revn;
+ const char *name;
+ const char *pm4fw, *pfpfw;
+ uint32_t gmem;
+};
+
+#define ANY_ID 0xff
+
+static const struct adreno_info gpulist[] = {
+ {
+ .rev = ADRENO_REV(3, 0, 5, ANY_ID),
+ .revn = 305,
+ .name = "A305",
+ .pm4fw = "a300_pm4.fw",
+ .pfpfw = "a300_pfp.fw",
+ .gmem = SZ_256K,
+ }, {
+ .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
+ .revn = 320,
+ .name = "A320",
+ .pm4fw = "a300_pm4.fw",
+ .pfpfw = "a300_pfp.fw",
+ .gmem = SZ_512K,
+ }, {
+ .rev = ADRENO_REV(3, 3, 0, ANY_ID),
+ .revn = 330,
+ .name = "A330",
+ .pm4fw = "a330_pm4.fw",
+ .pfpfw = "a330_pfp.fw",
+ .gmem = SZ_1M,
+ },
+};
+
+MODULE_FIRMWARE("a300_pm4.fw");
+MODULE_FIRMWARE("a300_pfp.fw");
+MODULE_FIRMWARE("a330_pm4.fw");
+MODULE_FIRMWARE("a330_pfp.fw");
+
+#define RB_SIZE SZ_32K
+#define RB_BLKSIZE 16
+
+int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ switch (param) {
+ case MSM_PARAM_GPU_ID:
+ *value = adreno_gpu->info->revn;
+ return 0;
+ case MSM_PARAM_GMEM_SIZE:
+ *value = adreno_gpu->gmem;
+ return 0;
+ case MSM_PARAM_CHIP_ID:
+ *value = adreno_gpu->rev.patchid |
+ (adreno_gpu->rev.minor << 8) |
+ (adreno_gpu->rev.major << 16) |
+ (adreno_gpu->rev.core << 24);
+ return 0;
+ default:
+ DBG("%s: invalid param: %u", gpu->name, param);
+ return -EINVAL;
+ }
+}
+
+#define rbmemptr(adreno_gpu, member) \
+ ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
+
+int adreno_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ DBG("%s", gpu->name);
+
+ /* Setup REG_CP_RB_CNTL: */
+ gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+ /* size is log2(quad-words): */
+ AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
+ AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
+
+ /* Setup ringbuffer address: */
+ gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
+ gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));
+
+ /* Setup scratch/timestamp: */
+ gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));
+
+ gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);
+
+ return 0;
+}
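With the RB_SIZE/RB_BLKSIZE values defined above, the two log2 fields written here work out as follows (a worked example, not additional configuration):

/* RB_SIZE = SZ_32K = 32768 bytes = 4096 quad-words (8 bytes each)
 *   AXXX_CP_RB_CNTL_BUFSZ(ilog2(32768 / 8)) = BUFSZ(ilog2(4096)) = BUFSZ(12)
 * RB_BLKSIZE = 16 bytes = 2 quad-words
 *   AXXX_CP_RB_CNTL_BLKSZ(ilog2(16 / 8))    = BLKSZ(ilog2(2))    = BLKSZ(1)
 */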
+
+static uint32_t get_wptr(struct msm_ringbuffer *ring)
+{
+ return ring->cur - ring->start;
+}
+
+uint32_t adreno_last_fence(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ return adreno_gpu->memptrs->fence;
+}
+
+void adreno_recover(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct drm_device *dev = gpu->dev;
+ int ret;
+
+ gpu->funcs->pm_suspend(gpu);
+
+ /* reset ringbuffer: */
+ gpu->rb->cur = gpu->rb->start;
+
+ /* reset completed fence seqno, just discard anything pending: */
+ adreno_gpu->memptrs->fence = gpu->submitted_fence;
+ adreno_gpu->memptrs->rptr = 0;
+ adreno_gpu->memptrs->wptr = 0;
+
+ gpu->funcs->pm_resume(gpu);
+ ret = gpu->funcs->hw_init(gpu);
+ if (ret) {
+ dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+ /* hmm, oh well? */
+ }
+}
+
+int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->rb;
+ unsigned i, ibs = 0;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ /* ignore IB-targets */
+ break;
+		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+			/* ignore if there has not been a ctx switch: */
+			if (priv->lastctx == ctx)
+				break;
+			/* fall-thru */
+		case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
+ OUT_RING(ring, submit->cmd[i].iova);
+ OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
+ break;
+ }
+ }
+
+	/* on a320, at least, we seem to need to pad things out to an
+	 * even number of qwords to avoid an issue w/ the CP hanging on
+	 * wrap-around:
+	 */
+ if (ibs % 2)
+ OUT_PKT2(ring);
+
+ OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+ OUT_RING(ring, submit->fence);
+
+ if (adreno_is_a3xx(adreno_gpu)) {
+ /* Flush HLSQ lazy updates to make sure there is nothing
+ * pending for indirect loads after the timestamp has
+ * passed:
+ */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, HLSQ_FLUSH);
+
+ OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+ OUT_RING(ring, 0x00000000);
+ }
+
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS);
+ OUT_RING(ring, rbmemptr(adreno_gpu, fence));
+ OUT_RING(ring, submit->fence);
+
+ /* we could maybe be clever and only CP_COND_EXEC the interrupt: */
+ OUT_PKT3(ring, CP_INTERRUPT, 1);
+ OUT_RING(ring, 0x80000000);
+
+#if 0
+ if (adreno_is_a3xx(adreno_gpu)) {
+ /* Dummy set-constant to trigger context rollover */
+ OUT_PKT3(ring, CP_SET_CONSTANT, 2);
+ OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
+ OUT_RING(ring, 0x00000000);
+ }
+#endif
+
+ gpu->funcs->flush(gpu);
+
+ return 0;
+}
+
+void adreno_flush(struct msm_gpu *gpu)
+{
+ uint32_t wptr = get_wptr(gpu->rb);
+
+ /* ensure writes to ringbuffer have hit system memory: */
+ mb();
+
+ gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
+}
+
+void adreno_idle(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ uint32_t wptr = get_wptr(gpu->rb);
+
+ /* wait for CP to drain ringbuffer: */
+ if (spin_until(adreno_gpu->memptrs->rptr == wptr))
+ DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+}
+
+#ifdef CONFIG_DEBUG_FS
+void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
+ adreno_gpu->info->revn, adreno_gpu->rev.core,
+ adreno_gpu->rev.major, adreno_gpu->rev.minor,
+ adreno_gpu->rev.patchid);
+
+ seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
+ gpu->submitted_fence);
+ seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
+ seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
+ seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
+}
+#endif
+
+/* would be nice to not have to duplicate the _show() stuff with printk(): */
+void adreno_dump(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ printk("revision: %d (%d.%d.%d.%d)\n",
+ adreno_gpu->info->revn, adreno_gpu->rev.core,
+ adreno_gpu->rev.major, adreno_gpu->rev.minor,
+ adreno_gpu->rev.patchid);
+
+ printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
+ gpu->submitted_fence);
+ printk("rptr: %d\n", adreno_gpu->memptrs->rptr);
+ printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
+ printk("rb wptr: %d\n", get_wptr(gpu->rb));
+}
+
+static uint32_t ring_freewords(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ uint32_t size = gpu->rb->size / 4;
+ uint32_t wptr = get_wptr(gpu->rb);
+ uint32_t rptr = adreno_gpu->memptrs->rptr;
+ return (rptr + (size - 1) - wptr) % size;
+}
+
+void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
+{
+ if (spin_until(ring_freewords(gpu) >= ndwords))
+ DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+}
+
+static const char *iommu_ports[] = {
+ "gfx3d_user", "gfx3d_priv",
+ "gfx3d1_user", "gfx3d1_priv",
+};
+
+static inline bool _rev_match(uint8_t entry, uint8_t id)
+{
+ return (entry == ANY_ID) || (entry == id);
+}
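ANY_ID entries act as wildcards, so a probe-time revision only has to match the table fields that are pinned. For example (hypothetical revision), an A320 with patchlevel 1 resolves against the second gpulist entry:

/* gpulist[1].rev == ADRENO_REV(3, 2, ANY_ID, ANY_ID) */
struct adreno_rev rev = ADRENO_REV(3, 2, 0, 1);

/* core:  _rev_match(3,      3) -> true
 * major: _rev_match(2,      2) -> true
 * minor: _rev_match(ANY_ID, 0) -> true (wildcard)
 * patch: _rev_match(ANY_ID, 1) -> true (wildcard)
 * => gpu->info = &gpulist[1] ("A320"), gpu->revn = 320
 */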
+
+int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+ struct adreno_rev rev)
+{
+ struct msm_mmu *mmu;
+ int i, ret;
+
+ /* identify gpu: */
+ for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
+ const struct adreno_info *info = &gpulist[i];
+ if (_rev_match(info->rev.core, rev.core) &&
+ _rev_match(info->rev.major, rev.major) &&
+ _rev_match(info->rev.minor, rev.minor) &&
+ _rev_match(info->rev.patchid, rev.patchid)) {
+ gpu->info = info;
+ gpu->revn = info->revn;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(gpulist)) {
+ dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
+ rev.core, rev.major, rev.minor, rev.patchid);
+ return -ENXIO;
+ }
+
+ DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name,
+ rev.core, rev.major, rev.minor, rev.patchid);
+
+ gpu->funcs = funcs;
+ gpu->gmem = gpu->info->gmem;
+ gpu->rev = rev;
+
+ ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
+ if (ret) {
+ dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
+ gpu->info->pm4fw, ret);
+ return ret;
+ }
+
+ ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev);
+ if (ret) {
+ dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
+ gpu->info->pfpfw, ret);
+ return ret;
+ }
+
+ ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base,
+ gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
+ RB_SIZE);
+ if (ret)
+ return ret;
+
+ mmu = gpu->base.mmu;
+ if (mmu) {
+ ret = mmu->funcs->attach(mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret)
+ return ret;
+ }
+
+ gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
+ MSM_BO_UNCACHED);
+ if (IS_ERR(gpu->memptrs_bo)) {
+ ret = PTR_ERR(gpu->memptrs_bo);
+ gpu->memptrs_bo = NULL;
+ dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
+ return ret;
+ }
+
+ gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
+ if (!gpu->memptrs) {
+ dev_err(drm->dev, "could not vmap memptrs\n");
+ return -ENOMEM;
+ }
+
+ ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
+ &gpu->memptrs_iova);
+ if (ret) {
+ dev_err(drm->dev, "could not map memptrs: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void adreno_gpu_cleanup(struct adreno_gpu *gpu)
+{
+ if (gpu->memptrs_bo) {
+ if (gpu->memptrs_iova)
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+ drm_gem_object_unreference(gpu->memptrs_bo);
+ }
+ if (gpu->pm4)
+ release_firmware(gpu->pm4);
+ if (gpu->pfp)
+ release_firmware(gpu->pfp);
+ msm_gpu_cleanup(&gpu->base);
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
new file mode 100644
index 00000000000..63c36ce3302
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ADRENO_GPU_H__
+#define __ADRENO_GPU_H__
+
+#include <linux/firmware.h>
+
+#include "msm_gpu.h"
+
+#include "adreno_common.xml.h"
+#include "adreno_pm4.xml.h"
+
+struct adreno_rev {
+ uint8_t core;
+ uint8_t major;
+ uint8_t minor;
+ uint8_t patchid;
+};
+
+#define ADRENO_REV(core, major, minor, patchid) \
+ ((struct adreno_rev){ core, major, minor, patchid })
+
+struct adreno_gpu_funcs {
+ struct msm_gpu_funcs base;
+};
+
+struct adreno_info;
+
+struct adreno_rbmemptrs {
+ volatile uint32_t rptr;
+ volatile uint32_t wptr;
+ volatile uint32_t fence;
+};
+
+struct adreno_gpu {
+ struct msm_gpu base;
+ struct adreno_rev rev;
+ const struct adreno_info *info;
+ uint32_t gmem; /* actual gmem size */
+ uint32_t revn; /* numeric revision name */
+ const struct adreno_gpu_funcs *funcs;
+
+ /* firmware: */
+ const struct firmware *pm4, *pfp;
+
+ /* ringbuffer rptr/wptr: */
+ // TODO should this be in msm_ringbuffer? I think it would be
+ // different for z180..
+ struct adreno_rbmemptrs *memptrs;
+ struct drm_gem_object *memptrs_bo;
+ uint32_t memptrs_iova;
+};
+#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
+
+/* platform config data (ie. from DT, or pdata) */
+struct adreno_platform_config {
+ struct adreno_rev rev;
+ uint32_t fast_rate, slow_rate, bus_freq;
+#ifdef CONFIG_MSM_BUS_SCALING
+ struct msm_bus_scale_pdata *bus_scale_table;
+#endif
+};
+
+#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+#define spin_until(X) ({ \
+ int __ret = -ETIMEDOUT; \
+ unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
+ do { \
+ if (X) { \
+ __ret = 0; \
+ break; \
+ } \
+ } while (time_before(jiffies, __t)); \
+ __ret; \
+})
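spin_until() busy-polls its condition until ADRENO_IDLE_TIMEOUT elapses, evaluating to 0 on success and -ETIMEDOUT otherwise, so it composes directly with an if whose body runs on timeout. A sketch (REG_STATUS and STATUS_IDLE are hypothetical names):

/* hypothetical: poll an idle bit; the if-body runs only on timeout */
if (spin_until(gpu_read(gpu, REG_STATUS) & STATUS_IDLE))
	DRM_ERROR("%s: timeout waiting for idle\n", gpu->name);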
+
+
+static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
+{
+ return (gpu->revn >= 300) && (gpu->revn < 400);
+}
+
+static inline bool adreno_is_a305(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 305;
+}
+
+static inline bool adreno_is_a320(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 320;
+}
+
+static inline bool adreno_is_a330(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 330;
+}
+
+static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
+{
+ return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
+}
+
+int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
+int adreno_hw_init(struct msm_gpu *gpu);
+uint32_t adreno_last_fence(struct msm_gpu *gpu);
+void adreno_recover(struct msm_gpu *gpu);
+int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx);
+void adreno_flush(struct msm_gpu *gpu);
+void adreno_idle(struct msm_gpu *gpu);
+#ifdef CONFIG_DEBUG_FS
+void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
+#endif
+void adreno_dump(struct msm_gpu *gpu);
+void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
+
+int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+ struct adreno_rev rev);
+void adreno_gpu_cleanup(struct adreno_gpu *gpu);
+
+
+/* ringbuffer helpers (the parts that are adreno specific) */
+
+static inline void
+OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+ adreno_wait_ring(ring->gpu, cnt+1);
+ OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
+}
+
+/* no-op packet: */
+static inline void
+OUT_PKT2(struct msm_ringbuffer *ring)
+{
+ adreno_wait_ring(ring->gpu, 1);
+ OUT_RING(ring, CP_TYPE2_PKT);
+}
+
+static inline void
+OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+ adreno_wait_ring(ring->gpu, cnt+1);
+ OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
+}
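The three helpers emit PM4 packet headers: type-0 packets write a run of cnt registers starting at regindx, type-3 packets carry an opcode plus cnt payload dwords, and each reserves cnt+1 ring dwords up front via adreno_wait_ring(). A minimal sketch writing a scratch register and emitting a fence event, mirroring the sequences used in adreno_submit() above (fence and fence_iova are hypothetical locals):

/* type-0: one register write to CP_SCRATCH_REG2 */
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, fence);			/* the value to write */

/* type-3: CACHE_FLUSH_TS event with a memory write-back */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
OUT_RING(ring, fence_iova);		/* hypothetical destination iova */
OUT_RING(ring, fence);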
+
+
+#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
new file mode 100644
index 00000000000..ae992c71703
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -0,0 +1,266 @@
+#ifndef ADRENO_PM4_XML
+#define ADRENO_PM4_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum vgt_event_type {
+ VS_DEALLOC = 0,
+ PS_DEALLOC = 1,
+ VS_DONE_TS = 2,
+ PS_DONE_TS = 3,
+ CACHE_FLUSH_TS = 4,
+ CONTEXT_DONE = 5,
+ CACHE_FLUSH = 6,
+ HLSQ_FLUSH = 7,
+ VIZQUERY_START = 7,
+ VIZQUERY_END = 8,
+ SC_WAIT_WC = 9,
+ RST_PIX_CNT = 13,
+ RST_VTX_CNT = 14,
+ TILE_FLUSH = 15,
+ CACHE_FLUSH_AND_INV_TS_EVENT = 20,
+ ZPASS_DONE = 21,
+ CACHE_FLUSH_AND_INV_EVENT = 22,
+ PERFCOUNTER_START = 23,
+ PERFCOUNTER_STOP = 24,
+ VS_FETCH_DONE = 27,
+ FACENESS_FLUSH = 28,
+};
+
+enum pc_di_primtype {
+ DI_PT_NONE = 0,
+ DI_PT_POINTLIST_A2XX = 1,
+ DI_PT_LINELIST = 2,
+ DI_PT_LINESTRIP = 3,
+ DI_PT_TRILIST = 4,
+ DI_PT_TRIFAN = 5,
+ DI_PT_TRISTRIP = 6,
+ DI_PT_LINELOOP = 7,
+ DI_PT_RECTLIST = 8,
+ DI_PT_POINTLIST_A3XX = 9,
+ DI_PT_QUADLIST = 13,
+ DI_PT_QUADSTRIP = 14,
+ DI_PT_POLYGON = 15,
+ DI_PT_2D_COPY_RECT_LIST_V0 = 16,
+ DI_PT_2D_COPY_RECT_LIST_V1 = 17,
+ DI_PT_2D_COPY_RECT_LIST_V2 = 18,
+ DI_PT_2D_COPY_RECT_LIST_V3 = 19,
+ DI_PT_2D_FILL_RECT_LIST = 20,
+ DI_PT_2D_LINE_STRIP = 21,
+ DI_PT_2D_TRI_STRIP = 22,
+};
+
+enum pc_di_src_sel {
+ DI_SRC_SEL_DMA = 0,
+ DI_SRC_SEL_IMMEDIATE = 1,
+ DI_SRC_SEL_AUTO_INDEX = 2,
+ DI_SRC_SEL_RESERVED = 3,
+};
+
+enum pc_di_index_size {
+ INDEX_SIZE_IGN = 0,
+ INDEX_SIZE_16_BIT = 0,
+ INDEX_SIZE_32_BIT = 1,
+ INDEX_SIZE_8_BIT = 2,
+ INDEX_SIZE_INVALID = 0,
+};
+
+enum pc_di_vis_cull_mode {
+ IGNORE_VISIBILITY = 0,
+};
+
+enum adreno_pm4_packet_type {
+ CP_TYPE0_PKT = 0,
+ CP_TYPE1_PKT = 0x40000000,
+ CP_TYPE2_PKT = 0x80000000,
+ CP_TYPE3_PKT = 0xc0000000,
+};
+
+enum adreno_pm4_type3_packets {
+ CP_ME_INIT = 72,
+ CP_NOP = 16,
+ CP_INDIRECT_BUFFER = 63,
+ CP_INDIRECT_BUFFER_PFD = 55,
+ CP_WAIT_FOR_IDLE = 38,
+ CP_WAIT_REG_MEM = 60,
+ CP_WAIT_REG_EQ = 82,
+ CP_WAIT_REG_GTE = 83,
+ CP_WAIT_UNTIL_READ = 92,
+ CP_WAIT_IB_PFD_COMPLETE = 93,
+ CP_REG_RMW = 33,
+ CP_SET_BIN_DATA = 47,
+ CP_REG_TO_MEM = 62,
+ CP_MEM_WRITE = 61,
+ CP_MEM_WRITE_CNTR = 79,
+ CP_COND_EXEC = 68,
+ CP_COND_WRITE = 69,
+ CP_EVENT_WRITE = 70,
+ CP_EVENT_WRITE_SHD = 88,
+ CP_EVENT_WRITE_CFL = 89,
+ CP_EVENT_WRITE_ZPD = 91,
+ CP_RUN_OPENCL = 49,
+ CP_DRAW_INDX = 34,
+ CP_DRAW_INDX_2 = 54,
+ CP_DRAW_INDX_BIN = 52,
+ CP_DRAW_INDX_2_BIN = 53,
+ CP_VIZ_QUERY = 35,
+ CP_SET_STATE = 37,
+ CP_SET_CONSTANT = 45,
+ CP_IM_LOAD = 39,
+ CP_IM_LOAD_IMMEDIATE = 43,
+ CP_LOAD_CONSTANT_CONTEXT = 46,
+ CP_INVALIDATE_STATE = 59,
+ CP_SET_SHADER_BASES = 74,
+ CP_SET_BIN_MASK = 80,
+ CP_SET_BIN_SELECT = 81,
+ CP_CONTEXT_UPDATE = 94,
+ CP_INTERRUPT = 64,
+ CP_IM_STORE = 44,
+ CP_SET_DRAW_INIT_FLAGS = 75,
+ CP_SET_PROTECTED_MODE = 95,
+ CP_LOAD_STATE = 48,
+ CP_COND_INDIRECT_BUFFER_PFE = 58,
+ CP_COND_INDIRECT_BUFFER_PFD = 50,
+ CP_INDIRECT_BUFFER_PFE = 63,
+ CP_SET_BIN = 76,
+ CP_TEST_TWO_MEMS = 113,
+ CP_WAIT_FOR_ME = 19,
+ IN_IB_PREFETCH_END = 23,
+ IN_SUBBLK_PREFETCH = 31,
+ IN_INSTR_PREFETCH = 32,
+ IN_INSTR_MATCH = 71,
+ IN_CONST_PREFETCH = 73,
+ IN_INCR_UPDT_STATE = 85,
+ IN_INCR_UPDT_CONST = 86,
+ IN_INCR_UPDT_INSTR = 87,
+};
+
+enum adreno_state_block {
+ SB_VERT_TEX = 0,
+ SB_VERT_MIPADDR = 1,
+ SB_FRAG_TEX = 2,
+ SB_FRAG_MIPADDR = 3,
+ SB_VERT_SHADER = 4,
+ SB_FRAG_SHADER = 6,
+};
+
+enum adreno_state_type {
+ ST_SHADER = 0,
+ ST_CONSTANTS = 1,
+};
+
+enum adreno_state_src {
+ SS_DIRECT = 0,
+ SS_INDIRECT = 4,
+};
+
+#define REG_CP_LOAD_STATE_0 0x00000000
+#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
+#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
+static inline uint32_t CP_LOAD_STATE_0_DST_OFF(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE_0_DST_OFF__SHIFT) & CP_LOAD_STATE_0_DST_OFF__MASK;
+}
+#define CP_LOAD_STATE_0_STATE_SRC__MASK 0x00070000
+#define CP_LOAD_STATE_0_STATE_SRC__SHIFT 16
+static inline uint32_t CP_LOAD_STATE_0_STATE_SRC(enum adreno_state_src val)
+{
+ return ((val) << CP_LOAD_STATE_0_STATE_SRC__SHIFT) & CP_LOAD_STATE_0_STATE_SRC__MASK;
+}
+#define CP_LOAD_STATE_0_STATE_BLOCK__MASK 0x00380000
+#define CP_LOAD_STATE_0_STATE_BLOCK__SHIFT 19
+static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val)
+{
+ return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK;
+}
+#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0x7fc00000
+#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22
+static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE_0_NUM_UNIT__MASK;
+}
+
+#define REG_CP_LOAD_STATE_1 0x00000001
+#define CP_LOAD_STATE_1_STATE_TYPE__MASK 0x00000003
+#define CP_LOAD_STATE_1_STATE_TYPE__SHIFT 0
+static inline uint32_t CP_LOAD_STATE_1_STATE_TYPE(enum adreno_state_type val)
+{
+ return ((val) << CP_LOAD_STATE_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE_1_STATE_TYPE__MASK;
+}
+#define CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK 0xfffffffc
+#define CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT 2
+static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
+}
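Put together, the two CP_LOAD_STATE dwords describe where state comes from and where it lands. A sketch of a direct (inline) vertex-shader upload header, assuming the payload dwords follow in the same packet; num_units and num_dwords are illustrative:

/* CP_LOAD_STATE header for num_units of vertex-shader state, sourced
 * directly from the packet body (SS_DIRECT => EXT_SRC_ADDR unused): */
OUT_PKT3(ring, CP_LOAD_STATE, 2 + num_dwords);
OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
		CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
		CP_LOAD_STATE_0_STATE_BLOCK(SB_VERT_SHADER) |
		CP_LOAD_STATE_0_NUM_UNIT(num_units));
OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
		CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
/* ... num_dwords of shader instructions follow via OUT_RING() ... */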
+
+#define REG_CP_SET_BIN_0 0x00000000
+
+#define REG_CP_SET_BIN_1 0x00000001
+#define CP_SET_BIN_1_X1__MASK 0x0000ffff
+#define CP_SET_BIN_1_X1__SHIFT 0
+static inline uint32_t CP_SET_BIN_1_X1(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_1_X1__SHIFT) & CP_SET_BIN_1_X1__MASK;
+}
+#define CP_SET_BIN_1_Y1__MASK 0xffff0000
+#define CP_SET_BIN_1_Y1__SHIFT 16
+static inline uint32_t CP_SET_BIN_1_Y1(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_1_Y1__SHIFT) & CP_SET_BIN_1_Y1__MASK;
+}
+
+#define REG_CP_SET_BIN_2 0x00000002
+#define CP_SET_BIN_2_X2__MASK 0x0000ffff
+#define CP_SET_BIN_2_X2__SHIFT 0
+static inline uint32_t CP_SET_BIN_2_X2(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_2_X2__SHIFT) & CP_SET_BIN_2_X2__MASK;
+}
+#define CP_SET_BIN_2_Y2__MASK 0xffff0000
+#define CP_SET_BIN_2_Y2__SHIFT 16
+static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK;
+}
+
+
+#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
new file mode 100644
index 00000000000..87be647e382
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -0,0 +1,504 @@
+#ifndef DSI_XML
+#define DSI_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum dsi_traffic_mode {
+ NON_BURST_SYNCH_PULSE = 0,
+ NON_BURST_SYNCH_EVENT = 1,
+ BURST_MODE = 2,
+};
+
+enum dsi_dst_format {
+ DST_FORMAT_RGB565 = 0,
+ DST_FORMAT_RGB666 = 1,
+ DST_FORMAT_RGB666_LOOSE = 2,
+ DST_FORMAT_RGB888 = 3,
+};
+
+enum dsi_rgb_swap {
+ SWAP_RGB = 0,
+ SWAP_RBG = 1,
+ SWAP_BGR = 2,
+ SWAP_BRG = 3,
+ SWAP_GRB = 4,
+ SWAP_GBR = 5,
+};
+
+enum dsi_cmd_trigger {
+ TRIGGER_NONE = 0,
+ TRIGGER_TE = 2,
+ TRIGGER_SW = 4,
+ TRIGGER_SW_SEOF = 5,
+ TRIGGER_SW_TE = 6,
+};
+
+#define DSI_IRQ_CMD_DMA_DONE 0x00000001
+#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002
+#define DSI_IRQ_CMD_MDP_DONE 0x00000100
+#define DSI_IRQ_MASK_CMD_MDP_DONE 0x00000200
+#define DSI_IRQ_VIDEO_DONE 0x00010000
+#define DSI_IRQ_MASK_VIDEO_DONE 0x00020000
+#define DSI_IRQ_ERROR 0x01000000
+#define DSI_IRQ_MASK_ERROR 0x02000000
+#define REG_DSI_CTRL 0x00000000
+#define DSI_CTRL_ENABLE 0x00000001
+#define DSI_CTRL_VID_MODE_EN 0x00000002
+#define DSI_CTRL_CMD_MODE_EN 0x00000004
+#define DSI_CTRL_LANE0 0x00000010
+#define DSI_CTRL_LANE1 0x00000020
+#define DSI_CTRL_LANE2 0x00000040
+#define DSI_CTRL_LANE3 0x00000080
+#define DSI_CTRL_CLK_EN 0x00000100
+#define DSI_CTRL_ECC_CHECK 0x00100000
+#define DSI_CTRL_CRC_CHECK 0x01000000
+
+#define REG_DSI_STATUS0 0x00000004
+#define DSI_STATUS0_CMD_MODE_DMA_BUSY 0x00000002
+#define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY 0x00000008
+#define DSI_STATUS0_DSI_BUSY 0x00000010
+
+#define REG_DSI_FIFO_STATUS 0x00000008
+
+#define REG_DSI_VID_CFG0 0x0000000c
+#define DSI_VID_CFG0_VIRT_CHANNEL__MASK 0x00000003
+#define DSI_VID_CFG0_VIRT_CHANNEL__SHIFT 0
+static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val)
+{
+ return ((val) << DSI_VID_CFG0_VIRT_CHANNEL__SHIFT) & DSI_VID_CFG0_VIRT_CHANNEL__MASK;
+}
+#define DSI_VID_CFG0_DST_FORMAT__MASK 0x00000030
+#define DSI_VID_CFG0_DST_FORMAT__SHIFT 4
+static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_dst_format val)
+{
+ return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK;
+}
+#define DSI_VID_CFG0_TRAFFIC_MODE__MASK 0x00000300
+#define DSI_VID_CFG0_TRAFFIC_MODE__SHIFT 8
+static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val)
+{
+ return ((val) << DSI_VID_CFG0_TRAFFIC_MODE__SHIFT) & DSI_VID_CFG0_TRAFFIC_MODE__MASK;
+}
+#define DSI_VID_CFG0_BLLP_POWER_STOP 0x00001000
+#define DSI_VID_CFG0_EOF_BLLP_POWER_STOP 0x00008000
+#define DSI_VID_CFG0_HSA_POWER_STOP 0x00010000
+#define DSI_VID_CFG0_HBP_POWER_STOP 0x00100000
+#define DSI_VID_CFG0_HFP_POWER_STOP 0x01000000
+#define DSI_VID_CFG0_PULSE_MODE_HSA_HE 0x10000000
+
+#define REG_DSI_VID_CFG1 0x0000001c
+#define DSI_VID_CFG1_R_SEL 0x00000010
+#define DSI_VID_CFG1_G_SEL 0x00000100
+#define DSI_VID_CFG1_B_SEL 0x00001000
+#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00070000
+#define DSI_VID_CFG1_RGB_SWAP__SHIFT 16
+static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val)
+{
+ return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK;
+}
+#define DSI_VID_CFG1_INTERLEAVE_MAX__MASK 0x00f00000
+#define DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT 20
+static inline uint32_t DSI_VID_CFG1_INTERLEAVE_MAX(uint32_t val)
+{
+ return ((val) << DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT) & DSI_VID_CFG1_INTERLEAVE_MAX__MASK;
+}
+
+#define REG_DSI_ACTIVE_H 0x00000020
+#define DSI_ACTIVE_H_START__MASK 0x00000fff
+#define DSI_ACTIVE_H_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_H_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_H_START__SHIFT) & DSI_ACTIVE_H_START__MASK;
+}
+#define DSI_ACTIVE_H_END__MASK 0x0fff0000
+#define DSI_ACTIVE_H_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_H_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_H_END__SHIFT) & DSI_ACTIVE_H_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_V 0x00000024
+#define DSI_ACTIVE_V_START__MASK 0x00000fff
+#define DSI_ACTIVE_V_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_V_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_V_START__SHIFT) & DSI_ACTIVE_V_START__MASK;
+}
+#define DSI_ACTIVE_V_END__MASK 0x0fff0000
+#define DSI_ACTIVE_V_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_V_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_V_END__SHIFT) & DSI_ACTIVE_V_END__MASK;
+}
+
+#define REG_DSI_TOTAL 0x00000028
+#define DSI_TOTAL_H_TOTAL__MASK 0x00000fff
+#define DSI_TOTAL_H_TOTAL__SHIFT 0
+static inline uint32_t DSI_TOTAL_H_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_TOTAL_H_TOTAL__SHIFT) & DSI_TOTAL_H_TOTAL__MASK;
+}
+#define DSI_TOTAL_V_TOTAL__MASK 0x0fff0000
+#define DSI_TOTAL_V_TOTAL__SHIFT 16
+static inline uint32_t DSI_TOTAL_V_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_TOTAL_V_TOTAL__SHIFT) & DSI_TOTAL_V_TOTAL__MASK;
+}
+
+#define REG_DSI_ACTIVE_HSYNC 0x0000002c
+#define DSI_ACTIVE_HSYNC_START__MASK 0x00000fff
+#define DSI_ACTIVE_HSYNC_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_HSYNC_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_HSYNC_START__SHIFT) & DSI_ACTIVE_HSYNC_START__MASK;
+}
+#define DSI_ACTIVE_HSYNC_END__MASK 0x0fff0000
+#define DSI_ACTIVE_HSYNC_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_VSYNC 0x00000034
+#define DSI_ACTIVE_VSYNC_START__MASK 0x00000fff
+#define DSI_ACTIVE_VSYNC_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_VSYNC_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_VSYNC_START__SHIFT) & DSI_ACTIVE_VSYNC_START__MASK;
+}
+#define DSI_ACTIVE_VSYNC_END__MASK 0x0fff0000
+#define DSI_ACTIVE_VSYNC_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_VSYNC_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_VSYNC_END__SHIFT) & DSI_ACTIVE_VSYNC_END__MASK;
+}
+
+#define REG_DSI_CMD_DMA_CTRL 0x00000038
+#define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER 0x10000000
+#define DSI_CMD_DMA_CTRL_LOW_POWER 0x04000000
+
+#define REG_DSI_CMD_CFG0 0x0000003c
+
+#define REG_DSI_CMD_CFG1 0x00000040
+
+#define REG_DSI_DMA_BASE 0x00000044
+
+#define REG_DSI_DMA_LEN 0x00000048
+
+#define REG_DSI_ACK_ERR_STATUS 0x00000064
+
+static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
+
+static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; }
+
+#define REG_DSI_TRIG_CTRL 0x00000080
+#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x0000000f
+#define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT 0
+static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val)
+{
+ return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK;
+}
+#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x000000f0
+#define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT 4
+static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val)
+{
+ return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK;
+}
+#define DSI_TRIG_CTRL_STREAM 0x00000100
+#define DSI_TRIG_CTRL_TE 0x80000000
+
+#define REG_DSI_TRIG_DMA 0x0000008c
+
+#define REG_DSI_DLN0_PHY_ERR 0x000000b0
+
+#define REG_DSI_TIMEOUT_STATUS 0x000000bc
+
+#define REG_DSI_CLKOUT_TIMING_CTRL 0x000000c0
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK 0x0000003f
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT 0
+static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(uint32_t val)
+{
+ return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK;
+}
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK 0x00003f00
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT 8
+static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
+{
+ return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK;
+}
+
+#define REG_DSI_EOT_PACKET_CTRL 0x000000c8
+#define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND 0x00000001
+#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010
+
+#define REG_DSI_LANE_SWAP_CTRL 0x000000ac
+
+#define REG_DSI_ERR_INT_MASK0 0x00000108
+
+#define REG_DSI_INTR_CTRL 0x0000010c
+
+#define REG_DSI_RESET 0x00000114
+
+#define REG_DSI_CLK_CTRL 0x00000118
+
+#define REG_DSI_PHY_RESET 0x00000128
+
+#define REG_DSI_PHY_PLL_CTRL_0 0x00000200
+#define DSI_PHY_PLL_CTRL_0_ENABLE 0x00000001
+
+#define REG_DSI_PHY_PLL_CTRL_1 0x00000204
+
+#define REG_DSI_PHY_PLL_CTRL_2 0x00000208
+
+#define REG_DSI_PHY_PLL_CTRL_3 0x0000020c
+
+#define REG_DSI_PHY_PLL_CTRL_4 0x00000210
+
+#define REG_DSI_PHY_PLL_CTRL_5 0x00000214
+
+#define REG_DSI_PHY_PLL_CTRL_6 0x00000218
+
+#define REG_DSI_PHY_PLL_CTRL_7 0x0000021c
+
+#define REG_DSI_PHY_PLL_CTRL_8 0x00000220
+
+#define REG_DSI_PHY_PLL_CTRL_9 0x00000224
+
+#define REG_DSI_PHY_PLL_CTRL_10 0x00000228
+
+#define REG_DSI_PHY_PLL_CTRL_11 0x0000022c
+
+#define REG_DSI_PHY_PLL_CTRL_12 0x00000230
+
+#define REG_DSI_PHY_PLL_CTRL_13 0x00000234
+
+#define REG_DSI_PHY_PLL_CTRL_14 0x00000238
+
+#define REG_DSI_PHY_PLL_CTRL_15 0x0000023c
+
+#define REG_DSI_PHY_PLL_CTRL_16 0x00000240
+
+#define REG_DSI_PHY_PLL_CTRL_17 0x00000244
+
+#define REG_DSI_PHY_PLL_CTRL_18 0x00000248
+
+#define REG_DSI_PHY_PLL_CTRL_19 0x0000024c
+
+#define REG_DSI_PHY_PLL_CTRL_20 0x00000250
+
+#define REG_DSI_PHY_PLL_STATUS 0x00000280
+#define DSI_PHY_PLL_STATUS_PLL_BUSY 0x00000001
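DSI_PHY_PLL_CTRL_0_ENABLE and DSI_PHY_PLL_STATUS_PLL_BUSY pair up as the obvious enable-then-poll sequence. A sketch assuming an ioremapped register base and readl/writel accessors, and assuming the busy bit deasserts once the PLL locks (dsi_base is hypothetical, and a real loop would bound the poll with a timeout):

writel(DSI_PHY_PLL_CTRL_0_ENABLE, dsi_base + REG_DSI_PHY_PLL_CTRL_0);

/* wait for the PLL to come out of its busy state: */
while (readl(dsi_base + REG_DSI_PHY_PLL_STATUS) & DSI_PHY_PLL_STATUS_PLL_BUSY)
	cpu_relax();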
+
+#define REG_DSI_8x60_PHY_TPA_CTRL_1 0x00000258
+
+#define REG_DSI_8x60_PHY_TPA_CTRL_2 0x0000025c
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_0 0x00000260
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_1 0x00000264
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_2 0x00000268
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_3 0x0000026c
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_4 0x00000270
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_5 0x00000274
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_6 0x00000278
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_7 0x0000027c
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_8 0x00000280
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_9 0x00000284
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_10 0x00000288
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_11 0x0000028c
+
+#define REG_DSI_8x60_PHY_CTRL_0 0x00000290
+
+#define REG_DSI_8x60_PHY_CTRL_1 0x00000294
+
+#define REG_DSI_8x60_PHY_CTRL_2 0x00000298
+
+#define REG_DSI_8x60_PHY_CTRL_3 0x0000029c
+
+#define REG_DSI_8x60_PHY_STRENGTH_0 0x000002a0
+
+#define REG_DSI_8x60_PHY_STRENGTH_1 0x000002a4
+
+#define REG_DSI_8x60_PHY_STRENGTH_2 0x000002a8
+
+#define REG_DSI_8x60_PHY_STRENGTH_3 0x000002ac
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_0 0x000002cc
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_1 0x000002d0
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_2 0x000002d4
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_3 0x000002d8
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_4 0x000002dc
+
+#define REG_DSI_8x60_PHY_CAL_HW_TRIGGER 0x000000f0
+
+#define REG_DSI_8x60_PHY_CAL_CTRL 0x000000f4
+
+#define REG_DSI_8x60_PHY_CAL_STATUS 0x000000fc
+#define DSI_8x60_PHY_CAL_STATUS_CAL_BUSY 0x10000000
+
+static inline uint32_t REG_DSI_8960_LN(uint32_t i0) { return 0x00000300 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_CFG_0(uint32_t i0) { return 0x00000300 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_CFG_1(uint32_t i0) { return 0x00000304 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_CFG_2(uint32_t i0) { return 0x00000308 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000030c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_TEST_STR_0(uint32_t i0) { return 0x00000314 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_TEST_STR_1(uint32_t i0) { return 0x00000318 + 0x40*i0; }
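Per-lane registers are generated as functions of the lane index, with a 0x40 stride per lane. A worked example for data lane 2 (arithmetic only):

/* REG_DSI_8960_LN_CFG_0(2)         == 0x300 + 0x40*2 == 0x380 */
/* REG_DSI_8960_LN_TEST_DATAPATH(2) == 0x30c + 0x40*2 == 0x38c */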
+
+#define REG_DSI_8960_PHY_LNCK_CFG_0 0x00000400
+
+#define REG_DSI_8960_PHY_LNCK_CFG_1 0x00000404
+
+#define REG_DSI_8960_PHY_LNCK_CFG_2 0x00000408
+
+#define REG_DSI_8960_PHY_LNCK_TEST_DATAPATH 0x0000040c
+
+#define REG_DSI_8960_PHY_LNCK_TEST_STR0 0x00000414
+
+#define REG_DSI_8960_PHY_LNCK_TEST_STR1 0x00000418
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_0 0x00000440
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_1 0x00000444
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_2 0x00000448
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_3 0x0000044c
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_4 0x00000450
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_5 0x00000454
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_6 0x00000458
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_7 0x0000045c
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_8 0x00000460
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_9 0x00000464
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_10 0x00000468
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_11 0x0000046c
+
+#define REG_DSI_8960_PHY_CTRL_0 0x00000470
+
+#define REG_DSI_8960_PHY_CTRL_1 0x00000474
+
+#define REG_DSI_8960_PHY_CTRL_2 0x00000478
+
+#define REG_DSI_8960_PHY_CTRL_3 0x0000047c
+
+#define REG_DSI_8960_PHY_STRENGTH_0 0x00000480
+
+#define REG_DSI_8960_PHY_STRENGTH_1 0x00000484
+
+#define REG_DSI_8960_PHY_STRENGTH_2 0x00000488
+
+#define REG_DSI_8960_PHY_BIST_CTRL_0 0x0000048c
+
+#define REG_DSI_8960_PHY_BIST_CTRL_1 0x00000490
+
+#define REG_DSI_8960_PHY_BIST_CTRL_2 0x00000494
+
+#define REG_DSI_8960_PHY_BIST_CTRL_3 0x00000498
+
+#define REG_DSI_8960_PHY_BIST_CTRL_4 0x0000049c
+
+#define REG_DSI_8960_PHY_LDO_CTRL 0x000004b0
+
+#define REG_DSI_8960_PHY_REGULATOR_CTRL_0 0x00000500
+
+#define REG_DSI_8960_PHY_REGULATOR_CTRL_1 0x00000504
+
+#define REG_DSI_8960_PHY_REGULATOR_CTRL_2 0x00000508
+
+#define REG_DSI_8960_PHY_REGULATOR_CTRL_3 0x0000050c
+
+#define REG_DSI_8960_PHY_REGULATOR_CTRL_4 0x00000510
+
+#define REG_DSI_8960_PHY_REGULATOR_CAL_PWR_CFG 0x00000518
+
+#define REG_DSI_8960_PHY_CAL_HW_TRIGGER 0x00000528
+
+#define REG_DSI_8960_PHY_CAL_SW_CFG_0 0x0000052c
+
+#define REG_DSI_8960_PHY_CAL_SW_CFG_1 0x00000530
+
+#define REG_DSI_8960_PHY_CAL_SW_CFG_2 0x00000534
+
+#define REG_DSI_8960_PHY_CAL_HW_CFG_0 0x00000538
+
+#define REG_DSI_8960_PHY_CAL_HW_CFG_1 0x0000053c
+
+#define REG_DSI_8960_PHY_CAL_HW_CFG_2 0x00000540
+
+#define REG_DSI_8960_PHY_CAL_HW_CFG_3 0x00000544
+
+#define REG_DSI_8960_PHY_CAL_HW_CFG_4 0x00000548
+
+#define REG_DSI_8960_PHY_CAL_STATUS 0x00000550
+#define DSI_8960_PHY_CAL_STATUS_CAL_BUSY 0x00000010
+
+
+#endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
new file mode 100644
index 00000000000..747a6ef4211
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -0,0 +1,116 @@
+#ifndef MMSS_CC_XML
+#define MMSS_CC_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mmss_cc_clk {
+ CLK = 0,
+ PCLK = 1,
+};
+
+#define REG_MMSS_CC_AHB 0x00000008
+
+static inline uint32_t __offset_CLK(enum mmss_cc_clk idx)
+{
+ switch (idx) {
+ case CLK: return 0x0000004c;
+ case PCLK: return 0x00000130;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MMSS_CC_CLK(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
+
+static inline uint32_t REG_MMSS_CC_CLK_CC(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
+#define MMSS_CC_CLK_CC_CLK_EN 0x00000001
+#define MMSS_CC_CLK_CC_ROOT_EN 0x00000004
+#define MMSS_CC_CLK_CC_MND_EN 0x00000020
+#define MMSS_CC_CLK_CC_MND_MODE__MASK 0x000000c0
+#define MMSS_CC_CLK_CC_MND_MODE__SHIFT 6
+static inline uint32_t MMSS_CC_CLK_CC_MND_MODE(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_CC_MND_MODE__SHIFT) & MMSS_CC_CLK_CC_MND_MODE__MASK;
+}
+#define MMSS_CC_CLK_CC_PMXO_SEL__MASK 0x00000300
+#define MMSS_CC_CLK_CC_PMXO_SEL__SHIFT 8
+static inline uint32_t MMSS_CC_CLK_CC_PMXO_SEL(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_CC_PMXO_SEL__SHIFT) & MMSS_CC_CLK_CC_PMXO_SEL__MASK;
+}
+
+static inline uint32_t REG_MMSS_CC_CLK_MD(enum mmss_cc_clk i0) { return 0x00000004 + __offset_CLK(i0); }
+#define MMSS_CC_CLK_MD_D__MASK 0x000000ff
+#define MMSS_CC_CLK_MD_D__SHIFT 0
+static inline uint32_t MMSS_CC_CLK_MD_D(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_MD_D__SHIFT) & MMSS_CC_CLK_MD_D__MASK;
+}
+#define MMSS_CC_CLK_MD_M__MASK 0x0000ff00
+#define MMSS_CC_CLK_MD_M__SHIFT 8
+static inline uint32_t MMSS_CC_CLK_MD_M(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_MD_M__SHIFT) & MMSS_CC_CLK_MD_M__MASK;
+}
+
+static inline uint32_t REG_MMSS_CC_CLK_NS(enum mmss_cc_clk i0) { return 0x00000008 + __offset_CLK(i0); }
+#define MMSS_CC_CLK_NS_SRC__MASK 0x0000000f
+#define MMSS_CC_CLK_NS_SRC__SHIFT 0
+static inline uint32_t MMSS_CC_CLK_NS_SRC(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_NS_SRC__SHIFT) & MMSS_CC_CLK_NS_SRC__MASK;
+}
+#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK 0x00fff000
+#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT 12
+static inline uint32_t MMSS_CC_CLK_NS_PRE_DIV_FUNC(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT) & MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK;
+}
+#define MMSS_CC_CLK_NS_VAL__MASK 0xff000000
+#define MMSS_CC_CLK_NS_VAL__SHIFT 24
+static inline uint32_t MMSS_CC_CLK_NS_VAL(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_NS_VAL__SHIFT) & MMSS_CC_CLK_NS_VAL__MASK;
+}
+
+
+#endif /* MMSS_CC_XML */
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
new file mode 100644
index 00000000000..48e03acf19b
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -0,0 +1,50 @@
+#ifndef SFPB_XML
+#define SFPB_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define REG_SFPB_CFG 0x00000058
+
+
+#endif /* SFPB_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
new file mode 100644
index 00000000000..7f7aadef8a8
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
+{
+ uint32_t ctrl = 0;
+
+ if (power_on) {
+ ctrl |= HDMI_CTRL_ENABLE;
+ if (!hdmi->hdmi_mode) {
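+			/* DVI sink: the HDMI bit is set for the first write
+			 * and cleared again, so the final write below leaves
+			 * the core enabled in DVI mode:
+			 */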
+ ctrl |= HDMI_CTRL_HDMI;
+ hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+ ctrl &= ~HDMI_CTRL_HDMI;
+ } else {
+ ctrl |= HDMI_CTRL_HDMI;
+ }
+ } else {
+ ctrl = HDMI_CTRL_HDMI;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+ DBG("HDMI Core: %s, HDMI_CTRL=0x%08x",
+ power_on ? "Enable" : "Disable", ctrl);
+}
+
+irqreturn_t hdmi_irq(int irq, void *dev_id)
+{
+ struct hdmi *hdmi = dev_id;
+
+ /* Process HPD: */
+ hdmi_connector_irq(hdmi->connector);
+
+ /* Process DDC: */
+ hdmi_i2c_irq(hdmi->i2c);
+
+ /* TODO audio.. */
+
+ return IRQ_HANDLED;
+}
+
+void hdmi_destroy(struct kref *kref)
+{
+ struct hdmi *hdmi = container_of(kref, struct hdmi, refcount);
+ struct hdmi_phy *phy = hdmi->phy;
+
+ if (phy)
+ phy->funcs->destroy(phy);
+
+ if (hdmi->i2c)
+ hdmi_i2c_destroy(hdmi->i2c);
+
+ platform_set_drvdata(hdmi->pdev, NULL);
+}
+
+/* initialize hdmi device: */
+struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
+{
+ struct hdmi *hdmi = NULL;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->hdmi_pdev;
+ struct hdmi_platform_config *config;
+ int i, ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "no hdmi device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ config = pdev->dev.platform_data;
+
+ hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ kref_init(&hdmi->refcount);
+
+ hdmi->dev = dev;
+ hdmi->pdev = pdev;
+ hdmi->config = config;
+ hdmi->encoder = encoder;
+
+ hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
+
+	/* not sure which phy maps to which msm SoC.. probably missing some */
+ if (config->phy_init)
+ hdmi->phy = config->phy_init(hdmi);
+ else
+ hdmi->phy = ERR_PTR(-ENXIO);
+
+ if (IS_ERR(hdmi->phy)) {
+ ret = PTR_ERR(hdmi->phy);
+ dev_err(dev->dev, "failed to load phy: %d\n", ret);
+ hdmi->phy = NULL;
+ goto fail;
+ }
+
+ hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
+ if (IS_ERR(hdmi->mmio)) {
+ ret = PTR_ERR(hdmi->mmio);
+ goto fail;
+ }
+
+ BUG_ON(config->hpd_reg_cnt > ARRAY_SIZE(hdmi->hpd_regs));
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ struct regulator *reg;
+
+ reg = devm_regulator_get(&pdev->dev, config->hpd_reg_names[i]);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ goto fail;
+ }
+
+ hdmi->hpd_regs[i] = reg;
+ }
+
+ BUG_ON(config->pwr_reg_cnt > ARRAY_SIZE(hdmi->pwr_regs));
+ for (i = 0; i < config->pwr_reg_cnt; i++) {
+ struct regulator *reg;
+
+ reg = devm_regulator_get(&pdev->dev, config->pwr_reg_names[i]);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n",
+ config->pwr_reg_names[i], ret);
+ goto fail;
+ }
+
+ hdmi->pwr_regs[i] = reg;
+ }
+
+ BUG_ON(config->hpd_clk_cnt > ARRAY_SIZE(hdmi->hpd_clks));
+ for (i = 0; i < config->hpd_clk_cnt; i++) {
+ struct clk *clk;
+
+ clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev->dev, "failed to get hpd clk: %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ goto fail;
+ }
+
+ hdmi->hpd_clks[i] = clk;
+ }
+
+ BUG_ON(config->pwr_clk_cnt > ARRAY_SIZE(hdmi->pwr_clks));
+ for (i = 0; i < config->pwr_clk_cnt; i++) {
+ struct clk *clk;
+
+ clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev->dev, "failed to get pwr clk: %s (%d)\n",
+ config->pwr_clk_names[i], ret);
+ goto fail;
+ }
+
+ hdmi->pwr_clks[i] = clk;
+ }
+
+ hdmi->i2c = hdmi_i2c_init(hdmi);
+ if (IS_ERR(hdmi->i2c)) {
+ ret = PTR_ERR(hdmi->i2c);
+ dev_err(dev->dev, "failed to get i2c: %d\n", ret);
+ hdmi->i2c = NULL;
+ goto fail;
+ }
+
+ hdmi->bridge = hdmi_bridge_init(hdmi);
+ if (IS_ERR(hdmi->bridge)) {
+ ret = PTR_ERR(hdmi->bridge);
+ dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret);
+ hdmi->bridge = NULL;
+ goto fail;
+ }
+
+ hdmi->connector = hdmi_connector_init(hdmi);
+ if (IS_ERR(hdmi->connector)) {
+ ret = PTR_ERR(hdmi->connector);
+ dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret);
+ hdmi->connector = NULL;
+ goto fail;
+ }
+
+ if (!config->shared_irq) {
+ hdmi->irq = platform_get_irq(pdev, 0);
+ if (hdmi->irq < 0) {
+ ret = hdmi->irq;
+ dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ goto fail;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
+ NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "hdmi_isr", hdmi);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+ hdmi->irq, ret);
+ goto fail;
+ }
+ }
+
+ encoder->bridge = hdmi->bridge;
+
+ priv->bridges[priv->num_bridges++] = hdmi->bridge;
+ priv->connectors[priv->num_connectors++] = hdmi->connector;
+
+ platform_set_drvdata(pdev, hdmi);
+
+ return hdmi;
+
+fail:
+ if (hdmi) {
+ /* bridge/connector are normally destroyed by drm: */
+ if (hdmi->bridge)
+ hdmi->bridge->funcs->destroy(hdmi->bridge);
+ if (hdmi->connector)
+ hdmi->connector->funcs->destroy(hdmi->connector);
+ hdmi_destroy(&hdmi->refcount);
+ }
+
+ return ERR_PTR(ret);
+}
+
+/*
+ * The hdmi device:
+ */
+
+#include <linux/of_gpio.h>
+
+static void set_hdmi_pdev(struct drm_device *dev,
+ struct platform_device *pdev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+
+	priv->hdmi_pdev = pdev;
+}
+
+static int hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+ static struct hdmi_platform_config config = {};
+#ifdef CONFIG_OF
+ struct device_node *of_node = dev->of_node;
+
+ int get_gpio(const char *name)
+ {
+ int gpio = of_get_named_gpio(of_node, name, 0);
+ if (gpio < 0) {
+ dev_err(dev, "failed to get gpio: %s (%d)\n",
+ name, gpio);
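+			/* -1 means "not present"; the optional mux gpios
+			 * are checked against -1 before use:
+			 */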
+ gpio = -1;
+ }
+ return gpio;
+ }
+
+ /* TODO actually use DT.. */
+ static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
+ static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
+ static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+ static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
+ static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
+
+ config.phy_init = hdmi_phy_8x74_init;
+ config.mmio_name = "core_physical";
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.pwr_reg_names = pwr_reg_names;
+ config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_freq = hpd_clk_freq;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
+ config.pwr_clk_names = pwr_clk_names;
+ config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
+ config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk");
+ config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
+ config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd");
+ config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en");
+ config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel");
+ config.shared_irq = true;
+
+#else
+ static const char *hpd_clk_names[] = {
+ "core_clk", "master_iface_clk", "slave_iface_clk",
+ };
+ if (cpu_is_apq8064()) {
+ static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
+ config.phy_init = hdmi_phy_8960_init;
+ config.mmio_name = "hdmi_msm_hdmi_addr";
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
+ config.ddc_clk_gpio = 70;
+ config.ddc_data_gpio = 71;
+ config.hpd_gpio = 72;
+ config.mux_en_gpio = -1;
+ config.mux_sel_gpio = -1;
+ } else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
+ static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
+ config.phy_init = hdmi_phy_8960_init;
+ config.mmio_name = "hdmi_msm_hdmi_addr";
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
+ config.ddc_clk_gpio = 100;
+ config.ddc_data_gpio = 101;
+ config.hpd_gpio = 102;
+ config.mux_en_gpio = -1;
+ config.mux_sel_gpio = -1;
+ } else if (cpu_is_msm8x60()) {
+ static const char *hpd_reg_names[] = {
+ "8901_hdmi_mvs", "8901_mpp0"
+ };
+ config.phy_init = hdmi_phy_8x60_init;
+ config.mmio_name = "hdmi_msm_hdmi_addr";
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
+ config.ddc_clk_gpio = 170;
+ config.ddc_data_gpio = 171;
+ config.hpd_gpio = 172;
+ config.mux_en_gpio = -1;
+ config.mux_sel_gpio = -1;
+ }
+#endif
+ dev->platform_data = &config;
+ set_hdmi_pdev(dev_get_drvdata(master), to_platform_device(dev));
+ return 0;
+}
+
+static void hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ set_hdmi_pdev(dev_get_drvdata(master), NULL);
+}
+
+static const struct component_ops hdmi_ops = {
+ .bind = hdmi_bind,
+ .unbind = hdmi_unbind,
+};
+
+static int hdmi_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &hdmi_ops);
+}
+
+static int hdmi_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &hdmi_ops);
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,hdmi-tx" },
+ {}
+};
+
+static struct platform_driver hdmi_driver = {
+ .probe = hdmi_dev_probe,
+ .remove = hdmi_dev_remove,
+ .driver = {
+ .name = "hdmi_msm",
+ .of_match_table = dt_match,
+ },
+};
+
+void __init hdmi_register(void)
+{
+ platform_driver_register(&hdmi_driver);
+}
+
+void __exit hdmi_unregister(void)
+{
+ platform_driver_unregister(&hdmi_driver);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
new file mode 100644
index 00000000000..9d7723c6528
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HDMI_CONNECTOR_H__
+#define __HDMI_CONNECTOR_H__
+
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/hdmi.h>
+
+#include "msm_drv.h"
+#include "hdmi.xml.h"
+
+
+struct hdmi_phy;
+struct hdmi_platform_config;
+
+struct hdmi_audio {
+ bool enabled;
+ struct hdmi_audio_infoframe infoframe;
+ int rate;
+};
+
+struct hdmi {
+ struct kref refcount;
+
+ struct drm_device *dev;
+ struct platform_device *pdev;
+
+ const struct hdmi_platform_config *config;
+
+ /* audio state: */
+ struct hdmi_audio audio;
+
+ /* video state: */
+ bool power_on;
+ unsigned long int pixclock;
+
+ void __iomem *mmio;
+
+ struct regulator *hpd_regs[2];
+ struct regulator *pwr_regs[2];
+ struct clk *hpd_clks[3];
+ struct clk *pwr_clks[2];
+
+ struct hdmi_phy *phy;
+ struct i2c_adapter *i2c;
+ struct drm_connector *connector;
+ struct drm_bridge *bridge;
+
+ /* the encoder we are hooked to (outside of hdmi block) */
+ struct drm_encoder *encoder;
+
+ bool hdmi_mode; /* are we in hdmi mode? */
+
+ int irq;
+};
+
+/* platform config data (ie. from DT, or pdata) */
+struct hdmi_platform_config {
+ struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
+ const char *mmio_name;
+
+ /* regulators that need to be on for hpd: */
+ const char **hpd_reg_names;
+ int hpd_reg_cnt;
+
+ /* regulators that need to be on for screen pwr: */
+ const char **pwr_reg_names;
+ int pwr_reg_cnt;
+
+ /* clks that need to be on for hpd: */
+ const char **hpd_clk_names;
+	const unsigned long *hpd_freq;
+ int hpd_clk_cnt;
+
+ /* clks that need to be on for screen pwr (ie pixel clk): */
+ const char **pwr_clk_names;
+ int pwr_clk_cnt;
+
+ /* gpio's: */
+ int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
+
+	/* older devices had their own irq; on mdp5+ it is shared w/ mdp: */
+ bool shared_irq;
+};
+
+void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
+void hdmi_destroy(struct kref *kref);
+
+static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
+{
+ msm_writel(data, hdmi->mmio + reg);
+}
+
+static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
+{
+ return msm_readl(hdmi->mmio + reg);
+}
+
+static inline struct hdmi *hdmi_reference(struct hdmi *hdmi)
+{
+ kref_get(&hdmi->refcount);
+ return hdmi;
+}
+
+static inline void hdmi_unreference(struct hdmi *hdmi)
+{
+ kref_put(&hdmi->refcount, hdmi_destroy);
+}
+
+/*
+ * The phy appears to be different, for example between 8960 and 8x60,
+ * so split the phy related functions out and load the correct one at
+ * runtime:
+ */
+
+struct hdmi_phy_funcs {
+ void (*destroy)(struct hdmi_phy *phy);
+ void (*reset)(struct hdmi_phy *phy);
+ void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock);
+ void (*powerdown)(struct hdmi_phy *phy);
+};
+
+struct hdmi_phy {
+ const struct hdmi_phy_funcs *funcs;
+};
+
+struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi);
+struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
+struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi);
+
+/*
+ * audio:
+ */
+
+int hdmi_audio_update(struct hdmi *hdmi);
+int hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
+ uint32_t num_of_channels, uint32_t channel_allocation,
+ uint32_t level_shift, bool down_mix);
+void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
+
+
+/*
+ * hdmi bridge:
+ */
+
+struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi);
+
+/*
+ * hdmi connector:
+ */
+
+void hdmi_connector_irq(struct drm_connector *connector);
+struct drm_connector *hdmi_connector_init(struct hdmi *hdmi);
+
+/*
+ * i2c adapter for ddc:
+ */
+
+void hdmi_i2c_irq(struct i2c_adapter *i2c);
+void hdmi_i2c_destroy(struct i2c_adapter *i2c);
+struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
+
+#endif /* __HDMI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
new file mode 100644
index 00000000000..e2636582cfd
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -0,0 +1,575 @@
+#ifndef HDMI_XML
+#define HDMI_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum hdmi_hdcp_key_state {
+ NO_KEYS = 0,
+ NOT_CHECKED = 1,
+ CHECKING = 2,
+ KEYS_VALID = 3,
+ AKSV_INVALID = 4,
+ CHECKSUM_MISMATCH = 5,
+};
+
+enum hdmi_ddc_read_write {
+ DDC_WRITE = 0,
+ DDC_READ = 1,
+};
+
+enum hdmi_acr_cts {
+ ACR_NONE = 0,
+ ACR_32 = 1,
+ ACR_44 = 2,
+ ACR_48 = 3,
+};
+
+#define REG_HDMI_CTRL 0x00000000
+#define HDMI_CTRL_ENABLE 0x00000001
+#define HDMI_CTRL_HDMI 0x00000002
+#define HDMI_CTRL_ENCRYPTED 0x00000004
+
+#define REG_HDMI_AUDIO_PKT_CTRL1 0x00000020
+#define HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND 0x00000001
+
+#define REG_HDMI_ACR_PKT_CTRL 0x00000024
+#define HDMI_ACR_PKT_CTRL_CONT 0x00000001
+#define HDMI_ACR_PKT_CTRL_SEND 0x00000002
+#define HDMI_ACR_PKT_CTRL_SELECT__MASK 0x00000030
+#define HDMI_ACR_PKT_CTRL_SELECT__SHIFT 4
+static inline uint32_t HDMI_ACR_PKT_CTRL_SELECT(enum hdmi_acr_cts val)
+{
+ return ((val) << HDMI_ACR_PKT_CTRL_SELECT__SHIFT) & HDMI_ACR_PKT_CTRL_SELECT__MASK;
+}
+#define HDMI_ACR_PKT_CTRL_SOURCE 0x00000100
+#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK 0x00070000
+#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT 16
+static inline uint32_t HDMI_ACR_PKT_CTRL_N_MULTIPLIER(uint32_t val)
+{
+ return ((val) << HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT) & HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK;
+}
+#define HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY 0x80000000
+
+#define REG_HDMI_VBI_PKT_CTRL 0x00000028
+#define HDMI_VBI_PKT_CTRL_GC_ENABLE 0x00000010
+#define HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME 0x00000020
+#define HDMI_VBI_PKT_CTRL_ISRC_SEND 0x00000100
+#define HDMI_VBI_PKT_CTRL_ISRC_CONTINUOUS 0x00000200
+#define HDMI_VBI_PKT_CTRL_ACP_SEND 0x00001000
+#define HDMI_VBI_PKT_CTRL_ACP_SRC_SW 0x00002000
+
+#define REG_HDMI_INFOFRAME_CTRL0 0x0000002c
+#define HDMI_INFOFRAME_CTRL0_AVI_SEND 0x00000001
+#define HDMI_INFOFRAME_CTRL0_AVI_CONT 0x00000002
+#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND 0x00000010
+#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT 0x00000020
+#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040
+#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080
+
+#define REG_HDMI_GEN_PKT_CTRL 0x00000034
+#define HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001
+#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002
+#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK 0x0000000c
+#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT 2
+static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE(uint32_t val)
+{
+ return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK;
+}
+#define HDMI_GEN_PKT_CTRL_GENERIC1_SEND 0x00000010
+#define HDMI_GEN_PKT_CTRL_GENERIC1_CONT 0x00000020
+#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK 0x003f0000
+#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT 16
+static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_LINE(uint32_t val)
+{
+ return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK;
+}
+#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK 0x3f000000
+#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT 24
+static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC1_LINE(uint32_t val)
+{
+ return ((val) << HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK;
+}
+
+#define REG_HDMI_GC 0x00000040
+#define HDMI_GC_MUTE 0x00000001
+
+#define REG_HDMI_AUDIO_PKT_CTRL2 0x00000044
+#define HDMI_AUDIO_PKT_CTRL2_OVERRIDE 0x00000001
+#define HDMI_AUDIO_PKT_CTRL2_LAYOUT 0x00000002
+
+static inline uint32_t REG_HDMI_AVI_INFO(uint32_t i0) { return 0x0000006c + 0x4*i0; }
+
+#define REG_HDMI_GENERIC0_HDR 0x00000084
+
+static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*i0; }
+
+#define REG_HDMI_GENERIC1_HDR 0x000000a4
+
+static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
+
+static inline uint32_t REG_HDMI_ACR(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
+
+static inline uint32_t REG_HDMI_ACR_0(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
+#define HDMI_ACR_0_CTS__MASK 0xfffff000
+#define HDMI_ACR_0_CTS__SHIFT 12
+static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
+{
+ return ((val) << HDMI_ACR_0_CTS__SHIFT) & HDMI_ACR_0_CTS__MASK;
+}
+
+static inline uint32_t REG_HDMI_ACR_1(uint32_t i0) { return 0x000000c8 + 0x8*i0; }
+#define HDMI_ACR_1_N__MASK 0xffffffff
+#define HDMI_ACR_1_N__SHIFT 0
+static inline uint32_t HDMI_ACR_1_N(uint32_t val)
+{
+ return ((val) << HDMI_ACR_1_N__SHIFT) & HDMI_ACR_1_N__MASK;
+}
+
+#define REG_HDMI_AUDIO_INFO0 0x000000e4
+#define HDMI_AUDIO_INFO0_CHECKSUM__MASK 0x000000ff
+#define HDMI_AUDIO_INFO0_CHECKSUM__SHIFT 0
+static inline uint32_t HDMI_AUDIO_INFO0_CHECKSUM(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_INFO0_CHECKSUM__SHIFT) & HDMI_AUDIO_INFO0_CHECKSUM__MASK;
+}
+#define HDMI_AUDIO_INFO0_CC__MASK 0x00000700
+#define HDMI_AUDIO_INFO0_CC__SHIFT 8
+static inline uint32_t HDMI_AUDIO_INFO0_CC(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_INFO0_CC__SHIFT) & HDMI_AUDIO_INFO0_CC__MASK;
+}
+
+#define REG_HDMI_AUDIO_INFO1 0x000000e8
+#define HDMI_AUDIO_INFO1_CA__MASK 0x000000ff
+#define HDMI_AUDIO_INFO1_CA__SHIFT 0
+static inline uint32_t HDMI_AUDIO_INFO1_CA(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_INFO1_CA__SHIFT) & HDMI_AUDIO_INFO1_CA__MASK;
+}
+#define HDMI_AUDIO_INFO1_LSV__MASK 0x00007800
+#define HDMI_AUDIO_INFO1_LSV__SHIFT 11
+static inline uint32_t HDMI_AUDIO_INFO1_LSV(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_INFO1_LSV__SHIFT) & HDMI_AUDIO_INFO1_LSV__MASK;
+}
+#define HDMI_AUDIO_INFO1_DM_INH 0x00008000
+
+#define REG_HDMI_HDCP_CTRL 0x00000110
+#define HDMI_HDCP_CTRL_ENABLE 0x00000001
+#define HDMI_HDCP_CTRL_ENCRYPTION_ENABLE 0x00000100
+
+#define REG_HDMI_HDCP_INT_CTRL 0x00000118
+
+#define REG_HDMI_HDCP_LINK0_STATUS 0x0000011c
+#define HDMI_HDCP_LINK0_STATUS_AN_0_READY 0x00000100
+#define HDMI_HDCP_LINK0_STATUS_AN_1_READY 0x00000200
+#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK 0x70000000
+#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT 28
+static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state val)
+{
+ return ((val) << HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT) & HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK;
+}
+
+#define REG_HDMI_HDCP_RESET 0x00000130
+#define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001
+
+#define REG_HDMI_VENSPEC_INFO0 0x0000016c
+
+#define REG_HDMI_VENSPEC_INFO1 0x00000170
+
+#define REG_HDMI_VENSPEC_INFO2 0x00000174
+
+#define REG_HDMI_VENSPEC_INFO3 0x00000178
+
+#define REG_HDMI_VENSPEC_INFO4 0x0000017c
+
+#define REG_HDMI_VENSPEC_INFO5 0x00000180
+
+#define REG_HDMI_VENSPEC_INFO6 0x00000184
+
+#define REG_HDMI_AUDIO_CFG 0x000001d0
+#define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001
+#define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0
+#define HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT 4
+static inline uint32_t HDMI_AUDIO_CFG_FIFO_WATERMARK(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT) & HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
+}
+
+#define REG_HDMI_USEC_REFTIMER 0x00000208
+
+#define REG_HDMI_DDC_CTRL 0x0000020c
+#define HDMI_DDC_CTRL_GO 0x00000001
+#define HDMI_DDC_CTRL_SOFT_RESET 0x00000002
+#define HDMI_DDC_CTRL_SEND_RESET 0x00000004
+#define HDMI_DDC_CTRL_SW_STATUS_RESET 0x00000008
+#define HDMI_DDC_CTRL_TRANSACTION_CNT__MASK 0x00300000
+#define HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT 20
+static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val)
+{
+ return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK;
+}
+
+#define REG_HDMI_DDC_ARBITRATION 0x00000210
+#define HDMI_DDC_ARBITRATION_HW_ARBITRATION 0x00000010
+
+#define REG_HDMI_DDC_INT_CTRL 0x00000214
+#define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001
+#define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002
+#define HDMI_DDC_INT_CTRL_SW_DONE_MASK 0x00000004
+
+#define REG_HDMI_DDC_SW_STATUS 0x00000218
+#define HDMI_DDC_SW_STATUS_NACK0 0x00001000
+#define HDMI_DDC_SW_STATUS_NACK1 0x00002000
+#define HDMI_DDC_SW_STATUS_NACK2 0x00004000
+#define HDMI_DDC_SW_STATUS_NACK3 0x00008000
+
+#define REG_HDMI_DDC_HW_STATUS 0x0000021c
+
+#define REG_HDMI_DDC_SPEED 0x00000220
+#define HDMI_DDC_SPEED_THRESHOLD__MASK 0x00000003
+#define HDMI_DDC_SPEED_THRESHOLD__SHIFT 0
+static inline uint32_t HDMI_DDC_SPEED_THRESHOLD(uint32_t val)
+{
+ return ((val) << HDMI_DDC_SPEED_THRESHOLD__SHIFT) & HDMI_DDC_SPEED_THRESHOLD__MASK;
+}
+#define HDMI_DDC_SPEED_PRESCALE__MASK 0xffff0000
+#define HDMI_DDC_SPEED_PRESCALE__SHIFT 16
+static inline uint32_t HDMI_DDC_SPEED_PRESCALE(uint32_t val)
+{
+ return ((val) << HDMI_DDC_SPEED_PRESCALE__SHIFT) & HDMI_DDC_SPEED_PRESCALE__MASK;
+}
+
+#define REG_HDMI_DDC_SETUP 0x00000224
+#define HDMI_DDC_SETUP_TIMEOUT__MASK 0xff000000
+#define HDMI_DDC_SETUP_TIMEOUT__SHIFT 24
+static inline uint32_t HDMI_DDC_SETUP_TIMEOUT(uint32_t val)
+{
+ return ((val) << HDMI_DDC_SETUP_TIMEOUT__SHIFT) & HDMI_DDC_SETUP_TIMEOUT__MASK;
+}
+
+static inline uint32_t REG_HDMI_I2C_TRANSACTION(uint32_t i0) { return 0x00000228 + 0x4*i0; }
+
+static inline uint32_t REG_HDMI_I2C_TRANSACTION_REG(uint32_t i0) { return 0x00000228 + 0x4*i0; }
+#define HDMI_I2C_TRANSACTION_REG_RW__MASK 0x00000001
+#define HDMI_I2C_TRANSACTION_REG_RW__SHIFT 0
+static inline uint32_t HDMI_I2C_TRANSACTION_REG_RW(enum hdmi_ddc_read_write val)
+{
+ return ((val) << HDMI_I2C_TRANSACTION_REG_RW__SHIFT) & HDMI_I2C_TRANSACTION_REG_RW__MASK;
+}
+#define HDMI_I2C_TRANSACTION_REG_STOP_ON_NACK 0x00000100
+#define HDMI_I2C_TRANSACTION_REG_START 0x00001000
+#define HDMI_I2C_TRANSACTION_REG_STOP 0x00002000
+#define HDMI_I2C_TRANSACTION_REG_CNT__MASK 0x00ff0000
+#define HDMI_I2C_TRANSACTION_REG_CNT__SHIFT 16
+static inline uint32_t HDMI_I2C_TRANSACTION_REG_CNT(uint32_t val)
+{
+ return ((val) << HDMI_I2C_TRANSACTION_REG_CNT__SHIFT) & HDMI_I2C_TRANSACTION_REG_CNT__MASK;
+}
+
+#define REG_HDMI_DDC_DATA 0x00000238
+#define HDMI_DDC_DATA_DATA_RW__MASK 0x00000001
+#define HDMI_DDC_DATA_DATA_RW__SHIFT 0
+static inline uint32_t HDMI_DDC_DATA_DATA_RW(enum hdmi_ddc_read_write val)
+{
+ return ((val) << HDMI_DDC_DATA_DATA_RW__SHIFT) & HDMI_DDC_DATA_DATA_RW__MASK;
+}
+#define HDMI_DDC_DATA_DATA__MASK 0x0000ff00
+#define HDMI_DDC_DATA_DATA__SHIFT 8
+static inline uint32_t HDMI_DDC_DATA_DATA(uint32_t val)
+{
+ return ((val) << HDMI_DDC_DATA_DATA__SHIFT) & HDMI_DDC_DATA_DATA__MASK;
+}
+#define HDMI_DDC_DATA_INDEX__MASK 0x00ff0000
+#define HDMI_DDC_DATA_INDEX__SHIFT 16
+static inline uint32_t HDMI_DDC_DATA_INDEX(uint32_t val)
+{
+ return ((val) << HDMI_DDC_DATA_INDEX__SHIFT) & HDMI_DDC_DATA_INDEX__MASK;
+}
+#define HDMI_DDC_DATA_INDEX_WRITE 0x80000000
+
+#define REG_HDMI_HPD_INT_STATUS 0x00000250
+#define HDMI_HPD_INT_STATUS_INT 0x00000001
+#define HDMI_HPD_INT_STATUS_CABLE_DETECTED 0x00000002
+
+#define REG_HDMI_HPD_INT_CTRL 0x00000254
+#define HDMI_HPD_INT_CTRL_INT_ACK 0x00000001
+#define HDMI_HPD_INT_CTRL_INT_CONNECT 0x00000002
+#define HDMI_HPD_INT_CTRL_INT_EN 0x00000004
+#define HDMI_HPD_INT_CTRL_RX_INT_ACK 0x00000010
+#define HDMI_HPD_INT_CTRL_RX_INT_EN 0x00000020
+#define HDMI_HPD_INT_CTRL_RCV_PLUGIN_DET_MASK 0x00000200
+
+#define REG_HDMI_HPD_CTRL 0x00000258
+#define HDMI_HPD_CTRL_TIMEOUT__MASK 0x00001fff
+#define HDMI_HPD_CTRL_TIMEOUT__SHIFT 0
+static inline uint32_t HDMI_HPD_CTRL_TIMEOUT(uint32_t val)
+{
+ return ((val) << HDMI_HPD_CTRL_TIMEOUT__SHIFT) & HDMI_HPD_CTRL_TIMEOUT__MASK;
+}
+#define HDMI_HPD_CTRL_ENABLE 0x10000000
+
+#define REG_HDMI_DDC_REF 0x0000027c
+#define HDMI_DDC_REF_REFTIMER_ENABLE 0x00010000
+#define HDMI_DDC_REF_REFTIMER__MASK 0x0000ffff
+#define HDMI_DDC_REF_REFTIMER__SHIFT 0
+static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
+{
+ return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK;
+}
+
+#define REG_HDMI_CEC_STATUS 0x00000298
+
+#define REG_HDMI_CEC_INT 0x0000029c
+
+#define REG_HDMI_CEC_ADDR 0x000002a0
+
+#define REG_HDMI_CEC_TIME 0x000002a4
+
+#define REG_HDMI_CEC_REFTIMER 0x000002a8
+
+#define REG_HDMI_CEC_RD_DATA 0x000002ac
+
+#define REG_HDMI_CEC_RD_FILTER 0x000002b0
+
+#define REG_HDMI_ACTIVE_HSYNC 0x000002b4
+#define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff
+#define HDMI_ACTIVE_HSYNC_START__SHIFT 0
+static inline uint32_t HDMI_ACTIVE_HSYNC_START(uint32_t val)
+{
+ return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) & HDMI_ACTIVE_HSYNC_START__MASK;
+}
+#define HDMI_ACTIVE_HSYNC_END__MASK 0x0fff0000
+#define HDMI_ACTIVE_HSYNC_END__SHIFT 16
+static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val)
+{
+ return ((val) << HDMI_ACTIVE_HSYNC_END__SHIFT) & HDMI_ACTIVE_HSYNC_END__MASK;
+}
+
+#define REG_HDMI_ACTIVE_VSYNC 0x000002b8
+#define HDMI_ACTIVE_VSYNC_START__MASK 0x00000fff
+#define HDMI_ACTIVE_VSYNC_START__SHIFT 0
+static inline uint32_t HDMI_ACTIVE_VSYNC_START(uint32_t val)
+{
+ return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & HDMI_ACTIVE_VSYNC_START__MASK;
+}
+#define HDMI_ACTIVE_VSYNC_END__MASK 0x0fff0000
+#define HDMI_ACTIVE_VSYNC_END__SHIFT 16
+static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val)
+{
+ return ((val) << HDMI_ACTIVE_VSYNC_END__SHIFT) & HDMI_ACTIVE_VSYNC_END__MASK;
+}
+
+#define REG_HDMI_VSYNC_ACTIVE_F2 0x000002bc
+#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00000fff
+#define HDMI_VSYNC_ACTIVE_F2_START__SHIFT 0
+static inline uint32_t HDMI_VSYNC_ACTIVE_F2_START(uint32_t val)
+{
+ return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & HDMI_VSYNC_ACTIVE_F2_START__MASK;
+}
+#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x0fff0000
+#define HDMI_VSYNC_ACTIVE_F2_END__SHIFT 16
+static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
+{
+ return ((val) << HDMI_VSYNC_ACTIVE_F2_END__SHIFT) & HDMI_VSYNC_ACTIVE_F2_END__MASK;
+}
+
+#define REG_HDMI_TOTAL 0x000002c0
+#define HDMI_TOTAL_H_TOTAL__MASK 0x00000fff
+#define HDMI_TOTAL_H_TOTAL__SHIFT 0
+static inline uint32_t HDMI_TOTAL_H_TOTAL(uint32_t val)
+{
+ return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & HDMI_TOTAL_H_TOTAL__MASK;
+}
+#define HDMI_TOTAL_V_TOTAL__MASK 0x0fff0000
+#define HDMI_TOTAL_V_TOTAL__SHIFT 16
+static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val)
+{
+ return ((val) << HDMI_TOTAL_V_TOTAL__SHIFT) & HDMI_TOTAL_V_TOTAL__MASK;
+}
+
+#define REG_HDMI_VSYNC_TOTAL_F2 0x000002c4
+#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00000fff
+#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT 0
+static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
+{
+ return ((val) << HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT) & HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK;
+}
+
+#define REG_HDMI_FRAME_CTRL 0x000002c8
+#define HDMI_FRAME_CTRL_RGB_MUX_SEL_BGR 0x00001000
+#define HDMI_FRAME_CTRL_VSYNC_LOW 0x10000000
+#define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000
+#define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000
+
+#define REG_HDMI_AUD_INT 0x000002cc
+#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001
+#define HDMI_AUD_INT_AUD_FIFO_URUN_MASK			0x00000002
+#define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004
+#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008
+
+#define REG_HDMI_PHY_CTRL 0x000002d4
+#define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001
+#define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002
+#define HDMI_PHY_CTRL_SW_RESET 0x00000004
+#define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008
+
+#define REG_HDMI_CEC_WR_RANGE 0x000002dc
+
+#define REG_HDMI_CEC_RD_RANGE 0x000002e0
+
+#define REG_HDMI_VERSION 0x000002e4
+
+#define REG_HDMI_CEC_COMPL_CTL 0x00000360
+
+#define REG_HDMI_CEC_RD_START_RANGE 0x00000364
+
+#define REG_HDMI_CEC_RD_TOTAL_RANGE 0x00000368
+
+#define REG_HDMI_CEC_RD_ERR_RESP_LO 0x0000036c
+
+#define REG_HDMI_CEC_WR_CHECK_CONFIG 0x00000370
+
+#define REG_HDMI_8x60_PHY_REG0 0x00000300
+#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c
+#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2
+static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val)
+{
+ return ((val) << HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT) & HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK;
+}
+
+#define REG_HDMI_8x60_PHY_REG1 0x00000304
+#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK 0x000000f0
+#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT 4
+static inline uint32_t HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(uint32_t val)
+{
+ return ((val) << HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT) & HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK;
+}
+#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK 0x0000000f
+#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT 0
+static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
+{
+ return ((val) << HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT) & HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK;
+}
+
+#define REG_HDMI_8x60_PHY_REG2 0x00000308
+#define HDMI_8x60_PHY_REG2_PD_DESER 0x00000001
+#define HDMI_8x60_PHY_REG2_PD_DRIVE_1 0x00000002
+#define HDMI_8x60_PHY_REG2_PD_DRIVE_2 0x00000004
+#define HDMI_8x60_PHY_REG2_PD_DRIVE_3 0x00000008
+#define HDMI_8x60_PHY_REG2_PD_DRIVE_4 0x00000010
+#define HDMI_8x60_PHY_REG2_PD_PLL 0x00000020
+#define HDMI_8x60_PHY_REG2_PD_PWRGEN 0x00000040
+#define HDMI_8x60_PHY_REG2_RCV_SENSE_EN 0x00000080
+
+#define REG_HDMI_8x60_PHY_REG3 0x0000030c
+#define HDMI_8x60_PHY_REG3_PLL_ENABLE 0x00000001
+
+#define REG_HDMI_8x60_PHY_REG4 0x00000310
+
+#define REG_HDMI_8x60_PHY_REG5 0x00000314
+
+#define REG_HDMI_8x60_PHY_REG6 0x00000318
+
+#define REG_HDMI_8x60_PHY_REG7 0x0000031c
+
+#define REG_HDMI_8x60_PHY_REG8 0x00000320
+
+#define REG_HDMI_8x60_PHY_REG9 0x00000324
+
+#define REG_HDMI_8x60_PHY_REG10 0x00000328
+
+#define REG_HDMI_8x60_PHY_REG11 0x0000032c
+
+#define REG_HDMI_8x60_PHY_REG12 0x00000330
+#define HDMI_8x60_PHY_REG12_RETIMING_EN 0x00000001
+#define HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN 0x00000002
+#define HDMI_8x60_PHY_REG12_FORCE_LOCK 0x00000010
+
+#define REG_HDMI_8960_PHY_REG0 0x00000400
+
+#define REG_HDMI_8960_PHY_REG1 0x00000404
+
+#define REG_HDMI_8960_PHY_REG2 0x00000408
+
+#define REG_HDMI_8960_PHY_REG3 0x0000040c
+
+#define REG_HDMI_8960_PHY_REG4 0x00000410
+
+#define REG_HDMI_8960_PHY_REG5 0x00000414
+
+#define REG_HDMI_8960_PHY_REG6 0x00000418
+
+#define REG_HDMI_8960_PHY_REG7 0x0000041c
+
+#define REG_HDMI_8960_PHY_REG8 0x00000420
+
+#define REG_HDMI_8960_PHY_REG9 0x00000424
+
+#define REG_HDMI_8960_PHY_REG10 0x00000428
+
+#define REG_HDMI_8960_PHY_REG11 0x0000042c
+
+#define REG_HDMI_8960_PHY_REG12 0x00000430
+
+#define REG_HDMI_8x74_ANA_CFG0 0x00000000
+
+#define REG_HDMI_8x74_ANA_CFG1 0x00000004
+
+#define REG_HDMI_8x74_PD_CTRL0 0x00000010
+
+#define REG_HDMI_8x74_PD_CTRL1 0x00000014
+
+#define REG_HDMI_8x74_BIST_CFG0 0x00000034
+
+#define REG_HDMI_8x74_BIST_PATN0 0x0000003c
+
+#define REG_HDMI_8x74_BIST_PATN1 0x00000040
+
+#define REG_HDMI_8x74_BIST_PATN2 0x00000044
+
+#define REG_HDMI_8x74_BIST_PATN3 0x00000048
+
+
+#endif /* HDMI_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
new file mode 100644
index 00000000000..872485f6013
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/hdmi.h>
+#include "hdmi.h"
+
+
+/* Supported HDMI Audio channels */
+#define MSM_HDMI_AUDIO_CHANNEL_2 0
+#define MSM_HDMI_AUDIO_CHANNEL_4 1
+#define MSM_HDMI_AUDIO_CHANNEL_6 2
+#define MSM_HDMI_AUDIO_CHANNEL_8 3
+
+/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
+static int nchannels[] = { 2, 4, 6, 8 };
+
+/* Supported HDMI Audio sample rates */
+#define MSM_HDMI_SAMPLE_RATE_32KHZ 0
+#define MSM_HDMI_SAMPLE_RATE_44_1KHZ 1
+#define MSM_HDMI_SAMPLE_RATE_48KHZ 2
+#define MSM_HDMI_SAMPLE_RATE_88_2KHZ 3
+#define MSM_HDMI_SAMPLE_RATE_96KHZ 4
+#define MSM_HDMI_SAMPLE_RATE_176_4KHZ 5
+#define MSM_HDMI_SAMPLE_RATE_192KHZ 6
+#define MSM_HDMI_SAMPLE_RATE_MAX 7
+
+
+struct hdmi_msm_audio_acr {
+ uint32_t n; /* N parameter for clock regeneration */
+ uint32_t cts; /* CTS parameter for clock regeneration */
+};
+
+struct hdmi_msm_audio_arcs {
+ unsigned long int pixclock;
+ struct hdmi_msm_audio_acr lut[MSM_HDMI_SAMPLE_RATE_MAX];
+};
+
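+/* pclk is given in kHz; pixclock (the LUT key) is stored in Hz: */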
+#define HDMI_MSM_AUDIO_ARCS(pclk, ...) { (1000 * (pclk)), __VA_ARGS__ }
+
+/* Audio constants lookup table, used by hdmi_audio_update(): */
+/* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */
+static const struct hdmi_msm_audio_arcs acr_lut[] = {
+ /* 25.200MHz */
+ HDMI_MSM_AUDIO_ARCS(25200, {
+ {4096, 25200}, {6272, 28000}, {6144, 25200}, {12544, 28000},
+ {12288, 25200}, {25088, 28000}, {24576, 25200} }),
+ /* 27.000MHz */
+ HDMI_MSM_AUDIO_ARCS(27000, {
+ {4096, 27000}, {6272, 30000}, {6144, 27000}, {12544, 30000},
+ {12288, 27000}, {25088, 30000}, {24576, 27000} }),
+ /* 27.027MHz */
+ HDMI_MSM_AUDIO_ARCS(27030, {
+ {4096, 27027}, {6272, 30030}, {6144, 27027}, {12544, 30030},
+ {12288, 27027}, {25088, 30030}, {24576, 27027} }),
+ /* 74.250MHz */
+ HDMI_MSM_AUDIO_ARCS(74250, {
+ {4096, 74250}, {6272, 82500}, {6144, 74250}, {12544, 82500},
+ {12288, 74250}, {25088, 82500}, {24576, 74250} }),
+ /* 148.500MHz */
+ HDMI_MSM_AUDIO_ARCS(148500, {
+ {4096, 148500}, {6272, 165000}, {6144, 148500}, {12544, 165000},
+ {12288, 148500}, {25088, 165000}, {24576, 148500} }),
+};
+
+static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(acr_lut); i++) {
+ const struct hdmi_msm_audio_arcs *arcs = &acr_lut[i];
+ if (arcs->pixclock == pixclock)
+ return arcs;
+ }
+
+ return NULL;
+}
+
+int hdmi_audio_update(struct hdmi *hdmi)
+{
+ struct hdmi_audio *audio = &hdmi->audio;
+ struct hdmi_audio_infoframe *info = &audio->infoframe;
+ const struct hdmi_msm_audio_arcs *arcs = NULL;
+ bool enabled = audio->enabled;
+ uint32_t acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl;
+ uint32_t infofrm_ctrl, audio_config;
+
+ DBG("audio: enabled=%d, channels=%d, channel_allocation=0x%x, "
+ "level_shift_value=%d, downmix_inhibit=%d, rate=%d",
+ audio->enabled, info->channels, info->channel_allocation,
+ info->level_shift_value, info->downmix_inhibit, audio->rate);
+ DBG("video: power_on=%d, pixclock=%lu", hdmi->power_on, hdmi->pixclock);
+
+ if (enabled && !(hdmi->power_on && hdmi->pixclock)) {
+ DBG("disabling audio: no video");
+ enabled = false;
+ }
+
+ if (enabled) {
+ arcs = get_arcs(hdmi->pixclock);
+ if (!arcs) {
+ DBG("disabling audio: unsupported pixclock: %lu",
+ hdmi->pixclock);
+ enabled = false;
+ }
+ }
+
+ /* Read first before writing */
+ acr_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_ACR_PKT_CTRL);
+ vbi_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
+ aud_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_AUDIO_PKT_CTRL1);
+ infofrm_ctrl = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ audio_config = hdmi_read(hdmi, REG_HDMI_AUDIO_CFG);
+
+ /* Clear N/CTS selection bits */
+ acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SELECT__MASK;
+
+ if (enabled) {
+ uint32_t n, cts, multiplier;
+ enum hdmi_acr_cts select;
+ uint8_t buf[14];
+
+ n = arcs->lut[audio->rate].n;
+ cts = arcs->lut[audio->rate].cts;
+
+ if ((MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate)) {
+ multiplier = 4;
+ n >>= 2; /* divide N by 4 and use multiplier */
+ } else if ((MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate)) {
+ multiplier = 2;
+ n >>= 1; /* divide N by 2 and use multiplier */
+ } else {
+ multiplier = 1;
+ }
+
+ DBG("n=%u, cts=%u, multiplier=%u", n, cts, multiplier);
+
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SOURCE;
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY;
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_N_MULTIPLIER(multiplier);
+
+ if ((MSM_HDMI_SAMPLE_RATE_48KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate))
+ select = ACR_48;
+ else if ((MSM_HDMI_SAMPLE_RATE_44_1KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate))
+ select = ACR_44;
+ else /* default to 32k */
+ select = ACR_32;
+
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SELECT(select);
+
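+		/* enum hdmi_acr_cts is 1-based (ACR_NONE == 0) while the
+		 * ACR register pairs are 0-based, hence 'select - 1':
+		 */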
+ hdmi_write(hdmi, REG_HDMI_ACR_0(select - 1),
+ HDMI_ACR_0_CTS(cts));
+ hdmi_write(hdmi, REG_HDMI_ACR_1(select - 1),
+ HDMI_ACR_1_N(n));
+
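+		/* COND(test, flag) evaluates to flag if test is true, else 0: */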
+ hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL2,
+ COND(info->channels != 2, HDMI_AUDIO_PKT_CTRL2_LAYOUT) |
+ HDMI_AUDIO_PKT_CTRL2_OVERRIDE);
+
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_CONT;
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SEND;
+
+ /* configure infoframe: */
+ hdmi_audio_infoframe_pack(info, buf, sizeof(buf));
+		hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0,
+				(buf[3] << 0) | (buf[4] << 8) |
+				(buf[5] << 16) | (buf[6] << 24));
+		hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1,
+				(buf[7] << 0) | (buf[8] << 8));
+
+ hdmi_write(hdmi, REG_HDMI_GC, 0);
+
+ vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_ENABLE;
+ vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME;
+
+ aud_pkt_ctrl |= HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
+
+ infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
+ infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
+ infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
+ infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
+
+ audio_config &= ~HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
+ audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4);
+ audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE;
+ } else {
+ hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
+ acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_CONT;
+ acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SEND;
+ vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE;
+ vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME;
+ aud_pkt_ctrl &= ~HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
+ infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
+ infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
+ infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
+ infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
+ audio_config &= ~HDMI_AUDIO_CFG_ENGINE_ENABLE;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_ACR_PKT_CTRL, acr_pkt_ctrl);
+ hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_ctrl);
+ hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL1, aud_pkt_ctrl);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, infofrm_ctrl);
+
+ hdmi_write(hdmi, REG_HDMI_AUD_INT,
+ COND(enabled, HDMI_AUD_INT_AUD_FIFO_URUN_INT) |
+ COND(enabled, HDMI_AUD_INT_AUD_SAM_DROP_INT));
+
+ hdmi_write(hdmi, REG_HDMI_AUDIO_CFG, audio_config);
+
+
+ DBG("audio %sabled", enabled ? "en" : "dis");
+
+ return 0;
+}
+
+int hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
+ uint32_t num_of_channels, uint32_t channel_allocation,
+ uint32_t level_shift, bool down_mix)
+{
+ struct hdmi_audio *audio;
+
+ if (!hdmi)
+ return -ENXIO;
+
+ audio = &hdmi->audio;
+
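+	/* num_of_channels is a MSM_HDMI_AUDIO_CHANNEL_n index into
+	 * nchannels[], not a raw channel count:
+	 */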
+ if (num_of_channels >= ARRAY_SIZE(nchannels))
+ return -EINVAL;
+
+ audio->enabled = enabled;
+ audio->infoframe.channels = nchannels[num_of_channels];
+ audio->infoframe.channel_allocation = channel_allocation;
+ audio->infoframe.level_shift_value = level_shift;
+ audio->infoframe.downmix_inhibit = down_mix;
+
+ return hdmi_audio_update(hdmi);
+}
+
+void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate)
+{
+ struct hdmi_audio *audio;
+
+ if (!hdmi)
+ return;
+
+ audio = &hdmi->audio;
+
+ if ((rate < 0) || (rate >= MSM_HDMI_SAMPLE_RATE_MAX))
+ return;
+
+ audio->rate = rate;
+ hdmi_audio_update(hdmi);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
new file mode 100644
index 00000000000..f6cf745c249
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_bridge {
+ struct drm_bridge base;
+ struct hdmi *hdmi;
+};
+#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
+
+static void hdmi_bridge_destroy(struct drm_bridge *bridge)
+{
+	struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+
+	hdmi_unreference(hdmi_bridge->hdmi);
+ drm_bridge_cleanup(bridge);
+ kfree(hdmi_bridge);
+}
+
+static void power_on(struct drm_bridge *bridge)
+{
+ struct drm_device *dev = bridge->dev;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ int i, ret;
+
+ for (i = 0; i < config->pwr_reg_cnt; i++) {
+ ret = regulator_enable(hdmi->pwr_regs[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
+ config->pwr_reg_names[i], ret);
+ }
+ }
+
+ if (config->pwr_clk_cnt > 0) {
+ DBG("pixclock: %lu", hdmi->pixclock);
+ ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
+ if (ret) {
+ dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n",
+ config->pwr_clk_names[0], ret);
+ }
+ }
+
+ for (i = 0; i < config->pwr_clk_cnt; i++) {
+ ret = clk_prepare_enable(hdmi->pwr_clks[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable pwr clk: %s (%d)\n",
+ config->pwr_clk_names[i], ret);
+ }
+ }
+}
+
+static void power_off(struct drm_bridge *bridge)
+{
+ struct drm_device *dev = bridge->dev;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ int i, ret;
+
+ /* TODO do we need to wait for final vblank somewhere before
+ * cutting the clocks?
+ */
+ mdelay(16 + 4);
+
+ for (i = 0; i < config->pwr_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->pwr_clks[i]);
+
+ for (i = 0; i < config->pwr_reg_cnt; i++) {
+ ret = regulator_disable(hdmi->pwr_regs[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
+ config->pwr_reg_names[i], ret);
+ }
+ }
+}
+
+static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ struct hdmi_phy *phy = hdmi->phy;
+
+ DBG("power up");
+
+ if (!hdmi->power_on) {
+ power_on(bridge);
+ hdmi->power_on = true;
+ hdmi_audio_update(hdmi);
+ }
+
+ phy->funcs->powerup(phy, hdmi->pixclock);
+ hdmi_set_mode(hdmi, true);
+}
+
+static void hdmi_bridge_enable(struct drm_bridge *bridge)
+{
+}
+
+static void hdmi_bridge_disable(struct drm_bridge *bridge)
+{
+}
+
+static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ struct hdmi_phy *phy = hdmi->phy;
+
+ DBG("power down");
+ hdmi_set_mode(hdmi, false);
+ phy->funcs->powerdown(phy);
+
+ if (hdmi->power_on) {
+ power_off(bridge);
+ hdmi->power_on = false;
+ hdmi_audio_update(hdmi);
+ }
+}
+
+static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ int hstart, hend, vstart, vend;
+ uint32_t frame_ctrl;
+
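+	/* program the adjusted mode, ie. what will actually be scanned out: */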
+ mode = adjusted_mode;
+
+ hdmi->pixclock = mode->clock * 1000;
+
+ hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1;
+
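+	/* translate DRM timings (relative to the start of the active
+	 * region) into the sync-relative start/end values programmed below:
+	 */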
+ hstart = mode->htotal - mode->hsync_start;
+ hend = mode->htotal - mode->hsync_start + mode->hdisplay;
+
+ vstart = mode->vtotal - mode->vsync_start - 1;
+ vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;
+
+ DBG("htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
+ mode->htotal, mode->vtotal, hstart, hend, vstart, vend);
+
+ hdmi_write(hdmi, REG_HDMI_TOTAL,
+ HDMI_TOTAL_H_TOTAL(mode->htotal - 1) |
+ HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));
+
+ hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC,
+ HDMI_ACTIVE_HSYNC_START(hstart) |
+ HDMI_ACTIVE_HSYNC_END(hend));
+ hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC,
+ HDMI_ACTIVE_VSYNC_START(vstart) |
+ HDMI_ACTIVE_VSYNC_END(vend));
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+ HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal));
+ hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+ HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) |
+ HDMI_VSYNC_ACTIVE_F2_END(vend + 1));
+ } else {
+ hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+ HDMI_VSYNC_TOTAL_F2_V_TOTAL(0));
+ hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+ HDMI_VSYNC_ACTIVE_F2_START(0) |
+ HDMI_VSYNC_ACTIVE_F2_END(0));
+ }
+
+ frame_ctrl = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
+ DBG("frame_ctrl=%08x", frame_ctrl);
+ hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
+
+ hdmi_audio_update(hdmi);
+}
+
+static const struct drm_bridge_funcs hdmi_bridge_funcs = {
+ .pre_enable = hdmi_bridge_pre_enable,
+ .enable = hdmi_bridge_enable,
+ .disable = hdmi_bridge_disable,
+ .post_disable = hdmi_bridge_post_disable,
+ .mode_set = hdmi_bridge_mode_set,
+ .destroy = hdmi_bridge_destroy,
+};
+
+
+/* initialize bridge */
+struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
+{
+ struct drm_bridge *bridge = NULL;
+ struct hdmi_bridge *hdmi_bridge;
+ int ret;
+
+ hdmi_bridge = kzalloc(sizeof(*hdmi_bridge), GFP_KERNEL);
+ if (!hdmi_bridge) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ hdmi_bridge->hdmi = hdmi_reference(hdmi);
+
+ bridge = &hdmi_bridge->base;
+
+ drm_bridge_init(hdmi->dev, bridge, &hdmi_bridge_funcs);
+
+ return bridge;
+
+fail:
+ if (bridge)
+ hdmi_bridge_destroy(bridge);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
new file mode 100644
index 00000000000..28f7e3ec6c2
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/gpio.h>
+
+#include "msm_kms.h"
+#include "hdmi.h"
+
+struct hdmi_connector {
+ struct drm_connector base;
+ struct hdmi *hdmi;
+ struct work_struct hpd_work;
+};
+#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
+
+static int gpio_config(struct hdmi *hdmi, bool on)
+{
+ struct drm_device *dev = hdmi->dev;
+ const struct hdmi_platform_config *config = hdmi->config;
+ int ret;
+
+ if (on) {
+ ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
+ if (ret) {
+ dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
+ goto error1;
+ }
+ gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
+
+ ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
+ if (ret) {
+ dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_DDC_DATA", config->ddc_data_gpio, ret);
+ goto error2;
+ }
+ gpio_set_value_cansleep(config->ddc_data_gpio, 1);
+
+ ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
+ if (ret) {
+ dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_HPD", config->hpd_gpio, ret);
+ goto error3;
+ }
+ gpio_direction_input(config->hpd_gpio);
+ gpio_set_value_cansleep(config->hpd_gpio, 1);
+
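+		/* the mux gpios are optional (-1 when not present on the
+		 * board):
+		 */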
+ if (config->mux_en_gpio != -1) {
+ ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
+ if (ret) {
+ dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+				"HDMI_MUX_EN", config->mux_en_gpio, ret);
+ goto error4;
+ }
+ gpio_set_value_cansleep(config->mux_en_gpio, 1);
+ }
+
+ if (config->mux_sel_gpio != -1) {
+ ret = gpio_request(config->mux_sel_gpio, "HDMI_MUX_SEL");
+ if (ret) {
+ dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_MUX_SEL", config->mux_sel_gpio, ret);
+ goto error5;
+ }
+ gpio_set_value_cansleep(config->mux_sel_gpio, 0);
+ }
+ DBG("gpio on");
+ } else {
+ gpio_free(config->ddc_clk_gpio);
+ gpio_free(config->ddc_data_gpio);
+ gpio_free(config->hpd_gpio);
+
+ if (config->mux_en_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_en_gpio, 0);
+ gpio_free(config->mux_en_gpio);
+ }
+
+ if (config->mux_sel_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_sel_gpio, 1);
+ gpio_free(config->mux_sel_gpio);
+ }
+ DBG("gpio off");
+ }
+
+ return 0;
+
+error5:
+ if (config->mux_en_gpio != -1)
+ gpio_free(config->mux_en_gpio);
+error4:
+ gpio_free(config->hpd_gpio);
+error3:
+ gpio_free(config->ddc_data_gpio);
+error2:
+ gpio_free(config->ddc_clk_gpio);
+error1:
+ return ret;
+}
+
+static int hpd_enable(struct hdmi_connector *hdmi_connector)
+{
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct drm_device *dev = hdmi_connector->base.dev;
+ struct hdmi_phy *phy = hdmi->phy;
+ uint32_t hpd_ctrl;
+ int i, ret;
+
+ ret = gpio_config(hdmi, true);
+ if (ret) {
+ dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
+ goto fail;
+ }
+
+ for (i = 0; i < config->hpd_clk_cnt; i++) {
+ if (config->hpd_freq && config->hpd_freq[i]) {
+ ret = clk_set_rate(hdmi->hpd_clks[i],
+ config->hpd_freq[i]);
+ if (ret)
+ dev_warn(dev->dev, "failed to set clk %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ }
+
+ ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_enable(hdmi->hpd_regs[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ goto fail;
+ }
+ }
+
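+	/* reset the phy with the core disabled, then re-enable it: */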
+ hdmi_set_mode(hdmi, false);
+ phy->funcs->reset(phy);
+ hdmi_set_mode(hdmi, true);
+
+ hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
+
+ /* enable HPD events: */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+ HDMI_HPD_INT_CTRL_INT_CONNECT |
+ HDMI_HPD_INT_CTRL_INT_EN);
+
+ /* set timeout to 4.1ms (max) for hardware debounce */
+ hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
+
+ /* Toggle HPD circuit to trigger HPD sense */
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ ~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static int hpd_disable(struct hdmi_connector *hdmi_connector)
+{
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct drm_device *dev = hdmi_connector->base.dev;
+ int i, ret = 0;
+
+ /* Disable HPD interrupt */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
+
+ hdmi_set_mode(hdmi, false);
+
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_disable(hdmi->hpd_regs[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < config->hpd_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->hpd_clks[i]);
+
+ ret = gpio_config(hdmi, false);
+ if (ret) {
+ dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static void
+hotplug_work(struct work_struct *work)
+{
+ struct hdmi_connector *hdmi_connector =
+ container_of(work, struct hdmi_connector, hpd_work);
+ struct drm_connector *connector = &hdmi_connector->base;
+ drm_helper_hpd_irq_event(connector->dev);
+}
+
+void hdmi_connector_irq(struct drm_connector *connector)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ struct msm_drm_private *priv = connector->dev->dev_private;
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ uint32_t hpd_int_status, hpd_int_ctrl;
+
+ /* Process HPD: */
+ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+ hpd_int_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL);
+
+ if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) &&
+ (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
+ bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
+
+ DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
+
+ /* ack the irq: */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+ hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
+
+		/* detect disconnect if we are connected, or vice versa: */
+ hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
+ if (!detected)
+ hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
+
+ queue_work(priv->wq, &hdmi_connector->hpd_work);
+ }
+}
+
+static enum drm_connector_status detect_reg(struct hdmi *hdmi)
+{
+ uint32_t hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+ return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static enum drm_connector_status detect_gpio(struct hdmi *hdmi)
+{
+ const struct hdmi_platform_config *config = hdmi->config;
+ return gpio_get_value(config->hpd_gpio) ?
+ connector_status_connected :
+ connector_status_disconnected;
+}
+
+static enum drm_connector_status hdmi_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ enum drm_connector_status stat_gpio, stat_reg;
+ int retry = 20;
+
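+	/* the gpio and the status register can apparently disagree for a
+	 * short while after a plug/unplug, so poll for up to ~200ms for
+	 * the two to converge:
+	 */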
+ do {
+ stat_gpio = detect_gpio(hdmi);
+ stat_reg = detect_reg(hdmi);
+
+ if (stat_gpio == stat_reg)
+ break;
+
+ mdelay(10);
+ } while (--retry);
+
+	/* the status we get from reading the gpio seems to be more
+	 * reliable, so prefer it if the hdmi and gpio status never
+	 * managed to agree:
+	 */
+ if (stat_gpio != stat_reg) {
+ DBG("HDMI_HPD_INT_STATUS tells us: %d", stat_reg);
+ DBG("hpd gpio tells us: %d", stat_gpio);
+ }
+
+ return stat_gpio;
+}
+
+static void hdmi_connector_destroy(struct drm_connector *connector)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+
+	hpd_disable(hdmi_connector);
+
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+
+ hdmi_unreference(hdmi_connector->hdmi);
+
+ kfree(hdmi_connector);
+}
+
+static int hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct edid *edid;
+ uint32_t hdmi_ctrl;
+ int ret = 0;
+
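+	/* the core must be enabled for DDC to work, so temporarily set the
+	 * enable bit and restore the previous value once the EDID is read:
+	 */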
+ hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
+
+ edid = drm_get_edid(connector, hdmi->i2c);
+
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
+
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ if (edid) {
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return ret;
+}
+
+static int hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct msm_drm_private *priv = connector->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ long actual, requested;
+
+ requested = 1000 * mode->clock;
+ actual = kms->funcs->round_pixclk(kms,
+ requested, hdmi_connector->hdmi->encoder);
+
+ /* for mdp5/apq8074, we manage our own pixel clk (as opposed to
+ * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
+ * instead):
+ */
+ if (config->pwr_clk_cnt > 0)
+ actual = clk_round_rate(hdmi->pwr_clks[0], actual);
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return 0;
+}
+
+static struct drm_encoder *
+hdmi_connector_best_encoder(struct drm_connector *connector)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ return hdmi_connector->hdmi->encoder;
+}
+
+static const struct drm_connector_funcs hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = hdmi_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = hdmi_connector_destroy,
+};
+
+static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
+ .get_modes = hdmi_connector_get_modes,
+ .mode_valid = hdmi_connector_mode_valid,
+ .best_encoder = hdmi_connector_best_encoder,
+};
+
+/* initialize connector */
+struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
+{
+ struct drm_connector *connector = NULL;
+ struct hdmi_connector *hdmi_connector;
+ int ret;
+
+ hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
+ if (!hdmi_connector) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ hdmi_connector->hdmi = hdmi_reference(hdmi);
+ INIT_WORK(&hdmi_connector->hpd_work, hotplug_work);
+
+ connector = &hdmi_connector->base;
+
+ drm_connector_init(hdmi->dev, connector, &hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+
+ drm_sysfs_connector_add(connector);
+
+ ret = hpd_enable(hdmi_connector);
+ if (ret) {
+ dev_err(hdmi->dev->dev, "failed to enable HPD: %d\n", ret);
+ goto fail;
+ }
+
+ drm_mode_connector_attach_encoder(connector, hdmi->encoder);
+
+ return connector;
+
+fail:
+ if (connector)
+ hdmi_connector_destroy(connector);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
new file mode 100644
index 00000000000..f4ab7f70fed
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_i2c_adapter {
+ struct i2c_adapter base;
+ struct hdmi *hdmi;
+ bool sw_done;
+ wait_queue_head_t ddc_event;
+};
+#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base)
+
+static void init_ddc(struct hdmi_i2c_adapter *hdmi_i2c)
+{
+ struct hdmi *hdmi = hdmi_i2c->hdmi;
+
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
+ HDMI_DDC_CTRL_SW_STATUS_RESET);
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
+ HDMI_DDC_CTRL_SOFT_RESET);
+
+ hdmi_write(hdmi, REG_HDMI_DDC_SPEED,
+ HDMI_DDC_SPEED_THRESHOLD(2) |
+ HDMI_DDC_SPEED_PRESCALE(10));
+
+ hdmi_write(hdmi, REG_HDMI_DDC_SETUP,
+ HDMI_DDC_SETUP_TIMEOUT(0xff));
+
+ /* enable reference timer for 27us */
+ hdmi_write(hdmi, REG_HDMI_DDC_REF,
+ HDMI_DDC_REF_REFTIMER_ENABLE |
+ HDMI_DDC_REF_REFTIMER(27));
+}
+
+static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
+{
+ struct hdmi *hdmi = hdmi_i2c->hdmi;
+ struct drm_device *dev = hdmi->dev;
+ uint32_t retry = 0xffff;
+ uint32_t ddc_int_ctrl;
+
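+	/* ack and mask the sw-done interrupt until the status bit reads
+	 * back clear, so a stale completion can't satisfy the next wait:
+	 */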
+ do {
+ --retry;
+
+ hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
+ HDMI_DDC_INT_CTRL_SW_DONE_ACK |
+ HDMI_DDC_INT_CTRL_SW_DONE_MASK);
+
+ ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
+
+ } while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
+
+ if (!retry) {
+ dev_err(dev->dev, "timeout waiting for DDC\n");
+ return -ETIMEDOUT;
+ }
+
+ hdmi_i2c->sw_done = false;
+
+ return 0;
+}
+
+#define MAX_TRANSACTIONS 4
+
+static bool sw_done(struct hdmi_i2c_adapter *hdmi_i2c)
+{
+ struct hdmi *hdmi = hdmi_i2c->hdmi;
+
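+	/* latch completion status; this is called both from the irq
+	 * handler and as the wait_event_timeout() condition below:
+	 */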
+ if (!hdmi_i2c->sw_done) {
+ uint32_t ddc_int_ctrl;
+
+ ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
+
+ if ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_MASK) &&
+ (ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT)) {
+ hdmi_i2c->sw_done = true;
+ hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
+ HDMI_DDC_INT_CTRL_SW_DONE_ACK);
+ }
+ }
+
+ return hdmi_i2c->sw_done;
+}
+
+static int hdmi_i2c_xfer(struct i2c_adapter *i2c,
+ struct i2c_msg *msgs, int num)
+{
+ struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
+ struct hdmi *hdmi = hdmi_i2c->hdmi;
+ struct drm_device *dev = hdmi->dev;
+ static const uint32_t nack[] = {
+ HDMI_DDC_SW_STATUS_NACK0, HDMI_DDC_SW_STATUS_NACK1,
+ HDMI_DDC_SW_STATUS_NACK2, HDMI_DDC_SW_STATUS_NACK3,
+ };
+ int indices[MAX_TRANSACTIONS];
+ int ret, i, j, index = 0;
+ uint32_t ddc_status, ddc_data, i2c_trans;
+
+ num = min(num, MAX_TRANSACTIONS);
+
+ WARN_ON(!(hdmi_read(hdmi, REG_HDMI_CTRL) & HDMI_CTRL_ENABLE));
+
+ if (num == 0)
+ return num;
+
+ init_ddc(hdmi_i2c);
+
+ ret = ddc_clear_irq(hdmi_i2c);
+ if (ret)
+ return ret;
+
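+	/* queue the transfer: push each message's slave address (the first
+	 * entry also rewinds the fifo index) plus any write payload into
+	 * the DDC data fifo, and describe each message in its transaction
+	 * register:
+	 */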
+ for (i = 0; i < num; i++) {
+ struct i2c_msg *p = &msgs[i];
+ uint32_t raw_addr = p->addr << 1;
+
+ if (p->flags & I2C_M_RD)
+ raw_addr |= 1;
+
+ ddc_data = HDMI_DDC_DATA_DATA(raw_addr) |
+ HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
+
+ if (i == 0) {
+ ddc_data |= HDMI_DDC_DATA_INDEX(0) |
+ HDMI_DDC_DATA_INDEX_WRITE;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
+ index++;
+
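+		/* remember where this message's data starts in the fifo: */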
+ indices[i] = index;
+
+ if (p->flags & I2C_M_RD) {
+ index += p->len;
+ } else {
+ for (j = 0; j < p->len; j++) {
+ ddc_data = HDMI_DDC_DATA_DATA(p->buf[j]) |
+ HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
+ hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
+ index++;
+ }
+ }
+
+ i2c_trans = HDMI_I2C_TRANSACTION_REG_CNT(p->len) |
+ HDMI_I2C_TRANSACTION_REG_RW(
+ (p->flags & I2C_M_RD) ? DDC_READ : DDC_WRITE) |
+ HDMI_I2C_TRANSACTION_REG_START;
+
+ if (i == (num - 1))
+ i2c_trans |= HDMI_I2C_TRANSACTION_REG_STOP;
+
+ hdmi_write(hdmi, REG_HDMI_I2C_TRANSACTION(i), i2c_trans);
+ }
+
+ /* trigger the transfer: */
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
+ HDMI_DDC_CTRL_TRANSACTION_CNT(num - 1) |
+ HDMI_DDC_CTRL_GO);
+
+ ret = wait_event_timeout(hdmi_i2c->ddc_event, sw_done(hdmi_i2c), HZ/4);
+ if (ret <= 0) {
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ dev_warn(dev->dev, "DDC timeout: %d\n", ret);
+ DBG("sw_status=%08x, hw_status=%08x, int_ctrl=%08x",
+ hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS),
+ hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS),
+ hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL));
+ return ret;
+ }
+
+ ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS);
+
+ /* read back results of any read transactions: */
+ for (i = 0; i < num; i++) {
+ struct i2c_msg *p = &msgs[i];
+
+ if (!(p->flags & I2C_M_RD))
+ continue;
+
+ /* check for NACK: */
+ if (ddc_status & nack[i]) {
+ DBG("ddc_status=%08x", ddc_status);
+ break;
+ }
+
+ ddc_data = HDMI_DDC_DATA_DATA_RW(DDC_READ) |
+ HDMI_DDC_DATA_INDEX(indices[i]) |
+ HDMI_DDC_DATA_INDEX_WRITE;
+
+ hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
+
+ /* discard first byte: */
+ hdmi_read(hdmi, REG_HDMI_DDC_DATA);
+
+ for (j = 0; j < p->len; j++) {
+ ddc_data = hdmi_read(hdmi, REG_HDMI_DDC_DATA);
+ p->buf[j] = FIELD(ddc_data, HDMI_DDC_DATA_DATA);
+ }
+ }
+
+ return i;
+}
+
+static u32 hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm hdmi_i2c_algorithm = {
+ .master_xfer = hdmi_i2c_xfer,
+ .functionality = hdmi_i2c_func,
+};
+
+void hdmi_i2c_irq(struct i2c_adapter *i2c)
+{
+ struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
+
+ if (sw_done(hdmi_i2c))
+ wake_up_all(&hdmi_i2c->ddc_event);
+}
+
+void hdmi_i2c_destroy(struct i2c_adapter *i2c)
+{
+ struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
+ i2c_del_adapter(i2c);
+ kfree(hdmi_i2c);
+}
+
+struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi)
+{
+ struct drm_device *dev = hdmi->dev;
+ struct hdmi_i2c_adapter *hdmi_i2c;
+ struct i2c_adapter *i2c = NULL;
+ int ret;
+
+ hdmi_i2c = kzalloc(sizeof(*hdmi_i2c), GFP_KERNEL);
+ if (!hdmi_i2c) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ i2c = &hdmi_i2c->base;
+
+ hdmi_i2c->hdmi = hdmi;
+ init_waitqueue_head(&hdmi_i2c->ddc_event);
+
+ i2c->owner = THIS_MODULE;
+ i2c->class = I2C_CLASS_DDC;
+ snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c");
+ i2c->dev.parent = &hdmi->pdev->dev;
+ i2c->algo = &hdmi_i2c_algorithm;
+
+ ret = i2c_add_adapter(i2c);
+ if (ret) {
+ dev_err(dev->dev, "failed to register hdmi i2c: %d\n", ret);
+ goto fail;
+ }
+
+ return i2c;
+
+fail:
+ if (i2c)
+ hdmi_i2c_destroy(i2c);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
new file mode 100644
index 00000000000..e5b7ed5b8f0
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_phy_8960 {
+ struct hdmi_phy base;
+ struct hdmi *hdmi;
+};
+#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
+
+static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
+ kfree(phy_8960);
+}
+
+static void hdmi_phy_8960_reset(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
+ struct hdmi *hdmi = phy_8960->hdmi;
+ unsigned int val;
+
+ val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
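+	/* the *_LOW bits presumably flag an active-low reset signal:
+	 * assert each reset at its active level, hold for 100ms, then
+	 * de-assert:
+	 */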
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+
+ msleep(100);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+}
+
+static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
+ unsigned long int pixclock)
+{
+ struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
+ struct hdmi *hdmi = phy_8960->hdmi;
+
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG5, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG6, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG7, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG8, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG9, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG10, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG11, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG3, 0x20);
+}
+
+static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
+ struct hdmi *hdmi = phy_8960->hdmi;
+
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f);
+}
+
+static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = {
+ .destroy = hdmi_phy_8960_destroy,
+ .reset = hdmi_phy_8960_reset,
+ .powerup = hdmi_phy_8960_powerup,
+ .powerdown = hdmi_phy_8960_powerdown,
+};
+
+struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
+{
+ struct hdmi_phy_8960 *phy_8960;
+ struct hdmi_phy *phy = NULL;
+ int ret;
+
+ phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
+ if (!phy_8960) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ phy = &phy_8960->base;
+
+ phy->funcs = &hdmi_phy_8960_funcs;
+
+ phy_8960->hdmi = hdmi;
+
+ return phy;
+
+fail:
+ if (phy)
+ hdmi_phy_8960_destroy(phy);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
new file mode 100644
index 00000000000..391433c1af7
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_phy_8x60 {
+ struct hdmi_phy base;
+ struct hdmi *hdmi;
+};
+#define to_hdmi_phy_8x60(x) container_of(x, struct hdmi_phy_8x60, base)
+
+static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
+ kfree(phy_8x60);
+}
+
+static void hdmi_phy_8x60_reset(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
+ struct hdmi *hdmi = phy_8x60->hdmi;
+ unsigned int val;
+
+ val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
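+	/* toggle SW_RESET, respecting the SW_RESET_LOW polarity flag: */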
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ msleep(100);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ }
+}
+
+static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
+ unsigned long int pixclock)
+{
+ struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
+ struct hdmi *hdmi = phy_8x60->hdmi;
+
+	/* De-serializer delay D/C (don't care) for non-loopback mode: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG0,
+ HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3));
+
+ if (pixclock == 27000000) {
+ /* video_format == HDMI_VFRMT_720x480p60_16_9 */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
+ HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
+ HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3));
+ } else {
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
+ HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
+ HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4));
+ }
+
+ /* No matter what, start from the power down mode: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_PWRGEN |
+ HDMI_8x60_PHY_REG2_PD_PLL |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ /* Turn PowerGen on: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_PLL |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ /* Turn PLL power on: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ /* Write to HIGH after PLL power down de-assert: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3,
+ HDMI_8x60_PHY_REG3_PLL_ENABLE);
+
+ /* ASIC power on; PHY REG9 = 0 */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
+
+	/* Enable PLL lock detect (lock det goes high once the PLL locks)
+	 * and enable the re-timing logic:
+	 */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
+ HDMI_8x60_PHY_REG12_RETIMING_EN |
+ HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN);
+
+ /* Drivers are on: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+	/* Enable the RX (receiver) sense detector: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG4, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG5, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG6, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG7, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG8, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG10, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG11, 0);
+
+	/* Use the counting-based (forced) lock enable: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
+ HDMI_8x60_PHY_REG12_RETIMING_EN |
+ HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN |
+ HDMI_8x60_PHY_REG12_FORCE_LOCK);
+}
+
+static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
+ struct hdmi *hdmi = phy_8x60->hdmi;
+
+ /* Assert RESET PHY from controller */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ HDMI_PHY_CTRL_SW_RESET);
+ udelay(10);
+ /* De-assert RESET PHY from controller */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL, 0);
+ /* Turn off Driver */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+ udelay(10);
+ /* Disable PLL */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3, 0);
+ /* Power down PHY, but keep RX-sense: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
+ HDMI_8x60_PHY_REG2_PD_PWRGEN |
+ HDMI_8x60_PHY_REG2_PD_PLL |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+}
+
+static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
+ .destroy = hdmi_phy_8x60_destroy,
+ .reset = hdmi_phy_8x60_reset,
+ .powerup = hdmi_phy_8x60_powerup,
+ .powerdown = hdmi_phy_8x60_powerdown,
+};
+
+struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi)
+{
+ struct hdmi_phy_8x60 *phy_8x60;
+ struct hdmi_phy *phy = NULL;
+ int ret;
+
+ phy_8x60 = kzalloc(sizeof(*phy_8x60), GFP_KERNEL);
+ if (!phy_8x60) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ phy = &phy_8x60->base;
+
+ phy->funcs = &hdmi_phy_8x60_funcs;
+
+ phy_8x60->hdmi = hdmi;
+
+ return phy;
+
+fail:
+ if (phy)
+ hdmi_phy_8x60_destroy(phy);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
new file mode 100644
index 00000000000..59fa6cdacb2
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_phy_8x74 {
+ struct hdmi_phy base;
+ struct hdmi *hdmi;
+ void __iomem *mmio;
+};
+#define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base)
+
+static void phy_write(struct hdmi_phy_8x74 *phy, u32 reg, u32 data)
+{
+ msm_writel(data, phy->mmio + reg);
+}
+
+#if 0 /* unused for now; read helper matching phy_write(): */
+static u32 phy_read(struct hdmi_phy_8x74 *phy, u32 reg)
+{
+	return msm_readl(phy->mmio + reg);
+}
+#endif
+
+static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+ kfree(phy_8x74);
+}
+
+static void hdmi_phy_8x74_reset(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+ struct hdmi *hdmi = phy_8x74->hdmi;
+ unsigned int val;
+
+ /* NOTE that HDMI_PHY_CTL is in core mmio, not phy mmio: */
+
+ val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+
+ msleep(100);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+}
+
+static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy,
+ unsigned long int pixclock)
+{
+ struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+
+ phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG0, 0x1b);
+ phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG1, 0xf2);
+ phy_write(phy_8x74, REG_HDMI_8x74_BIST_CFG0, 0x0);
+ phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN0, 0x0);
+ phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN1, 0x0);
+ phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN2, 0x0);
+ phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN3, 0x0);
+ phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL1, 0x20);
+}
+
+static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+ phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL0, 0x7f);
+}
+
+static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = {
+ .destroy = hdmi_phy_8x74_destroy,
+ .reset = hdmi_phy_8x74_reset,
+ .powerup = hdmi_phy_8x74_powerup,
+ .powerdown = hdmi_phy_8x74_powerdown,
+};
+
+struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi)
+{
+ struct hdmi_phy_8x74 *phy_8x74;
+ struct hdmi_phy *phy = NULL;
+ int ret;
+
+ phy_8x74 = kzalloc(sizeof(*phy_8x74), GFP_KERNEL);
+ if (!phy_8x74) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ phy = &phy_8x74->base;
+
+ phy->funcs = &hdmi_phy_8x74_funcs;
+
+ phy_8x74->hdmi = hdmi;
+
+ /* for 8x74, the phy mmio is mapped separately: */
+ phy_8x74->mmio = msm_ioremap(hdmi->pdev,
+ "phy_physical", "HDMI_8x74");
+ if (IS_ERR(phy_8x74->mmio)) {
+ ret = PTR_ERR(phy_8x74->mmio);
+ goto fail;
+ }
+
+ return phy;
+
+fail:
+ if (phy)
+ hdmi_phy_8x74_destroy(phy);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
new file mode 100644
index 00000000000..d591567173c
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -0,0 +1,52 @@
+#ifndef QFPROM_XML
+#define QFPROM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define REG_QFPROM_CONFIG_ROW0_LSB 0x00000238
+#define QFPROM_CONFIG_ROW0_LSB_HDMI_DISABLE 0x00200000
+#define QFPROM_CONFIG_ROW0_LSB_HDCP_DISABLE 0x00400000
+
+
+#endif /* QFPROM_XML */
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
new file mode 100644
index 00000000000..416a26e1e58
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -0,0 +1,1033 @@
+#ifndef MDP4_XML
+#define MDP4_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp4_pipe {
+ VG1 = 0,
+ VG2 = 1,
+ RGB1 = 2,
+ RGB2 = 3,
+ RGB3 = 4,
+ VG3 = 5,
+ VG4 = 6,
+};
+
+enum mdp4_mixer {
+ MIXER0 = 0,
+ MIXER1 = 1,
+ MIXER2 = 2,
+};
+
+enum mdp4_intf {
+ INTF_LCDC_DTV = 0,
+ INTF_DSI_VIDEO = 1,
+ INTF_DSI_CMD = 2,
+ INTF_EBI2_TV = 3,
+};
+
+enum mdp4_cursor_format {
+ CURSOR_ARGB = 1,
+ CURSOR_XRGB = 2,
+};
+
+enum mdp4_dma {
+ DMA_P = 0,
+ DMA_S = 1,
+ DMA_E = 2,
+};
+
+#define MDP4_IRQ_OVERLAY0_DONE 0x00000001
+#define MDP4_IRQ_OVERLAY1_DONE 0x00000002
+#define MDP4_IRQ_DMA_S_DONE 0x00000004
+#define MDP4_IRQ_DMA_E_DONE 0x00000008
+#define MDP4_IRQ_DMA_P_DONE 0x00000010
+#define MDP4_IRQ_VG1_HISTOGRAM 0x00000020
+#define MDP4_IRQ_VG2_HISTOGRAM 0x00000040
+#define MDP4_IRQ_PRIMARY_VSYNC 0x00000080
+#define MDP4_IRQ_PRIMARY_INTF_UDERRUN 0x00000100
+#define MDP4_IRQ_EXTERNAL_VSYNC 0x00000200
+#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN 0x00000400
+#define MDP4_IRQ_PRIMARY_RDPTR 0x00000800
+#define MDP4_IRQ_DMA_P_HISTOGRAM 0x00020000
+#define MDP4_IRQ_DMA_S_HISTOGRAM 0x04000000
+#define MDP4_IRQ_OVERLAY2_DONE 0x40000000
+#define REG_MDP4_VERSION 0x00000000
+#define MDP4_VERSION_MINOR__MASK 0x00ff0000
+#define MDP4_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP4_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK;
+}
+#define MDP4_VERSION_MAJOR__MASK 0xff000000
+#define MDP4_VERSION_MAJOR__SHIFT 24
+static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK;
+}
+
+#define REG_MDP4_OVLP0_KICK 0x00000004
+
+#define REG_MDP4_OVLP1_KICK 0x00000008
+
+#define REG_MDP4_OVLP2_KICK 0x000000d0
+
+#define REG_MDP4_DMA_P_KICK 0x0000000c
+
+#define REG_MDP4_DMA_S_KICK 0x00000010
+
+#define REG_MDP4_DMA_E_KICK 0x00000014
+
+#define REG_MDP4_DISP_STATUS 0x00000018
+
+#define REG_MDP4_DISP_INTF_SEL 0x00000038
+#define MDP4_DISP_INTF_SEL_PRIM__MASK 0x00000003
+#define MDP4_DISP_INTF_SEL_PRIM__SHIFT 0
+static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val)
+{
+ return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK;
+}
+#define MDP4_DISP_INTF_SEL_SEC__MASK 0x0000000c
+#define MDP4_DISP_INTF_SEL_SEC__SHIFT 2
+static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val)
+{
+ return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK;
+}
+#define MDP4_DISP_INTF_SEL_EXT__MASK 0x00000030
+#define MDP4_DISP_INTF_SEL_EXT__SHIFT 4
+static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
+{
+ return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK;
+}
+#define MDP4_DISP_INTF_SEL_DSI_VIDEO 0x00000040
+#define MDP4_DISP_INTF_SEL_DSI_CMD 0x00000080
+
+#define REG_MDP4_RESET_STATUS 0x0000003c
+
+#define REG_MDP4_READ_CNFG 0x0000004c
+
+#define REG_MDP4_INTR_ENABLE 0x00000050
+
+#define REG_MDP4_INTR_STATUS 0x00000054
+
+#define REG_MDP4_INTR_CLEAR 0x00000058
+
+#define REG_MDP4_EBI2_LCD0 0x00000060
+
+#define REG_MDP4_EBI2_LCD1 0x00000064
+
+#define REG_MDP4_PORTMAP_MODE 0x00000070
+
+#define REG_MDP4_CS_CONTROLLER0 0x000000c0
+
+#define REG_MDP4_CS_CONTROLLER1 0x000000c4
+
+#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1 0x80000000
+
+#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD 0x000100fc
+
+#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1 0x80000000
+
+#define REG_MDP4_VG2_SRC_FORMAT 0x00030050
+
+#define REG_MDP4_VG2_CONST_COLOR 0x00031008
+
+#define REG_MDP4_OVERLAY_FLUSH 0x00018000
+#define MDP4_OVERLAY_FLUSH_OVLP0 0x00000001
+#define MDP4_OVERLAY_FLUSH_OVLP1 0x00000002
+#define MDP4_OVERLAY_FLUSH_VG1 0x00000004
+#define MDP4_OVERLAY_FLUSH_VG2 0x00000008
+#define MDP4_OVERLAY_FLUSH_RGB1 0x00000010
+#define MDP4_OVERLAY_FLUSH_RGB2 0x00000020
+
+static inline uint32_t __offset_OVLP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00010000;
+ case 1: return 0x00018000;
+ case 2: return 0x00088000;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); }
+#define MDP4_OVLP_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_OVLP_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK;
+}
+#define MDP4_OVLP_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_OVLP_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t i0) { return 0x00000014 + __offset_OVLP(i0); }
+
+static inline uint32_t __offset_STAGE(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000104;
+ case 1: return 0x00000124;
+ case 2: return 0x00000144;
+ case 3: return 0x00000160;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
+#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
+static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp_alpha_type val)
+{
+ return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
+}
+#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA 0x00000004
+#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
+#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
+#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
+static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp_alpha_type val)
+{
+ return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
+}
+#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA 0x00000040
+#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA 0x00000080
+#define MDP4_OVLP_STAGE_OP_FG_TRANSP 0x00000100
+#define MDP4_OVLP_STAGE_OP_BG_TRANSP 0x00000200
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t __offset_STAGE_CO3(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00001004;
+ case 1: return 0x00001404;
+ case 2: return 0x00001804;
+ case 3: return 0x00001b84;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
+#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA 0x00000001
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) { return 0x00000200 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); }
+
+
+static inline uint32_t REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
+
+#define REG_MDP4_DMA_P_OP_MODE 0x00090070
+
+static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; }
+
+static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
+
+#define REG_MDP4_DMA_S_OP_MODE 0x000a0028
+
+static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; }
+
+static inline uint32_t __offset_DMA(enum mdp4_dma idx)
+{
+ switch (idx) {
+ case DMA_P: return 0x00090000;
+ case DMA_S: return 0x000a0000;
+ case DMA_E: return 0x000b0000;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
+#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
+#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
+static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
+#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
+static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
+#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
+static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB 0x00000080
+#define MDP4_DMA_CONFIG_PACK__MASK 0x0000ff00
+#define MDP4_DMA_CONFIG_PACK__SHIFT 8
+static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK;
+}
+#define MDP4_DMA_CONFIG_DEFLKR_EN 0x01000000
+#define MDP4_DMA_CONFIG_DITHER_EN 0x01000000
+
+static inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + __offset_DMA(i0); }
+#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP4_DMA_SRC_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); }
+#define MDP4_DMA_DST_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK;
+}
+#define MDP4_DMA_DST_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK 0x0000007f
+#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK;
+}
+#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK 0x007f0000
+#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_POS_X__MASK 0x0000ffff
+#define MDP4_DMA_CURSOR_POS_X__SHIFT 0
+static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK;
+}
+#define MDP4_DMA_CURSOR_POS_Y__MASK 0xffff0000
+#define MDP4_DMA_CURSOR_POS_Y__SHIFT 16
+static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN 0x00000001
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK 0x00000006
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT 1
+static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val)
+{
+ return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK;
+}
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN 0x00000008
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma i0) { return 0x00003000 + __offset_DMA(i0); }
+
+
+static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mdp4_pipe i0) { return 0x00020004 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000
+#define MDP4_PIPE_SRC_XY_Y__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK;
+}
+#define MDP4_PIPE_SRC_XY_X__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_XY_X__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mdp4_pipe i0) { return 0x00020008 + 0x10000*i0; }
+#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_DST_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mdp4_pipe i0) { return 0x0002000c + 0x10000*i0; }
+#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000
+#define MDP4_PIPE_DST_XY_Y__SHIFT 16
+static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK;
+}
+#define MDP4_PIPE_DST_XY_X__MASK 0x0000ffff
+#define MDP4_PIPE_DST_XY_X__SHIFT 0
+static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mdp4_pipe i0) { return 0x00020010 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00020014 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK;
+}
+#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
+#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mdp4_pipe i0) { return 0x00020044 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK;
+}
+#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
+#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; }
+#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_FRAME_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
+#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
+#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
+#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
+#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
+#define MDP4_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
+#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT 9
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_ROTATED_90 0x00001000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00006000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 13
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
+#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000
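+
+/* Minimal sketch of composing SRC_FORMAT for a tightly-packed 8888
+ * format; the concrete values here (cpp - 1 and unpack_count - 1
+ * encodings, BPC8A for the alpha channel) are assumptions about the
+ * hardware encoding rather than something this header spells out:
+ *
+ *   uint32_t fmt = MDP4_PIPE_SRC_FORMAT_A_BPC(BPC8A) |
+ *           MDP4_PIPE_SRC_FORMAT_R_BPC(BPC8) |
+ *           MDP4_PIPE_SRC_FORMAT_G_BPC(BPC8) |
+ *           MDP4_PIPE_SRC_FORMAT_B_BPC(BPC8) |
+ *           MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE |
+ *           MDP4_PIPE_SRC_FORMAT_CPP(4 - 1) |
+ *           MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(4 - 1) |
+ *           MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT;
+ *   mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe), fmt);
+ */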
+
+static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
+#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
+#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
+#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
+#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; }
+#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001
+#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002
+#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200
+#define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400
+#define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800
+#define MDP4_PIPE_OP_MODE_FLIP_LR 0x00002000
+#define MDP4_PIPE_OP_MODE_FLIP_UD 0x00004000
+#define MDP4_PIPE_OP_MODE_DITHER_EN 0x00008000
+#define MDP4_PIPE_OP_MODE_IGC_LUT_EN 0x00010000
+#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000
+#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000
+
+static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mdp4_pipe i0) { return 0x0002005c + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mdp4_pipe i0) { return 0x00020060 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mdp4_pipe i0) { return 0x00021004 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mdp4_pipe i0) { return 0x00021008 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC(enum mdp4_pipe i0) { return 0x00024000 + 0x10000*i0; }
+
+
+static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
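+
+/* Layout note (an assumption based on the usual MDP CSC arrangement,
+ * not spelled out by this header): i1 indexes the nine coefficients of
+ * the 3x3 matrix for the MV arrays, and the per-component bias (BV) and
+ * limit (LV) vectors applied before/after the matrix multiply for the
+ * PRE/POST arrays.
+ */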
+
+#define REG_MDP4_LCDC 0x000c0000
+
+#define REG_MDP4_LCDC_ENABLE 0x000c0000
+
+#define REG_MDP4_LCDC_HSYNC_CTRL 0x000c0004
+#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
+#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT 0
+static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK 0xffff0000
+#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT 16
+static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_LCDC_VSYNC_PERIOD 0x000c0008
+
+#define REG_MDP4_LCDC_VSYNC_LEN 0x000c000c
+
+#define REG_MDP4_LCDC_DISPLAY_HCTRL 0x000c0010
+#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK 0x0000ffff
+#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT 0
+static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK 0xffff0000
+#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT 16
+static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_LCDC_DISPLAY_VSTART 0x000c0014
+
+#define REG_MDP4_LCDC_DISPLAY_VEND 0x000c0018
+
+#define REG_MDP4_LCDC_ACTIVE_HCTL 0x000c001c
+#define MDP4_LCDC_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_LCDC_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
+
+#define REG_MDP4_LCDC_ACTIVE_VSTART 0x000c0020
+
+#define REG_MDP4_LCDC_ACTIVE_VEND 0x000c0024
+
+#define REG_MDP4_LCDC_BORDER_CLR 0x000c0028
+
+#define REG_MDP4_LCDC_UNDERFLOW_CLR 0x000c002c
+#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
+#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT 0
+static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
+
+#define REG_MDP4_LCDC_HSYNC_SKEW 0x000c0030
+
+#define REG_MDP4_LCDC_TEST_CNTL 0x000c0034
+
+#define REG_MDP4_LCDC_CTRL_POLARITY 0x000c0038
+#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW 0x00000001
+#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002
+#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004
+
+#define REG_MDP4_DTV 0x000d0000
+
+#define REG_MDP4_DTV_ENABLE 0x000d0000
+
+#define REG_MDP4_DTV_HSYNC_CTRL 0x000d0004
+#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
+#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT 0
+static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK 0xffff0000
+#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT 16
+static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_DTV_VSYNC_PERIOD 0x000d0008
+
+#define REG_MDP4_DTV_VSYNC_LEN 0x000d000c
+
+#define REG_MDP4_DTV_DISPLAY_HCTRL 0x000d0018
+#define MDP4_DTV_DISPLAY_HCTRL_START__MASK 0x0000ffff
+#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT 0
+static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val)
+{
+ return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_DTV_DISPLAY_HCTRL_END__MASK 0xffff0000
+#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT 16
+static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val)
+{
+ return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_DTV_DISPLAY_VSTART 0x000d001c
+
+#define REG_MDP4_DTV_DISPLAY_VEND 0x000d0020
+
+#define REG_MDP4_DTV_ACTIVE_HCTL 0x000d002c
+#define MDP4_DTV_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_DTV_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
+
+#define REG_MDP4_DTV_ACTIVE_VSTART 0x000d0030
+
+#define REG_MDP4_DTV_ACTIVE_VEND 0x000d0038
+
+#define REG_MDP4_DTV_BORDER_CLR 0x000d0040
+
+#define REG_MDP4_DTV_UNDERFLOW_CLR 0x000d0044
+#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
+#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT 0
+static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+ return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
+
+#define REG_MDP4_DTV_HSYNC_SKEW 0x000d0048
+
+#define REG_MDP4_DTV_TEST_CNTL 0x000d004c
+
+#define REG_MDP4_DTV_CTRL_POLARITY 0x000d0050
+#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW 0x00000001
+#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW 0x00000002
+#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW 0x00000004
+
+#define REG_MDP4_DSI 0x000e0000
+
+#define REG_MDP4_DSI_ENABLE 0x000e0000
+
+#define REG_MDP4_DSI_HSYNC_CTRL 0x000e0004
+#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
+#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT 0
+static inline uint32_t MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK 0xffff0000
+#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT 16
+static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_DSI_VSYNC_PERIOD 0x000e0008
+
+#define REG_MDP4_DSI_VSYNC_LEN 0x000e000c
+
+#define REG_MDP4_DSI_DISPLAY_HCTRL 0x000e0010
+#define MDP4_DSI_DISPLAY_HCTRL_START__MASK 0x0000ffff
+#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT 0
+static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val)
+{
+ return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_DSI_DISPLAY_HCTRL_END__MASK 0xffff0000
+#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT 16
+static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val)
+{
+ return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_DSI_DISPLAY_VSTART 0x000e0014
+
+#define REG_MDP4_DSI_DISPLAY_VEND 0x000e0018
+
+#define REG_MDP4_DSI_ACTIVE_HCTL 0x000e001c
+#define MDP4_DSI_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_DSI_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
+
+#define REG_MDP4_DSI_ACTIVE_VSTART 0x000e0020
+
+#define REG_MDP4_DSI_ACTIVE_VEND 0x000e0024
+
+#define REG_MDP4_DSI_BORDER_CLR 0x000e0028
+
+#define REG_MDP4_DSI_UNDERFLOW_CLR 0x000e002c
+#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
+#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT 0
+static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+ return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
+
+#define REG_MDP4_DSI_HSYNC_SKEW 0x000e0030
+
+#define REG_MDP4_DSI_TEST_CNTL 0x000e0034
+
+#define REG_MDP4_DSI_CTRL_POLARITY 0x000e0038
+#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW 0x00000001
+#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW 0x00000002
+#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW 0x00000004
+
+
+#endif /* MDP4_XML */
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
new file mode 100644
index 00000000000..74cebb51e8c
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -0,0 +1,804 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp4_kms.h"
+
+#include <drm/drm_mode.h>
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_flip_work.h"
+
+struct mdp4_crtc {
+ struct drm_crtc base;
+ char name[8];
+ struct drm_plane *plane;
+ struct drm_plane *planes[8];
+ int id;
+ int ovlp;
+ enum mdp4_dma dma;
+ bool enabled;
+
+ /* which mixer/encoder we route output to: */
+ int mixer;
+
+ struct {
+ spinlock_t lock;
+ bool stale;
+ uint32_t width, height;
+ uint32_t x, y;
+
+ /* next cursor to scan-out: */
+ uint32_t next_iova;
+ struct drm_gem_object *next_bo;
+
+ /* current cursor being scanned out: */
+ struct drm_gem_object *scanout_bo;
+ } cursor;
+
+ /* if there is a pending flip, these will be non-null: */
+ struct drm_pending_vblank_event *event;
+ struct msm_fence_cb pageflip_cb;
+
+#define PENDING_CURSOR 0x1
+#define PENDING_FLIP 0x2
+ atomic_t pending;
+
+	/* the fb that we logically (from the PoV of the KMS API) hold a
+	 * ref to.  We may not yet be scanning it out (we may still be
+	 * scanning out the previous fb in case of a page_flip while
+	 * waiting for gpu rendering to complete):
+	 */
+ struct drm_framebuffer *fb;
+
+ /* the fb that we currently hold a scanout ref to: */
+ struct drm_framebuffer *scanout_fb;
+
+ /* for unref'ing framebuffers after scanout completes: */
+ struct drm_flip_work unref_fb_work;
+
+ /* for unref'ing cursor bo's after scanout completes: */
+ struct drm_flip_work unref_cursor_work;
+
+ struct mdp_irq vblank;
+ struct mdp_irq err;
+};
+#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
+
+static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ atomic_or(pending, &mdp4_crtc->pending);
+ mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+}
+
+static void crtc_flush(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ uint32_t i, flush = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+ struct drm_plane *plane = mdp4_crtc->planes[i];
+ if (plane) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ flush |= pipe2flush(pipe_id);
+ }
+ }
+ flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+ DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
+static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_framebuffer *old_fb = mdp4_crtc->fb;
+
+ /* grab reference to incoming scanout fb: */
+ drm_framebuffer_reference(new_fb);
+ mdp4_crtc->base.primary->fb = new_fb;
+ mdp4_crtc->fb = new_fb;
+
+ if (old_fb)
+ drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+}
+
+/* unlike update_fb(), take a ref to the new scanout fb *before* updating
+ * the plane, then call this.  Needed to ensure we don't unref the buffer
+ * that is actually still being scanned out.
+ *
+ * Note that this whole thing goes away with atomic.. since we can defer
+ * calling into driver until rendering is done.
+ */
+static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ /* flush updates, to make sure hw is updated to new scanout fb,
+ * so that we can safely queue unref to current fb (ie. next
+ * vblank we know hw is done w/ previous scanout_fb).
+ */
+ crtc_flush(crtc);
+
+ if (mdp4_crtc->scanout_fb)
+ drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
+ mdp4_crtc->scanout_fb);
+
+ mdp4_crtc->scanout_fb = fb;
+
+ /* enable vblank to complete flip: */
+ request_pending(crtc, PENDING_FLIP);
+}
+
+/* if file!=NULL, this is preclose potential cancel-flip path */
+static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = mdp4_crtc->event;
+ if (event) {
+ /* if regular vblank case (!file) or if cancel-flip from
+ * preclose on file that requested flip, then send the
+ * event:
+ */
+ if (!file || (event->base.file_priv == file)) {
+ mdp4_crtc->event = NULL;
+ drm_send_vblank_event(dev, mdp4_crtc->id, event);
+ }
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static void pageflip_cb(struct msm_fence_cb *cb)
+{
+ struct mdp4_crtc *mdp4_crtc =
+ container_of(cb, struct mdp4_crtc, pageflip_cb);
+ struct drm_crtc *crtc = &mdp4_crtc->base;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+
+ if (!fb)
+ return;
+
+ drm_framebuffer_reference(fb);
+ mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
+ update_scanout(crtc, fb);
+}
+
+static void unref_fb_worker(struct drm_flip_work *work, void *val)
+{
+ struct mdp4_crtc *mdp4_crtc =
+ container_of(work, struct mdp4_crtc, unref_fb_work);
+ struct drm_device *dev = mdp4_crtc->base.dev;
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_framebuffer_unreference(val);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void unref_cursor_worker(struct drm_flip_work *work, void *val)
+{
+ struct mdp4_crtc *mdp4_crtc =
+ container_of(work, struct mdp4_crtc, unref_cursor_work);
+ struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
+
+ msm_gem_put_iova(val, mdp4_kms->id);
+ drm_gem_object_unreference_unlocked(val);
+}
+
+static void mdp4_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
+ drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
+
+ kfree(mdp4_crtc);
+}
+
+static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+ DBG("%s: mode=%d", mdp4_crtc->name, mode);
+
+ if (enabled != mdp4_crtc->enabled) {
+ if (enabled) {
+ mdp4_enable(mdp4_kms);
+ mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
+ } else {
+ mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
+ mdp4_disable(mdp4_kms);
+ }
+ mdp4_crtc->enabled = enabled;
+ }
+}
+
+static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ int i, ovlp = mdp4_crtc->ovlp;
+ uint32_t mixer_cfg = 0;
+ static const enum mdp_mixer_stage_id stages[] = {
+ STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
+ };
+ /* statically (for now) map planes to mixer stage (z-order): */
+ static const int idxs[] = {
+ [VG1] = 1,
+ [VG2] = 2,
+ [RGB1] = 0,
+ [RGB2] = 0,
+ [RGB3] = 0,
+ [VG3] = 3,
+ [VG4] = 4,
+ };
+	bool alpha[4] = { false, false, false, false };
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
+
+ /* TODO single register for all CRTCs, so this won't work properly
+ * when multiple CRTCs are active..
+ */
+ for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+ struct drm_plane *plane = mdp4_crtc->planes[i];
+ if (plane) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ int idx = idxs[pipe_id];
+ if (idx > 0) {
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(plane->fb));
+ alpha[idx-1] = format->alpha_enable;
+ }
+ mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]);
+ }
+ }
+
+ /* this shouldn't happen.. and seems to cause underflow: */
+ WARN_ON(!mixer_cfg);
+
+ for (i = 0; i < 4; i++) {
+ uint32_t op;
+
+ if (alpha[i]) {
+ op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
+ MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
+ MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
+ } else {
+ op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
+ MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+}
+
+static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ enum mdp4_dma dma = mdp4_crtc->dma;
+ int ret, ovlp = mdp4_crtc->ovlp;
+
+ mode = adjusted_mode;
+
+ DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mdp4_crtc->name, mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->primary->fb);
+
+ ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->primary->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->primary->fb);
+ dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+ mdp4_crtc->name, ret);
+ return ret;
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
+ MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
+ MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
+
+ /* take data from pipe: */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
+ crtc->primary->fb->pitches[0]);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
+ MDP4_DMA_DST_SIZE_WIDTH(0) |
+ MDP4_DMA_DST_SIZE_HEIGHT(0));
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
+ MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
+ MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
+ crtc->primary->fb->pitches[0]);
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
+
+ if (dma == DMA_E) {
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
+ }
+
+ update_fb(crtc, crtc->primary->fb);
+ update_scanout(crtc, crtc->primary->fb);
+
+ return 0;
+}
+
+static void mdp4_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ DBG("%s", mdp4_crtc->name);
+ /* make sure we hold a ref to mdp clks while setting up mode: */
+ mdp4_enable(get_kms(crtc));
+ mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp4_crtc_commit(struct drm_crtc *crtc)
+{
+ mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ crtc_flush(crtc);
+ /* drop the ref to mdp clk's that we got in prepare: */
+ mdp4_disable(get_kms(crtc));
+}
+
+static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_plane *plane = mdp4_crtc->plane;
+ struct drm_display_mode *mode = &crtc->mode;
+ int ret;
+
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->primary->fb);
+
+ ret = mdp4_plane_mode_set(plane, crtc, crtc->primary->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->primary->fb);
+ return ret;
+ }
+
+ update_fb(crtc, crtc->primary->fb);
+ update_scanout(crtc, crtc->primary->fb);
+
+ return 0;
+}
+
+static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *new_fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_gem_object *obj;
+ unsigned long flags;
+
+ if (mdp4_crtc->event) {
+ dev_err(dev->dev, "already pending flip!\n");
+ return -EBUSY;
+ }
+
+ obj = msm_framebuffer_bo(new_fb, 0);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ mdp4_crtc->event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ update_fb(crtc, new_fb);
+
+ return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
+}
+
+static int mdp4_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val)
+{
+ // XXX
+ return -EINVAL;
+}
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+/* called from IRQ to update cursor related registers (if needed). The
+ * cursor registers, other than x/y position, appear not to be double
+ * buffered, and changing them other than from vblank seems to trigger
+ * underflow.
+ */
+static void update_cursor(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ enum mdp4_dma dma = mdp4_crtc->dma;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ if (mdp4_crtc->cursor.stale) {
+ struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
+ struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
+ uint32_t iova = mdp4_crtc->cursor.next_iova;
+
+ if (next_bo) {
+			/* take an obj ref + iova ref when we start scanning out: */
+ drm_gem_object_reference(next_bo);
+ msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
+
+ /* enable cursor: */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
+ MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
+ MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
+ MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
+ MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
+ } else {
+ /* disable cursor: */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
+ mdp4_kms->blank_cursor_iova);
+ }
+
+		/* and drop the iova ref + obj ref when done scanning out: */
+ if (prev_bo)
+ drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);
+
+ mdp4_crtc->cursor.scanout_bo = next_bo;
+ mdp4_crtc->cursor.stale = false;
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
+ MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
+ MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));
+
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+}
+
+static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_gem_object *cursor_bo, *old_bo;
+ unsigned long flags;
+ uint32_t iova;
+ int ret;
+
+ if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
+ dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+ return -EINVAL;
+ }
+
+ if (handle) {
+ cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!cursor_bo)
+ return -ENOENT;
+ } else {
+ cursor_bo = NULL;
+ }
+
+ if (cursor_bo) {
+ ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
+ if (ret)
+ goto fail;
+ } else {
+ iova = 0;
+ }
+
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ old_bo = mdp4_crtc->cursor.next_bo;
+ mdp4_crtc->cursor.next_bo = cursor_bo;
+ mdp4_crtc->cursor.next_iova = iova;
+ mdp4_crtc->cursor.width = width;
+ mdp4_crtc->cursor.height = height;
+ mdp4_crtc->cursor.stale = true;
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+ if (old_bo) {
+ /* drop our previous reference: */
+ drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
+ }
+
+ request_pending(crtc, PENDING_CURSOR);
+
+ return 0;
+
+fail:
+ drm_gem_object_unreference_unlocked(cursor_bo);
+ return ret;
+}
+
+static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ mdp4_crtc->cursor.x = x;
+ mdp4_crtc->cursor.y = y;
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+ crtc_flush(crtc);
+ request_pending(crtc, PENDING_CURSOR);
+
+ return 0;
+}
+
+static const struct drm_crtc_funcs mdp4_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = mdp4_crtc_destroy,
+ .page_flip = mdp4_crtc_page_flip,
+ .set_property = mdp4_crtc_set_property,
+ .cursor_set = mdp4_crtc_cursor_set,
+ .cursor_move = mdp4_crtc_cursor_move,
+};
+
+static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
+ .dpms = mdp4_crtc_dpms,
+ .mode_fixup = mdp4_crtc_mode_fixup,
+ .mode_set = mdp4_crtc_mode_set,
+ .prepare = mdp4_crtc_prepare,
+ .commit = mdp4_crtc_commit,
+ .mode_set_base = mdp4_crtc_mode_set_base,
+ .load_lut = mdp4_crtc_load_lut,
+};
+
+static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
+ struct drm_crtc *crtc = &mdp4_crtc->base;
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ unsigned pending;
+
+ mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+
+ pending = atomic_xchg(&mdp4_crtc->pending, 0);
+
+ if (pending & PENDING_FLIP) {
+ complete_flip(crtc, NULL);
+ drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
+ }
+
+ if (pending & PENDING_CURSOR) {
+ update_cursor(crtc);
+ drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
+ }
+}
+
+static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
+ struct drm_crtc *crtc = &mdp4_crtc->base;
+ DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
+ crtc_flush(crtc);
+}
+
+uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ return mdp4_crtc->vblank.irqmask;
+}
+
+void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ DBG("cancel: %p", file);
+ complete_flip(crtc, file);
+}
+
+/* set dma config, ie. the format the encoder wants. */
+void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
+}
+
+/* set interface for routing crtc->encoder: */
+void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ uint32_t intf_sel;
+
+ intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);
+
+ switch (mdp4_crtc->dma) {
+ case DMA_P:
+ intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
+ intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
+ break;
+ case DMA_S:
+ intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
+ intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
+ break;
+ case DMA_E:
+ intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
+ intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
+ break;
+ }
+
+ if (intf == INTF_DSI_VIDEO) {
+ intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
+ intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
+ mdp4_crtc->mixer = 0;
+ } else if (intf == INTF_DSI_CMD) {
+ intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
+ intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
+ mdp4_crtc->mixer = 0;
+	} else if (intf == INTF_LCDC_DTV) {
+ mdp4_crtc->mixer = 1;
+ }
+
+ blend_setup(crtc);
+
+ DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
+}
+
+static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
+ struct drm_plane *plane)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ BUG_ON(pipe_id >= ARRAY_SIZE(mdp4_crtc->planes));
+
+ if (mdp4_crtc->planes[pipe_id] == plane)
+ return;
+
+ mdp4_crtc->planes[pipe_id] = plane;
+ blend_setup(crtc);
+ if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
+ crtc_flush(crtc);
+}
+
+void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+ set_attach(crtc, mdp4_plane_pipe(plane), plane);
+}
+
+void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+	/* don't actually detach our primary plane: */
+ if (to_mdp4_crtc(crtc)->plane == plane)
+ return;
+ set_attach(crtc, mdp4_plane_pipe(plane), NULL);
+}
+
+static const char *dma_names[] = {
+ "DMA_P", "DMA_S", "DMA_E",
+};
+
+/* initialize crtc */
+struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, int id, int ovlp_id,
+ enum mdp4_dma dma_id)
+{
+ struct drm_crtc *crtc = NULL;
+ struct mdp4_crtc *mdp4_crtc;
+ int ret;
+
+ mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
+ if (!mdp4_crtc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ crtc = &mdp4_crtc->base;
+
+ mdp4_crtc->plane = plane;
+ mdp4_crtc->id = id;
+
+ mdp4_crtc->ovlp = ovlp_id;
+ mdp4_crtc->dma = dma_id;
+
+ mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
+ mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;
+
+ mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
+ mdp4_crtc->err.irq = mdp4_crtc_err_irq;
+
+ snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
+ dma_names[dma_id], ovlp_id);
+
+ spin_lock_init(&mdp4_crtc->cursor.lock);
+
+ ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16,
+ "unref fb", unref_fb_worker);
+ if (ret)
+ goto fail;
+
+	ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
+			"unref cursor", unref_cursor_worker);
+	if (ret)
+		goto fail;
+
+ INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
+
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs);
+ drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
+
+ mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
+
+ return crtc;
+
+fail:
+ if (crtc)
+ mdp4_crtc_destroy(crtc);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
new file mode 100644
index 00000000000..067ed03b35f
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp4_kms.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+
+struct mdp4_dtv_encoder {
+ struct drm_encoder base;
+ struct clk *src_clk;
+ struct clk *hdmi_clk;
+ struct clk *mdp_clk;
+	unsigned long pixclock;
+ bool enabled;
+ uint32_t bsc;
+};
+#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base)
+
+static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+/* not ironically named at all.. no, really.. */
+static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
+{
+ struct drm_device *dev = mdp4_dtv_encoder->base.dev;
+ struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
+
+ if (!dtv_pdata) {
+ dev_err(dev->dev, "could not find dtv pdata\n");
+ return;
+ }
+
+ if (dtv_pdata->bus_scale_table) {
+ mdp4_dtv_encoder->bsc = msm_bus_scale_register_client(
+ dtv_pdata->bus_scale_table);
+ DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc);
+ DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save);
+ if (dtv_pdata->lcdc_power_save)
+ dtv_pdata->lcdc_power_save(1);
+ }
+}
+
+static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
+{
+ if (mdp4_dtv_encoder->bsc) {
+ msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc);
+ mdp4_dtv_encoder->bsc = 0;
+ }
+}
+
+static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx)
+{
+ if (mdp4_dtv_encoder->bsc) {
+ DBG("set bus scaling: %d", idx);
+ msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx);
+ }
+}
+#else
+static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
+static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
+static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {}
+#endif
+
+static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ bs_fini(mdp4_dtv_encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mdp4_dtv_encoder);
+}
+
+static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
+ .destroy = mdp4_dtv_encoder_destroy,
+};
+
+static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+ DBG("mode=%d", mode);
+
+ if (enabled == mdp4_dtv_encoder->enabled)
+ return;
+
+ if (enabled) {
+ unsigned long pc = mdp4_dtv_encoder->pixclock;
+ int ret;
+
+ bs_set(mdp4_dtv_encoder, 1);
+
+ DBG("setting src_clk=%lu", pc);
+
+ ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
+ if (ret)
+ dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
+ clk_prepare_enable(mdp4_dtv_encoder->src_clk);
+ ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
+ if (ret)
+ dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+ ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
+ if (ret)
+ dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
+ } else {
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
+
+		/*
+		 * Wait for a vsync so we know the ENABLE=0 latched before
+		 * the (connector) source of the vsyncs gets disabled.
+		 * Otherwise we end up in a funny state if we re-enable
+		 * before the disable latches, with the result that some
+		 * of the settings changes for the new modeset (like the
+		 * new scanout buffer) don't latch properly.
+		 */
+ mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
+
+ clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
+ clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
+ clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
+
+ bs_set(mdp4_dtv_encoder, 0);
+ }
+
+ mdp4_dtv_encoder->enabled = enabled;
+}
+
+static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+ uint32_t display_v_start, display_v_end;
+ uint32_t hsync_start_x, hsync_end_x;
+
+ mode = adjusted_mode;
+
+ DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ mdp4_dtv_encoder->pixclock = mode->clock * 1000;
+
+ DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock);
+
+ ctrl_pol = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW;
+ /* probably need to get DATA_EN polarity from panel.. */
+
+ dtv_hsync_skew = 0; /* get this from panel? */
+
+ hsync_start_x = (mode->htotal - mode->hsync_start);
+ hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+ vsync_period = mode->vtotal * mode->htotal;
+ vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+ display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
+ display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
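+
+	/*
+	 * Worked example, assuming a standard CEA 1080p60 mode (htotal=2200,
+	 * vtotal=1125, vsync from line 1084 to 1089): vsync_period =
+	 * 1125 * 2200 = 2475000 and vsync_len = 5 * 2200 = 11000, i.e.
+	 * the VSYNC registers are programmed in pixel-clock ticks rather
+	 * than in scanlines.
+	 */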
+
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL,
+ MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
+ MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL,
+ MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) |
+ MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR,
+ MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY |
+ MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL,
+ MDP4_DTV_ACTIVE_HCTL_START(0) |
+ MDP4_DTV_ACTIVE_HCTL_END(0));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0);
+}
+
+static void mdp4_dtv_encoder_prepare(struct drm_encoder *encoder)
+{
+ mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp4_dtv_encoder_commit(struct drm_encoder *encoder)
+{
+ mdp4_crtc_set_config(encoder->crtc,
+ MDP4_DMA_CONFIG_R_BPC(BPC8) |
+ MDP4_DMA_CONFIG_G_BPC(BPC8) |
+ MDP4_DMA_CONFIG_B_BPC(BPC8) |
+ MDP4_DMA_CONFIG_PACK(0x21));
+ mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV);
+ mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
+ .dpms = mdp4_dtv_encoder_dpms,
+ .mode_fixup = mdp4_dtv_encoder_mode_fixup,
+ .mode_set = mdp4_dtv_encoder_mode_set,
+ .prepare = mdp4_dtv_encoder_prepare,
+ .commit = mdp4_dtv_encoder_commit,
+};
+
+long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ return clk_round_rate(mdp4_dtv_encoder->src_clk, rate);
+}
+
+/* initialize encoder */
+struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder;
+ int ret;
+
+ mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL);
+ if (!mdp4_dtv_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ encoder = &mdp4_dtv_encoder->base;
+
+ drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
+
+ mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
+ if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
+ dev_err(dev->dev, "failed to get src_clk\n");
+ ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
+ goto fail;
+ }
+
+ mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
+ if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
+ dev_err(dev->dev, "failed to get hdmi_clk\n");
+ ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
+ goto fail;
+ }
+
+ mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk");
+ if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
+ dev_err(dev->dev, "failed to get mdp_clk\n");
+ ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
+ goto fail;
+ }
+
+ bs_init(mdp4_dtv_encoder);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp4_dtv_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
new file mode 100644
index 00000000000..8edd531cb62
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp4_kms.h"
+
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+ mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
+}
+
+static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+void mdp4_irq_preinstall(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
+}
+
+int mdp4_irq_postinstall(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
+ struct mdp_irq *error_handler = &mdp4_kms->error_handler;
+
+ error_handler->irq = mdp4_irq_error_handler;
+ error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
+ MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
+
+ mdp_irq_register(mdp_kms, error_handler);
+
+ return 0;
+}
+
+void mdp4_irq_uninstall(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+}
+
+irqreturn_t mdp4_irq(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
+ struct drm_device *dev = mdp4_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ unsigned int id;
+ uint32_t status;
+
+ status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
+
+ VERB("status=%08x", status);
+
+ mdp_dispatch_irqs(mdp_kms, status);
+
+ for (id = 0; id < priv->num_crtcs; id++)
+ if (status & mdp4_crtc_vblank(priv->crtcs[id]))
+ drm_handle_vblank(dev, id);
+
+ return IRQ_HANDLED;
+}
+
+int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp4_crtc_vblank(crtc), true);
+ return 0;
+}
+
+void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp4_crtc_vblank(crtc), false);
+}
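
mdp4_irq() latches REG_MDP4_INTR_STATUS, acks it via INTR_CLEAR, and hands the status word to mdp_dispatch_irqs(), which lives in the shared mdp helper code rather than in this file. Roughly, the dispatcher can be assumed to walk the registered struct mdp_irq handlers and fire any whose irqmask overlaps the latched status. A sketch only; the list field names are assumptions and the real helper (in mdp_kms.c) also handles registration and locking:

#include <linux/list.h>

/* sketch: deliver a latched status word to interested handlers */
static void example_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
{
	struct mdp_irq *handler;

	/* assumes an irq_list head in mdp_kms and a node member in mdp_irq */
	list_for_each_entry(handler, &mdp_kms->irq_list, node)
		if (handler->irqmask & status)
			handler->irq(handler, status & handler->irqmask);
}

This is how the underrun error_handler registered in mdp4_irq_postinstall() ends up seeing only the error bits it asked for.
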
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
new file mode 100644
index 00000000000..0bb4faa1752
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "mdp4_kms.h"
+
+static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
+
+static int mdp4_hw_init(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct drm_device *dev = mdp4_kms->dev;
+ uint32_t version, major, minor, dmap_cfg, vg_cfg;
+ unsigned long clk;
+ int ret = 0;
+
+ pm_runtime_get_sync(dev->dev);
+
+ mdp4_enable(mdp4_kms);
+ version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
+ mdp4_disable(mdp4_kms);
+
+ major = FIELD(version, MDP4_VERSION_MAJOR);
+ minor = FIELD(version, MDP4_VERSION_MINOR);
+
+ DBG("found MDP4 version v%d.%d", major, minor);
+
+ if (major != 4) {
+ dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto out;
+ }
+
+ mdp4_kms->rev = minor;
+
+ if (mdp4_kms->dsi_pll_vdda) {
+ if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) {
+ ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda,
+ 1200000, 1200000);
+ if (ret) {
+ dev_err(dev->dev,
+ "failed to set dsi_pll_vdda voltage: %d\n", ret);
+ goto out;
+ }
+ }
+ }
+
+ if (mdp4_kms->dsi_pll_vddio) {
+ if (mdp4_kms->rev == 2) {
+ ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio,
+ 1800000, 1800000);
+ if (ret) {
+ dev_err(dev->dev,
+ "failed to set dsi_pll_vddio voltage: %d\n", ret);
+ goto out;
+ }
+ }
+ }
+
+ if (mdp4_kms->rev > 1) {
+ mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
+ mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);
+
+ /* max read pending cmd config, 3 pending requests: */
+ mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);
+
+ clk = clk_get_rate(mdp4_kms->clk);
+
+ if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
+ dmap_cfg = 0x47; /* 16 bytes-burst x 8 req */
+		vg_cfg = 0x47;   /* 16 bytes-burst x 8 req */
+ } else {
+ dmap_cfg = 0x27; /* 8 bytes-burst x 8 req */
+ vg_cfg = 0x43; /* 16 bytes-burst x 4 req */
+ }
+
+ DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);
+
+ if (mdp4_kms->rev >= 2)
+ mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
+
+ /* disable CSC matrix / YUV by default: */
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);
+
+ if (mdp4_kms->rev > 1)
+ mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
+
+out:
+ pm_runtime_put_sync(dev->dev);
+
+ return ret;
+}
+
+static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder)
+{
+ /* if we had >1 encoder, we'd need something more clever: */
+ return mdp4_dtv_round_pixclk(encoder, rate);
+}
+
+static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
+ unsigned i;
+
+ for (i = 0; i < priv->num_crtcs; i++)
+ mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
+}
+
+static void mdp4_destroy(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ if (mdp4_kms->blank_cursor_iova)
+ msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+ if (mdp4_kms->blank_cursor_bo)
+ drm_gem_object_unreference(mdp4_kms->blank_cursor_bo);
+ kfree(mdp4_kms);
+}
+
+static const struct mdp_kms_funcs kms_funcs = {
+ .base = {
+ .hw_init = mdp4_hw_init,
+ .irq_preinstall = mdp4_irq_preinstall,
+ .irq_postinstall = mdp4_irq_postinstall,
+ .irq_uninstall = mdp4_irq_uninstall,
+ .irq = mdp4_irq,
+ .enable_vblank = mdp4_enable_vblank,
+ .disable_vblank = mdp4_disable_vblank,
+ .get_format = mdp_get_format,
+ .round_pixclk = mdp4_round_pixclk,
+ .preclose = mdp4_preclose,
+ .destroy = mdp4_destroy,
+ },
+ .set_irqmask = mdp4_set_irqmask,
+};
+
+int mdp4_disable(struct mdp4_kms *mdp4_kms)
+{
+ DBG("");
+
+ clk_disable_unprepare(mdp4_kms->clk);
+ if (mdp4_kms->pclk)
+ clk_disable_unprepare(mdp4_kms->pclk);
+ clk_disable_unprepare(mdp4_kms->lut_clk);
+
+ return 0;
+}
+
+int mdp4_enable(struct mdp4_kms *mdp4_kms)
+{
+ DBG("");
+
+ clk_prepare_enable(mdp4_kms->clk);
+ if (mdp4_kms->pclk)
+ clk_prepare_enable(mdp4_kms->pclk);
+ clk_prepare_enable(mdp4_kms->lut_clk);
+
+ return 0;
+}
+
+static int modeset_init(struct mdp4_kms *mdp4_kms)
+{
+ struct drm_device *dev = mdp4_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct hdmi *hdmi;
+ int ret;
+
+ /*
+ * NOTE: this is a bit simplistic until we add support
+ * for more than just RGB1->DMA_E->DTV->HDMI
+ */
+
+ /* construct non-private planes: */
+ plane = mdp4_plane_init(dev, VG1, false);
+ if (IS_ERR(plane)) {
+ dev_err(dev->dev, "failed to construct plane for VG1\n");
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ priv->planes[priv->num_planes++] = plane;
+
+ plane = mdp4_plane_init(dev, VG2, false);
+ if (IS_ERR(plane)) {
+ dev_err(dev->dev, "failed to construct plane for VG2\n");
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ priv->planes[priv->num_planes++] = plane;
+
+ /* the CRTCs get constructed with a private plane: */
+ plane = mdp4_plane_init(dev, RGB1, true);
+ if (IS_ERR(plane)) {
+ dev_err(dev->dev, "failed to construct plane for RGB1\n");
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+
+ crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E);
+ if (IS_ERR(crtc)) {
+ dev_err(dev->dev, "failed to construct crtc for DMA_E\n");
+ ret = PTR_ERR(crtc);
+ goto fail;
+ }
+ priv->crtcs[priv->num_crtcs++] = crtc;
+
+ encoder = mdp4_dtv_encoder_init(dev);
+ if (IS_ERR(encoder)) {
+ dev_err(dev->dev, "failed to construct DTV encoder\n");
+ ret = PTR_ERR(encoder);
+ goto fail;
+ }
+ encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ hdmi = hdmi_init(dev, encoder);
+ if (IS_ERR(hdmi)) {
+ ret = PTR_ERR(hdmi);
+ dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static const char *iommu_ports[] = {
+ "mdp_port0_cb0", "mdp_port1_cb0",
+};
+
+struct msm_kms *mdp4_kms_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct mdp4_platform_config *config = mdp4_get_config(pdev);
+ struct mdp4_kms *mdp4_kms;
+ struct msm_kms *kms = NULL;
+ struct msm_mmu *mmu;
+ int ret;
+
+ mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
+ if (!mdp4_kms) {
+ dev_err(dev->dev, "failed to allocate kms\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mdp_kms_init(&mdp4_kms->base, &kms_funcs);
+
+ kms = &mdp4_kms->base.base;
+
+ mdp4_kms->dev = dev;
+
+ mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
+ if (IS_ERR(mdp4_kms->mmio)) {
+ ret = PTR_ERR(mdp4_kms->mmio);
+ goto fail;
+ }
+
+ mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda");
+ if (IS_ERR(mdp4_kms->dsi_pll_vdda))
+ mdp4_kms->dsi_pll_vdda = NULL;
+
+ mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio");
+ if (IS_ERR(mdp4_kms->dsi_pll_vddio))
+ mdp4_kms->dsi_pll_vddio = NULL;
+
+ mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(mdp4_kms->vdd))
+ mdp4_kms->vdd = NULL;
+
+ if (mdp4_kms->vdd) {
+ ret = regulator_enable(mdp4_kms->vdd);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+ goto fail;
+ }
+ }
+
+ mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(mdp4_kms->clk)) {
+ dev_err(dev->dev, "failed to get core_clk\n");
+ ret = PTR_ERR(mdp4_kms->clk);
+ goto fail;
+ }
+
+ mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
+ if (IS_ERR(mdp4_kms->pclk))
+ mdp4_kms->pclk = NULL;
+
+ // XXX if (rev >= MDP_REV_42) { ???
+ mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
+ if (IS_ERR(mdp4_kms->lut_clk)) {
+ dev_err(dev->dev, "failed to get lut_clk\n");
+ ret = PTR_ERR(mdp4_kms->lut_clk);
+ goto fail;
+ }
+
+ clk_set_rate(mdp4_kms->clk, config->max_clk);
+ clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+
+ /* make sure things are off before attaching iommu (bootloader could
+ * have left things on, in which case we'll start getting faults if
+ * we don't disable):
+ */
+ mdp4_enable(mdp4_kms);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
+ mdp4_disable(mdp4_kms);
+ mdelay(16);
+
+ if (config->iommu) {
+ mmu = msm_iommu_new(dev, config->iommu);
+ if (IS_ERR(mmu)) {
+ ret = PTR_ERR(mmu);
+ goto fail;
+ }
+ ret = mmu->funcs->attach(mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret)
+ goto fail;
+ } else {
+ dev_info(dev->dev, "no iommu, fallback to phys "
+ "contig buffers for scanout\n");
+ mmu = NULL;
+ }
+
+ mdp4_kms->id = msm_register_mmu(dev, mmu);
+ if (mdp4_kms->id < 0) {
+ ret = mdp4_kms->id;
+ dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
+ goto fail;
+ }
+
+ ret = modeset_init(mdp4_kms);
+ if (ret) {
+ dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+ goto fail;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
+ ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
+ dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+ mdp4_kms->blank_cursor_bo = NULL;
+ goto fail;
+ }
+
+ ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+ &mdp4_kms->blank_cursor_iova);
+ if (ret) {
+ dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+ goto fail;
+ }
+
+ return kms;
+
+fail:
+ if (kms)
+ mdp4_destroy(kms);
+ return ERR_PTR(ret);
+}
+
+static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
+{
+ static struct mdp4_platform_config config = {};
+#ifdef CONFIG_OF
+ /* TODO */
+#else
+ if (cpu_is_apq8064())
+ config.max_clk = 266667000;
+ else
+ config.max_clk = 200000000;
+
+ config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
+#endif
+ return &config;
+}
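
mdp4_hw_init() cracks REG_MDP4_VERSION with FIELD(), a small mask-and-shift helper from msm_drv.h that pairs with the name##__MASK / name##__SHIFT defines in the generated register headers. A self-contained illustration; the macro body below is the assumed shape of the helper, and the mask/shift values are made up for the example rather than taken from mdp4.xml.h:

#include <stdint.h>
#include <stdio.h>

#define DEMO_VERSION_MAJOR__MASK	0xf0000000
#define DEMO_VERSION_MAJOR__SHIFT	28

/* assumed shape of the driver's FIELD() helper: mask first, then shift down */
#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)

int main(void)
{
	uint32_t version = 0x40030000;	/* pretend readback: major version 4 */

	printf("major=%u\n", (unsigned)FIELD(version, DEMO_VERSION_MAJOR)); /* 4 */
	return 0;
}
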
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
new file mode 100644
index 00000000000..715520c54cd
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP4_KMS_H__
+#define __MDP4_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp/mdp_kms.h"
+#include "mdp4.xml.h"
+
+struct mdp4_kms {
+ struct mdp_kms base;
+
+ struct drm_device *dev;
+
+ int rev;
+
+ /* mapper-id used to request GEM buffer mapped for scanout: */
+ int id;
+
+ void __iomem *mmio;
+
+ struct regulator *dsi_pll_vdda;
+ struct regulator *dsi_pll_vddio;
+ struct regulator *vdd;
+
+ struct clk *clk;
+ struct clk *pclk;
+ struct clk *lut_clk;
+
+ struct mdp_irq error_handler;
+
+ /* empty/blank cursor bo to use when cursor is "disabled" */
+ struct drm_gem_object *blank_cursor_bo;
+ uint32_t blank_cursor_iova;
+};
+#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
+
+/* platform config data (ie. from DT, or pdata) */
+struct mdp4_platform_config {
+ struct iommu_domain *iommu;
+ uint32_t max_clk;
+};
+
+static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
+{
+ msm_writel(data, mdp4_kms->mmio + reg);
+}
+
+static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
+{
+ return msm_readl(mdp4_kms->mmio + reg);
+}
+
+static inline uint32_t pipe2flush(enum mdp4_pipe pipe)
+{
+ switch (pipe) {
+ case VG1: return MDP4_OVERLAY_FLUSH_VG1;
+ case VG2: return MDP4_OVERLAY_FLUSH_VG2;
+ case RGB1: return MDP4_OVERLAY_FLUSH_RGB1;
+	case RGB2: return MDP4_OVERLAY_FLUSH_RGB2;
+ default: return 0;
+ }
+}
+
+static inline uint32_t ovlp2flush(int ovlp)
+{
+ switch (ovlp) {
+ case 0: return MDP4_OVERLAY_FLUSH_OVLP0;
+ case 1: return MDP4_OVERLAY_FLUSH_OVLP1;
+ default: return 0;
+ }
+}
+
+static inline uint32_t dma2irq(enum mdp4_dma dma)
+{
+ switch (dma) {
+ case DMA_P: return MDP4_IRQ_DMA_P_DONE;
+ case DMA_S: return MDP4_IRQ_DMA_S_DONE;
+ case DMA_E: return MDP4_IRQ_DMA_E_DONE;
+ default: return 0;
+ }
+}
+
+static inline uint32_t dma2err(enum mdp4_dma dma)
+{
+ switch (dma) {
+ case DMA_P: return MDP4_IRQ_PRIMARY_INTF_UDERRUN;
+ case DMA_S: return 0; // ???
+ case DMA_E: return MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
+ default: return 0;
+ }
+}
+
+static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
+ enum mdp_mixer_stage_id stage)
+{
+ uint32_t mixer_cfg = 0;
+
+ switch (pipe) {
+ case VG1:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
+ break;
+ case VG2:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
+ break;
+ case RGB1:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
+ break;
+ case RGB2:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
+ break;
+ case RGB3:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
+ break;
+ case VG3:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
+ break;
+ case VG4:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
+ break;
+ default:
+		WARN(1, "invalid pipe: %d\n", pipe);
+ break;
+ }
+
+ return mixer_cfg;
+}
+
+int mdp4_disable(struct mdp4_kms *mdp4_kms);
+int mdp4_enable(struct mdp4_kms *mdp4_kms);
+
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp4_irq_preinstall(struct msm_kms *kms);
+int mdp4_irq_postinstall(struct msm_kms *kms);
+void mdp4_irq_uninstall(struct msm_kms *kms);
+irqreturn_t mdp4_irq(struct msm_kms *kms);
+int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+static inline
+uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
+ uint32_t max_formats)
+{
+ /* TODO when we have YUV, we need to filter supported formats
+ * based on pipe_id..
+ */
+ return mdp_get_formats(pixel_formats, max_formats);
+}
+
+void mdp4_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj);
+void mdp4_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+int mdp4_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
+struct drm_plane *mdp4_plane_init(struct drm_device *dev,
+ enum mdp4_pipe pipe_id, bool private_plane);
+
+uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
+void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
+void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
+void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf);
+void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
+void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
+struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, int id, int ovlp_id,
+ enum mdp4_dma dma_id);
+
+long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
+struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
+
+#ifdef CONFIG_MSM_BUS_SCALING
+static inline int match_dev_name(struct device *dev, void *data)
+{
+ return !strcmp(dev_name(dev), data);
+}
+/* bus scaling data is associated with extra pointless platform devices,
+ * "dtv", etc. This is a bit of a hack, but we need a way for encoders
+ * to find their pdata to make the bus-scaling stuff work.
+ */
+static inline void *mdp4_find_pdata(const char *devname)
+{
+ struct device *dev;
+ dev = bus_find_device(&platform_bus_type, NULL,
+ (void *)devname, match_dev_name);
+ return dev ? dev->platform_data : NULL;
+}
+#endif
+
+#endif /* __MDP4_KMS_H__ */
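
Tying the header's inline helpers together: a CRTC implementation would compute its layer-mixer word with mixercfg() and write it to the mixer-config register, roughly as below. The register name REG_MDP4_LAYERMIXER_IN_CFG and the STAGE_BASE stage id are assumed from the generated headers; the real call site is mdp4_crtc.c, which is not shown in this section:

/* illustrative: stage the private RGB1 pipe at the base of mixer 0 */
static void example_stage_rgb1(struct mdp4_kms *mdp4_kms)
{
	uint32_t cfg = mixercfg(0, RGB1, STAGE_BASE);

	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, cfg);
}
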
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
new file mode 100644
index 00000000000..66f33dba1eb
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp4_kms.h"
+
+
+struct mdp4_plane {
+ struct drm_plane base;
+ const char *name;
+
+ enum mdp4_pipe pipe;
+
+ uint32_t nformats;
+ uint32_t formats[32];
+
+ bool enabled;
+};
+#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
+
+static struct mdp4_kms *get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+static int mdp4_plane_update(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+
+ mdp4_plane->enabled = true;
+
+ if (plane->fb)
+ drm_framebuffer_unreference(plane->fb);
+
+ drm_framebuffer_reference(fb);
+
+ return mdp4_plane_mode_set(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+}
+
+static int mdp4_plane_disable(struct drm_plane *plane)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ DBG("%s: disable", mdp4_plane->name);
+ if (plane->crtc)
+ mdp4_crtc_detach(plane->crtc, plane);
+ return 0;
+}
+
+static void mdp4_plane_destroy(struct drm_plane *plane)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+
+ mdp4_plane_disable(plane);
+ drm_plane_cleanup(plane);
+
+ kfree(mdp4_plane);
+}
+
+/* helper to install properties which are common to planes and crtcs */
+void mdp4_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj)
+{
+ // XXX
+}
+
+int mdp4_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val)
+{
+ // XXX
+ return -EINVAL;
+}
+
+static const struct drm_plane_funcs mdp4_plane_funcs = {
+ .update_plane = mdp4_plane_update,
+ .disable_plane = mdp4_plane_disable,
+ .destroy = mdp4_plane_destroy,
+ .set_property = mdp4_plane_set_property,
+};
+
+void mdp4_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ struct mdp4_kms *mdp4_kms = get_kms(plane);
+ enum mdp4_pipe pipe = mdp4_plane->pipe;
+ uint32_t iova;
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
+ MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+ MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe),
+ MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+ MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+ msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);
+
+ plane->fb = fb;
+}
+
+#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
+
+int mdp4_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ struct mdp4_kms *mdp4_kms = get_kms(plane);
+ enum mdp4_pipe pipe = mdp4_plane->pipe;
+ const struct mdp_format *format;
+ uint32_t op_mode = 0;
+ uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
+ uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
+
+ /* src values are in Q16 fixed point, convert to integer: */
+ src_x = src_x >> 16;
+ src_y = src_y >> 16;
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
+
+ DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp4_plane->name,
+ fb->base.id, src_x, src_y, src_w, src_h,
+ crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
+
+ if (src_w != crtc_w) {
+ op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
+ /* TODO calc phasex_step */
+ }
+
+ if (src_h != crtc_h) {
+ op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN;
+ /* TODO calc phasey_step */
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe),
+ MDP4_PIPE_SRC_SIZE_WIDTH(src_w) |
+ MDP4_PIPE_SRC_SIZE_HEIGHT(src_h));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe),
+ MDP4_PIPE_SRC_XY_X(src_x) |
+ MDP4_PIPE_SRC_XY_Y(src_y));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe),
+ MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) |
+ MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
+ MDP4_PIPE_DST_XY_X(crtc_x) |
+ MDP4_PIPE_DST_XY_Y(crtc_y));
+
+ mdp4_plane_set_scanout(plane, fb);
+
+ format = to_mdp_format(msm_framebuffer_format(fb));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
+ MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
+ MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
+ MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
+ MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+ COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
+ MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+ MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
+ COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe),
+ MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
+
+ /* TODO detach from old crtc (if we had more than one) */
+ mdp4_crtc_attach(crtc, plane);
+
+ return 0;
+}
+
+static const char *pipe_names[] = {
+ "VG1", "VG2",
+ "RGB1", "RGB2", "RGB3",
+ "VG3", "VG4",
+};
+
+enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ return mdp4_plane->pipe;
+}
+
+/* initialize plane */
+struct drm_plane *mdp4_plane_init(struct drm_device *dev,
+ enum mdp4_pipe pipe_id, bool private_plane)
+{
+ struct drm_plane *plane = NULL;
+ struct mdp4_plane *mdp4_plane;
+ int ret;
+ enum drm_plane_type type;
+
+ mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL);
+ if (!mdp4_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ plane = &mdp4_plane->base;
+
+ mdp4_plane->pipe = pipe_id;
+ mdp4_plane->name = pipe_names[pipe_id];
+
+ mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats,
+ ARRAY_SIZE(mdp4_plane->formats));
+
+ type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+ drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
+ mdp4_plane->formats, mdp4_plane->nformats,
+ type);
+
+ mdp4_plane_install_properties(plane, &plane->base);
+
+ return plane;
+
+fail:
+ if (plane)
+ mdp4_plane_destroy(plane);
+
+ return ERR_PTR(ret);
+}
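
One detail worth spelling out from mdp4_plane_mode_set() above: DRM passes the src_* rectangle in Q16 (16.16 fixed point), so the integer part is recovered with a 16-bit right shift. A tiny standalone check of the arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* 1920 expressed in Q16, as DRM hands source coordinates over */
	uint32_t src_w_q16 = 1920u << 16;	/* 0x07800000 */

	assert((src_w_q16 >> 16) == 1920);	/* the shift done in mode_set */
	return 0;
}
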
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
new file mode 100644
index 00000000000..0aa51517f82
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -0,0 +1,1036 @@
+#ifndef MDP5_XML
+#define MDP5_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp5_intf {
+ INTF_DSI = 1,
+ INTF_HDMI = 3,
+ INTF_LCDC = 5,
+ INTF_eDP = 9,
+};
+
+enum mdp5_intfnum {
+ NO_INTF = 0,
+ INTF0 = 1,
+ INTF1 = 2,
+ INTF2 = 3,
+ INTF3 = 4,
+};
+
+enum mdp5_pipe {
+ SSPP_VIG0 = 0,
+ SSPP_VIG1 = 1,
+ SSPP_VIG2 = 2,
+ SSPP_RGB0 = 3,
+ SSPP_RGB1 = 4,
+ SSPP_RGB2 = 5,
+ SSPP_DMA0 = 6,
+ SSPP_DMA1 = 7,
+};
+
+enum mdp5_ctl_mode {
+ MODE_NONE = 0,
+ MODE_ROT0 = 1,
+ MODE_ROT1 = 2,
+ MODE_WB0 = 3,
+ MODE_WB1 = 4,
+ MODE_WFD = 5,
+};
+
+enum mdp5_pack_3d {
+ PACK_3D_FRAME_INT = 0,
+ PACK_3D_H_ROW_INT = 1,
+ PACK_3D_V_ROW_INT = 2,
+ PACK_3D_COL_INT = 3,
+};
+
+enum mdp5_chroma_samp_type {
+ CHROMA_RGB = 0,
+ CHROMA_H2V1 = 1,
+ CHROMA_H1V2 = 2,
+ CHROMA_420 = 3,
+};
+
+enum mdp5_scale_filter {
+ SCALE_FILTER_NEAREST = 0,
+ SCALE_FILTER_BIL = 1,
+ SCALE_FILTER_PCMN = 2,
+ SCALE_FILTER_CA = 3,
+};
+
+enum mdp5_pipe_bwc {
+ BWC_LOSSLESS = 0,
+ BWC_Q_HIGH = 1,
+ BWC_Q_MED = 2,
+};
+
+enum mdp5_client_id {
+ CID_UNUSED = 0,
+ CID_VIG0_Y = 1,
+ CID_VIG0_CR = 2,
+ CID_VIG0_CB = 3,
+ CID_VIG1_Y = 4,
+ CID_VIG1_CR = 5,
+ CID_VIG1_CB = 6,
+ CID_VIG2_Y = 7,
+ CID_VIG2_CR = 8,
+ CID_VIG2_CB = 9,
+ CID_DMA0_Y = 10,
+ CID_DMA0_CR = 11,
+ CID_DMA0_CB = 12,
+ CID_DMA1_Y = 13,
+ CID_DMA1_CR = 14,
+ CID_DMA1_CB = 15,
+ CID_RGB0 = 16,
+ CID_RGB1 = 17,
+ CID_RGB2 = 18,
+ CID_MAX = 19,
+};
+
+enum mdp5_igc_type {
+ IGC_VIG = 0,
+ IGC_RGB = 1,
+ IGC_DMA = 2,
+ IGC_DSPP = 3,
+};
+
+#define MDP5_IRQ_INTF0_WB_ROT_COMP 0x00000001
+#define MDP5_IRQ_INTF1_WB_ROT_COMP 0x00000002
+#define MDP5_IRQ_INTF2_WB_ROT_COMP 0x00000004
+#define MDP5_IRQ_INTF3_WB_ROT_COMP 0x00000008
+#define MDP5_IRQ_INTF0_WB_WFD 0x00000010
+#define MDP5_IRQ_INTF1_WB_WFD 0x00000020
+#define MDP5_IRQ_INTF2_WB_WFD 0x00000040
+#define MDP5_IRQ_INTF3_WB_WFD 0x00000080
+#define MDP5_IRQ_INTF0_PING_PONG_COMP 0x00000100
+#define MDP5_IRQ_INTF1_PING_PONG_COMP 0x00000200
+#define MDP5_IRQ_INTF2_PING_PONG_COMP 0x00000400
+#define MDP5_IRQ_INTF3_PING_PONG_COMP 0x00000800
+#define MDP5_IRQ_INTF0_PING_PONG_RD_PTR 0x00001000
+#define MDP5_IRQ_INTF1_PING_PONG_RD_PTR 0x00002000
+#define MDP5_IRQ_INTF2_PING_PONG_RD_PTR 0x00004000
+#define MDP5_IRQ_INTF3_PING_PONG_RD_PTR 0x00008000
+#define MDP5_IRQ_INTF0_PING_PONG_WR_PTR 0x00010000
+#define MDP5_IRQ_INTF1_PING_PONG_WR_PTR 0x00020000
+#define MDP5_IRQ_INTF2_PING_PONG_WR_PTR 0x00040000
+#define MDP5_IRQ_INTF3_PING_PONG_WR_PTR 0x00080000
+#define MDP5_IRQ_INTF0_PING_PONG_AUTO_REF 0x00100000
+#define MDP5_IRQ_INTF1_PING_PONG_AUTO_REF 0x00200000
+#define MDP5_IRQ_INTF2_PING_PONG_AUTO_REF 0x00400000
+#define MDP5_IRQ_INTF3_PING_PONG_AUTO_REF 0x00800000
+#define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000
+#define MDP5_IRQ_INTF0_VSYNC 0x02000000
+#define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000
+#define MDP5_IRQ_INTF1_VSYNC 0x08000000
+#define MDP5_IRQ_INTF2_UNDER_RUN 0x10000000
+#define MDP5_IRQ_INTF2_VSYNC 0x20000000
+#define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000
+#define MDP5_IRQ_INTF3_VSYNC 0x80000000
+#define REG_MDP5_HW_VERSION 0x00000000
+
+#define REG_MDP5_HW_INTR_STATUS 0x00000010
+#define MDP5_HW_INTR_STATUS_INTR_MDP 0x00000001
+#define MDP5_HW_INTR_STATUS_INTR_DSI0 0x00000010
+#define MDP5_HW_INTR_STATUS_INTR_DSI1 0x00000020
+#define MDP5_HW_INTR_STATUS_INTR_HDMI 0x00000100
+#define MDP5_HW_INTR_STATUS_INTR_EDP 0x00001000
+
+#define REG_MDP5_MDP_VERSION 0x00000100
+#define MDP5_MDP_VERSION_MINOR__MASK 0x00ff0000
+#define MDP5_MDP_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP5_MDP_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << MDP5_MDP_VERSION_MINOR__SHIFT) & MDP5_MDP_VERSION_MINOR__MASK;
+}
+#define MDP5_MDP_VERSION_MAJOR__MASK 0xf0000000
+#define MDP5_MDP_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDP5_MDP_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDP5_MDP_VERSION_MAJOR__SHIFT) & MDP5_MDP_VERSION_MAJOR__MASK;
+}
+
+#define REG_MDP5_DISP_INTF_SEL 0x00000104
+#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff
+#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
+#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
+#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000
+#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
+}
+
+#define REG_MDP5_INTR_EN 0x00000110
+
+#define REG_MDP5_INTR_STATUS 0x00000114
+
+#define REG_MDP5_INTR_CLEAR 0x00000118
+
+#define REG_MDP5_HIST_INTR_EN 0x0000011c
+
+#define REG_MDP5_HIST_INTR_STATUS 0x00000120
+
+#define REG_MDP5_HIST_INTR_CLEAR 0x00000124
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000180 + 0x4*i0; }
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000180 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+}
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+}
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+}
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000230 + 0x4*i0; }
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000230 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
+}
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
+}
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
+}
+
+static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
+{
+ switch (idx) {
+ case IGC_VIG: return 0x00000300;
+ case IGC_RGB: return 0x00000310;
+ case IGC_DMA: return 0x00000320;
+ case IGC_DSPP: return 0x00000400;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
+
+static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff
+#define MDP5_IGC_LUT_REG_VAL__SHIFT 0
+static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
+{
+ return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
+}
+#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
+
+static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000600 + 0x100*i0; }
+
+static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
+#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007
+#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038
+#define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0
+#define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00
+#define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000
+#define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000
+#define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK;
+}
+#define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000
+#define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000
+#define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000
+#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000
+
+static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000614 + 0x100*i0; }
+#define MDP5_CTL_OP_MODE__MASK 0x0000000f
+#define MDP5_CTL_OP_MODE__SHIFT 0
+static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val)
+{
+ return ((val) << MDP5_CTL_OP_MODE__SHIFT) & MDP5_CTL_OP_MODE__MASK;
+}
+#define MDP5_CTL_OP_INTF_NUM__MASK 0x00000070
+#define MDP5_CTL_OP_INTF_NUM__SHIFT 4
+static inline uint32_t MDP5_CTL_OP_INTF_NUM(enum mdp5_intfnum val)
+{
+ return ((val) << MDP5_CTL_OP_INTF_NUM__SHIFT) & MDP5_CTL_OP_INTF_NUM__MASK;
+}
+#define MDP5_CTL_OP_CMD_MODE 0x00020000
+#define MDP5_CTL_OP_PACK_3D_ENABLE 0x00080000
+#define MDP5_CTL_OP_PACK_3D__MASK 0x00300000
+#define MDP5_CTL_OP_PACK_3D__SHIFT 20
+static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val)
+{
+ return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK;
+}
+
+static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x100*i0; }
+#define MDP5_CTL_FLUSH_VIG0 0x00000001
+#define MDP5_CTL_FLUSH_VIG1 0x00000002
+#define MDP5_CTL_FLUSH_VIG2 0x00000004
+#define MDP5_CTL_FLUSH_RGB0 0x00000008
+#define MDP5_CTL_FLUSH_RGB1 0x00000010
+#define MDP5_CTL_FLUSH_RGB2 0x00000020
+#define MDP5_CTL_FLUSH_LM0 0x00000040
+#define MDP5_CTL_FLUSH_LM1 0x00000080
+#define MDP5_CTL_FLUSH_LM2 0x00000100
+#define MDP5_CTL_FLUSH_DMA0 0x00000800
+#define MDP5_CTL_FLUSH_DMA1 0x00001000
+#define MDP5_CTL_FLUSH_DSPP0 0x00002000
+#define MDP5_CTL_FLUSH_DSPP1 0x00004000
+#define MDP5_CTL_FLUSH_DSPP2 0x00008000
+#define MDP5_CTL_FLUSH_CTL 0x00020000
+
+static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000061c + 0x100*i0; }
+
+static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000620 + 0x100*i0; }
+
+static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000014c4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000014f0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00001500 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
+#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00001204 + 0x400*i0; }
+#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00001208 + 0x400*i0; }
+#define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000
+#define MDP5_PIPE_SRC_XY_Y__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_XY_Y__SHIFT) & MDP5_PIPE_SRC_XY_Y__MASK;
+}
+#define MDP5_PIPE_SRC_XY_X__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_XY_X__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000120c + 0x400*i0; }
+#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_OUT_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_OUT_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00001210 + 0x400*i0; }
+#define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000
+#define MDP5_PIPE_OUT_XY_Y__SHIFT 16
+static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_XY_Y__SHIFT) & MDP5_PIPE_OUT_XY_Y__MASK;
+}
+#define MDP5_PIPE_OUT_XY_X__MASK 0x0000ffff
+#define MDP5_PIPE_OUT_XY_X__SHIFT 0
+static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00001214 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00001218 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000121c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00001220 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00001224 + 0x400*i0; }
+#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P0__MASK;
+}
+#define MDP5_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
+#define MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00001228 + 0x400*i0; }
+#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P2__MASK;
+}
+#define MDP5_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
+#define MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000122c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00001230 + 0x400*i0; }
+#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
+#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_G_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
+#define MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_B_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
+#define MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_R_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
+#define MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_A_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
+#define MDP5_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
+#define MDP5_PIPE_SRC_FORMAT_CPP__SHIFT 9
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CPP(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CPP__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_ROT90 0x00000800
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00003000
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 12
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
+#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00780000
+#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT 19
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000
+#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp5_chroma_samp_type val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00001234 + 0x400*i0; }
+#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
+#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM0__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
+#define MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM1__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
+#define MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM2__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
+#define MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00001238 + 0x400*i0; }
+#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001
+#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006
+#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1
+static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val)
+{
+ return ((val) << MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT) & MDP5_PIPE_SRC_OP_MODE_BWC__MASK;
+}
+#define MDP5_PIPE_SRC_OP_MODE_FLIP_LR 0x00002000
+#define MDP5_PIPE_SRC_OP_MODE_FLIP_UD 0x00004000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_EN 0x00010000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_0 0x00020000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1 0x00040000
+#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000
+#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000
+
+static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000123c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00001248 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000124c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00001250 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00001254 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00001258 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00001270 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000012a4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000012a8 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000012ac + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000012b0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000012b4 + 0x400*i0; }
+#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff
+#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0
+static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_DECIMATION_VERT__SHIFT) & MDP5_PIPE_DECIMATION_VERT__MASK;
+}
+#define MDP5_PIPE_DECIMATION_HORZ__MASK 0x0000ff00
+#define MDP5_PIPE_DECIMATION_HORZ__SHIFT 8
+static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00001404 + 0x400*i0; }
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT 8
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK 0x00000c00
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT 10
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK 0x00003000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT 12
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK 0x0000c000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT 14
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK 0x00030000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT 16
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK 0x000c0000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT 18
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00001410 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00001414 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00001420 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00001424 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00003200 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00003200 + 0x400*i0; }
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010
+
+static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00003204 + 0x400*i0; }
+#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_LM_OUT_SIZE_HEIGHT__SHIFT) & MDP5_LM_OUT_SIZE_HEIGHT__MASK;
+}
+#define MDP5_LM_OUT_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_LM_OUT_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00003208 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00003210 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
+#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
+#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
+static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
+{
+ return ((val) << MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK;
+}
+#define MDP5_LM_BLEND_OP_MODE_FG_INV_ALPHA 0x00000004
+#define MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA 0x00000008
+#define MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA 0x00000010
+#define MDP5_LM_BLEND_OP_MODE_FG_TRANSP_EN 0x00000020
+#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK 0x00000300
+#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT 8
+static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
+{
+ return ((val) << MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK;
+}
+#define MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA 0x00000400
+#define MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA 0x00000800
+#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
+#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003224 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003228 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000322c + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003230 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003234 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003238 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000323c + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003240 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003244 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003248 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000032e0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000032e4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000032e8 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000032dc + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000032ec + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000032f0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000032f4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000032f8 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000032fc + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00003300 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00003304 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00003308 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000330c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00003310 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00004600 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00004600 + 0x400*i0; }
+#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001
+#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e
+#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1
+static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val)
+{
+ return ((val) << MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT) & MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK;
+}
+#define MDP5_DSPP_OP_MODE_PCC_EN 0x00000010
+#define MDP5_DSPP_OP_MODE_DITHER_EN 0x00000100
+#define MDP5_DSPP_OP_MODE_HIST_EN 0x00010000
+#define MDP5_DSPP_OP_MODE_AUTO_CLEAR 0x00020000
+#define MDP5_DSPP_OP_MODE_HIST_LUT_EN 0x00080000
+#define MDP5_DSPP_OP_MODE_PA_EN 0x00100000
+#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000
+#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000
+
+static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00004630 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00004750 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00004810 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00004830 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00004834 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00004838 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000048dc + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000048b0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00012500 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00012500 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00012504 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00012508 + 0x200*i0; }
+#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff
+#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0
+static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT) & MDP5_INTF_HSYNC_CTL_PULSEW__MASK;
+}
+#define MDP5_INTF_HSYNC_CTL_PERIOD__MASK 0xffff0000
+#define MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT 16
+static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0001250c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00012510 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00012514 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00012518 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0001251c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00012520 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00012524 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00012528 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0001252c + 0x200*i0; }
+#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff
+#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0
+static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK;
+}
+#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00012530 + 0x200*i0; }
+#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff
+#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0
+static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00012534 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00012538 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0001253c + 0x200*i0; }
+#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff
+#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0
+static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP5_INTF_DISPLAY_HCTL_START__SHIFT) & MDP5_INTF_DISPLAY_HCTL_START__MASK;
+}
+#define MDP5_INTF_DISPLAY_HCTL_END__MASK 0xffff0000
+#define MDP5_INTF_DISPLAY_HCTL_END__SHIFT 16
+static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00012540 + 0x200*i0; }
+#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_HCTL_START__SHIFT) & MDP5_INTF_ACTIVE_HCTL_START__MASK;
+}
+#define MDP5_INTF_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP5_INTF_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_HCTL_END__SHIFT) & MDP5_INTF_ACTIVE_HCTL_END__MASK;
+}
+#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000
+
+static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00012544 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00012548 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0001254c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00012550 + 0x200*i0; }
+#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001
+#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002
+#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004
+
+static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00012554 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00012558 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0001255c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00012584 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00012590 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000125a8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000125ac + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000125b0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000125f0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000125f4 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000125f8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00012600 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00012604 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00012608 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0001260c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00012610 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00012614 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00012618 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0001261c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00013100 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00013100 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00013104 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00013108 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0001310c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00013110 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00013114 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00013118 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0001311c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00013120 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00013124 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00013128 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0001312c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00013130 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00013134 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00013138 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0001317c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000131c8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000131cc + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000131d0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000131d4 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000131d8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000131dc + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000131e0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000131e8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000131ec + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000131f0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000131f4 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000131f8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00013200 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00013244 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00013248 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0001324c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00013254 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00013258 + 0x200*i0; }
+
+
+#endif /* MDP5_XML */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
new file mode 100644
index 00000000000..ebe2e60f3ab
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -0,0 +1,575 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+#include <drm/drm_mode.h>
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_flip_work.h"
+
+struct mdp5_crtc {
+ struct drm_crtc base;
+ char name[8];
+ struct drm_plane *plane;
+ struct drm_plane *planes[8];
+ int id;
+ bool enabled;
+
+ /* which mixer/encoder we route output to: */
+ int mixer;
+
+ /* if there is a pending flip, these will be non-null: */
+ struct drm_pending_vblank_event *event;
+ struct msm_fence_cb pageflip_cb;
+
+#define PENDING_CURSOR 0x1
+#define PENDING_FLIP 0x2
+ atomic_t pending;
+
+ /* the fb that we logically (from PoV of KMS API) hold a ref
+ * to. We may not yet be scanning it out (we may still be
+ * scanning out the previous fb, in the page_flip case, while
+ * waiting for gpu rendering to complete).
+ */
+ struct drm_framebuffer *fb;
+
+ /* the fb that we currently hold a scanout ref to: */
+ struct drm_framebuffer *scanout_fb;
+
+ /* for unref'ing framebuffers after scanout completes: */
+ struct drm_flip_work unref_fb_work;
+
+ struct mdp_irq vblank;
+ struct mdp_irq err;
+};
+#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
+
+static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
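+/* mark the given updates as pending, and enable the vblank irq so
+ * that they get completed on the next vblank:
+ */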
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ atomic_or(pending, &mdp5_crtc->pending);
+ mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
+}
+
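+/* write the CTL FLUSH register with a bit for each pipe currently
+ * attached to this crtc, plus the mixer and the CTL itself, so the
+ * hw picks up the new configuration:
+ */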
+static void crtc_flush(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ int id = mdp5_crtc->id;
+ uint32_t i, flush = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
+ struct drm_plane *plane = mdp5_crtc->planes[i];
+ if (plane) {
+ enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
+ flush |= pipe2flush(pipe);
+ }
+ }
+ flush |= mixer2flush(mdp5_crtc->id);
+ flush |= MDP5_CTL_FLUSH_CTL;
+
+ DBG("%s: flush=%08x", mdp5_crtc->name, flush);
+
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
+}
+
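+/* point the crtc at a new fb: takes a ref to the new fb, and queues
+ * the old one for unref once it is no longer scanned out:
+ */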
+static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct drm_framebuffer *old_fb = mdp5_crtc->fb;
+
+ /* grab reference to incoming scanout fb: */
+ drm_framebuffer_reference(new_fb);
+ mdp5_crtc->base.primary->fb = new_fb;
+ mdp5_crtc->fb = new_fb;
+
+ if (old_fb)
+ drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
+}
+
+/* unlike update_fb(), take a ref to the new scanout fb *before* updating
+ * plane, then call this. Needed to ensure we don't unref the buffer that
+ * is actually still being scanned out.
+ *
+ * Note that this whole thing goes away with atomic.. since we can defer
+ * calling into driver until rendering is done.
+ */
+static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ /* flush updates, to make sure hw is updated to new scanout fb,
+ * so that we can safely queue unref to current fb (ie. next
+ * vblank we know hw is done w/ previous scanout_fb).
+ */
+ crtc_flush(crtc);
+
+ if (mdp5_crtc->scanout_fb)
+ drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
+ mdp5_crtc->scanout_fb);
+
+ mdp5_crtc->scanout_fb = fb;
+
+ /* enable vblank to complete flip: */
+ request_pending(crtc, PENDING_FLIP);
+}
+
+/* if file!=NULL, this is preclose potential cancel-flip path */
+static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = mdp5_crtc->event;
+ if (event) {
+ /* if regular vblank case (!file) or if cancel-flip from
+ * preclose on file that requested flip, then send the
+ * event:
+ */
+ if (!file || (event->base.file_priv == file)) {
+ mdp5_crtc->event = NULL;
+ drm_send_vblank_event(dev, mdp5_crtc->id, event);
+ }
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
+ struct drm_plane *plane = mdp5_crtc->planes[i];
+ if (plane)
+ mdp5_plane_complete_flip(plane);
+ }
+}
+
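+/* fence callback: gpu rendering to the new fb is complete, so it is
+ * now safe to point the plane at it and flush:
+ */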
+static void pageflip_cb(struct msm_fence_cb *cb)
+{
+ struct mdp5_crtc *mdp5_crtc =
+ container_of(cb, struct mdp5_crtc, pageflip_cb);
+ struct drm_crtc *crtc = &mdp5_crtc->base;
+ struct drm_framebuffer *fb = mdp5_crtc->fb;
+
+ if (!fb)
+ return;
+
+ drm_framebuffer_reference(fb);
+ mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
+ update_scanout(crtc, fb);
+}
+
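+/* deferred unref of fbs that are no longer being scanned out: */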
+static void unref_fb_worker(struct drm_flip_work *work, void *val)
+{
+ struct mdp5_crtc *mdp5_crtc =
+ container_of(work, struct mdp5_crtc, unref_fb_work);
+ struct drm_device *dev = mdp5_crtc->base.dev;
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_framebuffer_unreference(val);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void mdp5_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
+
+ kfree(mdp5_crtc);
+}
+
+static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+ DBG("%s: mode=%d", mdp5_crtc->name, mode);
+
+ if (enabled != mdp5_crtc->enabled) {
+ if (enabled) {
+ mdp5_enable(mdp5_kms);
+ mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
+ } else {
+ mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
+ mdp5_disable(mdp5_kms);
+ }
+ mdp5_crtc->enabled = enabled;
+ }
+}
+
+static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ int id = mdp5_crtc->id;
+
+ /*
+ * Hard-coded setup for now until I figure out how the
+ * layer-mixer works
+ */
+
+ /* LM[id]: */
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
+ MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
+ MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
+ MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
+
+ /* NOTE: it seems that for LM[n] and CTL[m] we do not need n==m, but
+ * we do want to be setting CTL[m].LAYER[n]. Not sure what the
+ * point of having CTL[m].LAYER[o] (for o!=n) is.. maybe that is
+ * used when chaining up mixers for high resolution displays?
+ */
+
+ /* CTL[id]: */
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
+ MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
+ MDP5_CTL_LAYER_REG_BORDER_COLOR);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
+}
+
+static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ int ret;
+
+ mode = adjusted_mode;
+
+ DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mdp5_crtc->name, mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->primary->fb);
+
+ ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->primary->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->primary->fb);
+ dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+ mdp5_crtc->name, ret);
+ return ret;
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
+ MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
+ MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+ update_fb(crtc, crtc->primary->fb);
+ update_scanout(crtc, crtc->primary->fb);
+
+ return 0;
+}
+
+static void mdp5_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ DBG("%s", mdp5_crtc->name);
+ /* make sure we hold a ref to mdp clks while setting up mode: */
+ mdp5_enable(get_kms(crtc));
+ mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp5_crtc_commit(struct drm_crtc *crtc)
+{
+ mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ crtc_flush(crtc);
+ /* drop the ref to mdp clk's that we got in prepare: */
+ mdp5_disable(get_kms(crtc));
+}
+
+static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct drm_plane *plane = mdp5_crtc->plane;
+ struct drm_display_mode *mode = &crtc->mode;
+ int ret;
+
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->primary->fb);
+
+ ret = mdp5_plane_mode_set(plane, crtc, crtc->primary->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->primary->fb);
+ return ret;
+ }
+
+ update_fb(crtc, crtc->primary->fb);
+ update_scanout(crtc, crtc->primary->fb);
+
+ return 0;
+}
+
+static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *new_fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_gem_object *obj;
+ unsigned long flags;
+
+ if (mdp5_crtc->event) {
+ dev_err(dev->dev, "already pending flip!\n");
+ return -EBUSY;
+ }
+
+ obj = msm_framebuffer_bo(new_fb, 0);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ mdp5_crtc->event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ update_fb(crtc, new_fb);
+
+ return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
+}
+
+static int mdp5_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val)
+{
+ /* XXX */
+ return -EINVAL;
+}
+
+static const struct drm_crtc_funcs mdp5_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = mdp5_crtc_destroy,
+ .page_flip = mdp5_crtc_page_flip,
+ .set_property = mdp5_crtc_set_property,
+};
+
+static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
+ .dpms = mdp5_crtc_dpms,
+ .mode_fixup = mdp5_crtc_mode_fixup,
+ .mode_set = mdp5_crtc_mode_set,
+ .prepare = mdp5_crtc_prepare,
+ .commit = mdp5_crtc_commit,
+ .mode_set_base = mdp5_crtc_mode_set_base,
+ .load_lut = mdp5_crtc_load_lut,
+};
+
+static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
+ struct drm_crtc *crtc = &mdp5_crtc->base;
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ unsigned pending;
+
+ mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
+
+ pending = atomic_xchg(&mdp5_crtc->pending, 0);
+
+ if (pending & PENDING_FLIP) {
+ complete_flip(crtc, NULL);
+ drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
+ }
+}
+
+static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
+ struct drm_crtc *crtc = &mdp5_crtc->base;
+
+ DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
+ crtc_flush(crtc);
+}
+
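+/* vblank irq bit for this crtc (used by the mdp5 irq code): */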
+uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ return mdp5_crtc->vblank.irqmask;
+}
+
+void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ DBG("cancel: %p", file);
+ complete_flip(crtc, file);
+}
+
+/* set interface for routing crtc->encoder: */
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
+ enum mdp5_intf intf_id)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ static const enum mdp5_intfnum intfnum[] = {
+ INTF0, INTF1, INTF2, INTF3,
+ };
+ uint32_t intf_sel;
+
+ /* now that we know what irq's we want: */
+ mdp5_crtc->err.irqmask = intf2err(intf);
+ mdp5_crtc->vblank.irqmask = intf2vblank(intf);
+
+ /* when called from modeset_init(), skip the rest until later: */
+ if (!mdp5_kms)
+ return;
+
+ intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
+
+ switch (intf) {
+ case 0:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
+ break;
+ case 1:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
+ break;
+ case 2:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
+ break;
+ case 3:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ blend_setup(crtc);
+
+ DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
+
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
+ MDP5_CTL_OP_MODE(MODE_NONE) |
+ MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
+
+ crtc_flush(crtc);
+}
+
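+/* attach (or detach, with plane==NULL) a plane to one of this crtc's
+ * pipe slots, updating the blend configuration:
+ */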
+static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
+ struct drm_plane *plane)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes));
+
+ if (mdp5_crtc->planes[pipe_id] == plane)
+ return;
+
+ mdp5_crtc->planes[pipe_id] = plane;
+ blend_setup(crtc);
+ if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
+ crtc_flush(crtc);
+}
+
+void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+ set_attach(crtc, mdp5_plane_pipe(plane), plane);
+}
+
+void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+ /* don't actually detach our primary plane: */
+ if (to_mdp5_crtc(crtc)->plane == plane)
+ return;
+ set_attach(crtc, mdp5_plane_pipe(plane), NULL);
+}
+
+/* initialize crtc */
+struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, int id)
+{
+ struct drm_crtc *crtc = NULL;
+ struct mdp5_crtc *mdp5_crtc;
+ int ret;
+
+ mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
+ if (!mdp5_crtc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ crtc = &mdp5_crtc->base;
+
+ mdp5_crtc->plane = plane;
+ mdp5_crtc->id = id;
+
+ mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
+ mdp5_crtc->err.irq = mdp5_crtc_err_irq;
+
+ snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
+ pipe2name(mdp5_plane_pipe(plane)), id);
+
+ ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
+ "unref fb", unref_fb_worker);
+ if (ret) {
+ /* crtc is not initialized yet; skip drm_crtc_cleanup(): */
+ kfree(mdp5_crtc);
+ return ERR_PTR(ret);
+ }
+
+ INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
+
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
+ drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
+
+ mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);
+
+ return crtc;
+
+fail:
+ if (crtc)
+ mdp5_crtc_destroy(crtc);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
new file mode 100644
index 00000000000..edec7bfaa95
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+struct mdp5_encoder {
+ struct drm_encoder base;
+ int intf;
+ enum mdp5_intf intf_id;
+ bool enabled;
+ uint32_t bsc;
+};
+#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
+
+static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
+ { \
+ .src = MSM_BUS_MASTER_MDP_PORT0, \
+ .dst = MSM_BUS_SLAVE_EBI_CH0, \
+ .ab = (ab_val), \
+ .ib = (ib_val), \
+ }
+
+static struct msm_bus_vectors mdp_bus_vectors[] = {
+ MDP_BUS_VECTOR_ENTRY(0, 0),
+ MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
+};
+static struct msm_bus_paths mdp_bus_usecases[] = { {
+ .num_paths = 1,
+ .vectors = &mdp_bus_vectors[0],
+}, {
+ .num_paths = 1,
+ .vectors = &mdp_bus_vectors[1],
+} };
+static struct msm_bus_scale_pdata mdp_bus_scale_table = {
+ .usecase = mdp_bus_usecases,
+ .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
+ .name = "mdss_mdp",
+};
+
+static void bs_init(struct mdp5_encoder *mdp5_encoder)
+{
+ mdp5_encoder->bsc = msm_bus_scale_register_client(
+ &mdp_bus_scale_table);
+ DBG("bus scale client: %08x", mdp5_encoder->bsc);
+}
+
+static void bs_fini(struct mdp5_encoder *mdp5_encoder)
+{
+ if (mdp5_encoder->bsc) {
+ msm_bus_scale_unregister_client(mdp5_encoder->bsc);
+ mdp5_encoder->bsc = 0;
+ }
+}
+
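+/* select a bus scaling usecase: idx 0 is idle, idx 1 requests full
+ * bandwidth (see mdp_bus_vectors above):
+ */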
+static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx)
+{
+ if (mdp5_encoder->bsc) {
+ DBG("set bus scaling: %d", idx);
+ /* HACK: scaling down, and then immediately back up
+ * seems to leave things broken (underflow).. so
+ * never disable:
+ */
+ idx = 1;
+ msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx);
+ }
+}
+#else
+static void bs_init(struct mdp5_encoder *mdp5_encoder) {}
+static void bs_fini(struct mdp5_encoder *mdp5_encoder) {}
+static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {}
+#endif
+
+static void mdp5_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+
+ bs_fini(mdp5_encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mdp5_encoder);
+}
+
+static const struct drm_encoder_funcs mdp5_encoder_funcs = {
+ .destroy = mdp5_encoder_destroy,
+};
+
+static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int intf = mdp5_encoder->intf;
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+ DBG("mode=%d", mode);
+
+ if (enabled == mdp5_encoder->enabled)
+ return;
+
+ if (enabled) {
+ bs_set(mdp5_encoder, 1);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
+ } else {
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
+ bs_set(mdp5_encoder, 0);
+ }
+
+ mdp5_encoder->enabled = enabled;
+}
+
+static bool mdp5_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int intf = mdp5_encoder->intf;
+ uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+ uint32_t display_v_start, display_v_end;
+ uint32_t hsync_start_x, hsync_end_x;
+ uint32_t format;
+
+ mode = adjusted_mode;
+
+ DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ ctrl_pol = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW;
+ /* probably need to get DATA_EN polarity from panel.. */
+
+ dtv_hsync_skew = 0; /* get this from panel? */
+ format = 0x213f; /* get this from panel? */
+
+ hsync_start_x = (mode->htotal - mode->hsync_start);
+ hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+ vsync_period = mode->vtotal * mode->htotal;
+ vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+ display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
+ display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
+
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
+ MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
+ MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_PERIOD_F0(intf), vsync_period);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_LEN_F0(intf), vsync_len);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_HCTL(intf),
+ MDP5_INTF_DISPLAY_HCTL_START(hsync_start_x) |
+ MDP5_INTF_DISPLAY_HCTL_END(hsync_end_x));
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VSTART_F0(intf), display_v_start);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VEND_F0(intf), display_v_end);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_BORDER_COLOR(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_UNDERFLOW_COLOR(intf), 0xff);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_SKEW(intf), dtv_hsync_skew);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_POLARITY_CTL(intf), ctrl_pol);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_HCTL(intf),
+ MDP5_INTF_ACTIVE_HCTL_START(0) |
+ MDP5_INTF_ACTIVE_HCTL_END(0));
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VSTART_F0(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
+}
+
+static void mdp5_encoder_prepare(struct drm_encoder *encoder)
+{
+ mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp5_encoder_commit(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+
+ mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf,
+ mdp5_encoder->intf_id);
+ mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
+ .dpms = mdp5_encoder_dpms,
+ .mode_fixup = mdp5_encoder_mode_fixup,
+ .mode_set = mdp5_encoder_mode_set,
+ .prepare = mdp5_encoder_prepare,
+ .commit = mdp5_encoder_commit,
+};
+
+/* initialize encoder */
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
+ enum mdp5_intf intf_id)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp5_encoder *mdp5_encoder;
+ int ret;
+
+ mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
+ if (!mdp5_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mdp5_encoder->intf = intf;
+ mdp5_encoder->intf_id = intf_id;
+ encoder = &mdp5_encoder->base;
+
+ drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
+
+ bs_init(mdp5_encoder);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp5_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
new file mode 100644
index 00000000000..f2b985bc2ad
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp5_kms.h"
+
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
+}
+
+static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+void mdp5_irq_preinstall(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
+}
+
+int mdp5_irq_postinstall(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+ struct mdp_irq *error_handler = &mdp5_kms->error_handler;
+
+ error_handler->irq = mdp5_irq_error_handler;
+ error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN |
+ MDP5_IRQ_INTF1_UNDER_RUN |
+ MDP5_IRQ_INTF2_UNDER_RUN |
+ MDP5_IRQ_INTF3_UNDER_RUN;
+
+ mdp_irq_register(mdp_kms, error_handler);
+
+ return 0;
+}
+
+void mdp5_irq_uninstall(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+}
+
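+/* handle MDP core interrupts: ack the status bits, dispatch any
+ * registered mdp_irq handlers, and forward vblanks to drm:
+ */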
+static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ unsigned int id;
+ uint32_t status;
+
+ status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
+
+ VERB("status=%08x", status);
+
+ mdp_dispatch_irqs(mdp_kms, status);
+
+ for (id = 0; id < priv->num_crtcs; id++)
+ if (status & mdp5_crtc_vblank(priv->crtcs[id]))
+ drm_handle_vblank(dev, id);
+}
+
+irqreturn_t mdp5_irq(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+ uint32_t intr;
+
+ intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS);
+
+ VERB("intr=%08x", intr);
+
+ if (intr & MDP5_HW_INTR_STATUS_INTR_MDP)
+ mdp5_irq_mdp(mdp_kms);
+
+ if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI)
+ hdmi_irq(0, mdp5_kms->hdmi);
+
+ return IRQ_HANDLED;
+}
+
+int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp5_crtc_vblank(crtc), true);
+ return 0;
+}
+
+void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp5_crtc_vblank(crtc), false);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
new file mode 100644
index 00000000000..71510ee26e9
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "mdp5_kms.h"
+
+static const char *iommu_ports[] = {
+ "mdp_0",
+};
+
+static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
+
+static int mdp5_hw_init(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct drm_device *dev = mdp5_kms->dev;
+ uint32_t version, major, minor;
+ int ret = 0;
+
+ pm_runtime_get_sync(dev->dev);
+
+ mdp5_enable(mdp5_kms);
+ version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
+ mdp5_disable(mdp5_kms);
+
+ major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
+ minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
+
+ DBG("found MDP5 version v%d.%d", major, minor);
+
+ if ((major != 1) || ((minor != 0) && (minor != 2))) {
+ dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto out;
+ }
+
+ mdp5_kms->rev = minor;
+
+ /* Magic unknown register writes:
+ *
+ * W VBIF:0x004 00000001 (mdss_mdp.c:839)
+ * W MDP5:0x2e0 0xe9 (mdss_mdp.c:839)
+ * W MDP5:0x2e4 0x55 (mdss_mdp.c:839)
+ * W MDP5:0x3ac 0xc0000ccc (mdss_mdp.c:839)
+ * W MDP5:0x3b4 0xc0000ccc (mdss_mdp.c:839)
+ * W MDP5:0x3bc 0xcccccc (mdss_mdp.c:839)
+ * W MDP5:0x4a8 0xcccc0c0 (mdss_mdp.c:839)
+ * W MDP5:0x4b0 0xccccc0c0 (mdss_mdp.c:839)
+ * W MDP5:0x4b8 0xccccc000 (mdss_mdp.c:839)
+ *
+ * The downstream fbdev driver gets these register offsets/values
+ * from DT.. not really sure what these registers are, or whether
+ * different boards/SoC's need different values. I guess they
+ * are the golden registers.
+ *
+ * Not setting these does not seem to cause any problem. But
+ * we may be getting lucky with the bootloader initializing
+ * them for us. OTOH, if we can always count on the bootloader
+ * setting the golden registers, then perhaps we don't need to
+ * care.
+ */
+
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(0), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(1), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(2), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(3), 0);
+
+out:
+ pm_runtime_put_sync(dev->dev);
+
+ return ret;
+}
+
+static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder)
+{
+ return rate;
+}
+
+static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
+ unsigned i;
+
+ for (i = 0; i < priv->num_crtcs; i++)
+ mdp5_crtc_cancel_pending_flip(priv->crtcs[i], file);
+}
+
+static void mdp5_destroy(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct msm_mmu *mmu = mdp5_kms->mmu;
+
+ if (mmu) {
+ mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
+ mmu->funcs->destroy(mmu);
+ }
+ kfree(mdp5_kms);
+}
+
+static const struct mdp_kms_funcs kms_funcs = {
+ .base = {
+ .hw_init = mdp5_hw_init,
+ .irq_preinstall = mdp5_irq_preinstall,
+ .irq_postinstall = mdp5_irq_postinstall,
+ .irq_uninstall = mdp5_irq_uninstall,
+ .irq = mdp5_irq,
+ .enable_vblank = mdp5_enable_vblank,
+ .disable_vblank = mdp5_disable_vblank,
+ .get_format = mdp_get_format,
+ .round_pixclk = mdp5_round_pixclk,
+ .preclose = mdp5_preclose,
+ .destroy = mdp5_destroy,
+ },
+ .set_irqmask = mdp5_set_irqmask,
+};
+
+int mdp5_disable(struct mdp5_kms *mdp5_kms)
+{
+ DBG("");
+
+ clk_disable_unprepare(mdp5_kms->ahb_clk);
+ clk_disable_unprepare(mdp5_kms->axi_clk);
+ clk_disable_unprepare(mdp5_kms->core_clk);
+ clk_disable_unprepare(mdp5_kms->lut_clk);
+
+ return 0;
+}
+
+int mdp5_enable(struct mdp5_kms *mdp5_kms)
+{
+ DBG("");
+
+ clk_prepare_enable(mdp5_kms->ahb_clk);
+ clk_prepare_enable(mdp5_kms->axi_clk);
+ clk_prepare_enable(mdp5_kms->core_clk);
+ clk_prepare_enable(mdp5_kms->lut_clk);
+
+ return 0;
+}
+
+static int modeset_init(struct mdp5_kms *mdp5_kms)
+{
+ static const enum mdp5_pipe crtcs[] = {
+ SSPP_RGB0, SSPP_RGB1, SSPP_RGB2,
+ };
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ int i, ret;
+
+ /* construct CRTCs: */
+ for (i = 0; i < ARRAY_SIZE(crtcs); i++) {
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+
+ plane = mdp5_plane_init(dev, crtcs[i], true);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
+ pipe2name(crtcs[i]), ret);
+ goto fail;
+ }
+
+ crtc = mdp5_crtc_init(dev, plane, i);
+ if (IS_ERR(crtc)) {
+ ret = PTR_ERR(crtc);
+ dev_err(dev->dev, "failed to construct crtc for %s (%d)\n",
+ pipe2name(crtcs[i]), ret);
+ goto fail;
+ }
+ priv->crtcs[priv->num_crtcs++] = crtc;
+ }
+
+ /* Construct encoder for HDMI: */
+ encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
+ if (IS_ERR(encoder)) {
+ dev_err(dev->dev, "failed to construct encoder\n");
+ ret = PTR_ERR(encoder);
+ goto fail;
+ }
+
+ /* NOTE: the vsync and error irq's are actually associated with
+ * the INTF/encoder.. the easiest way to deal with this (ie. what
+ * we do now) is assume a fixed relationship between crtc's and
+ * encoders. I'm not sure if there is ever a need to more freely
+ * assign crtcs to encoders, but if there is then we need to take
+ * care of error and vblank irq's that the crtc has registered,
+ * and also update user-requested vblank_mask.
+ */
+ encoder->possible_crtcs = BIT(0);
+ mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
+
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ /* Construct bridge/connector for HDMI: */
+ mdp5_kms->hdmi = hdmi_init(dev, encoder);
+ if (IS_ERR(mdp5_kms->hdmi)) {
+ ret = PTR_ERR(mdp5_kms->hdmi);
+ dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static int get_clk(struct platform_device *pdev, struct clk **clkp,
+ const char *name)
+{
+ struct device *dev = &pdev->dev;
+ struct clk *clk = devm_clk_get(dev, name);
+
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+ *clkp = clk;
+ return 0;
+}
+
+struct msm_kms *mdp5_kms_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct mdp5_platform_config *config = mdp5_get_config(pdev);
+ struct mdp5_kms *mdp5_kms;
+ struct msm_kms *kms = NULL;
+ struct msm_mmu *mmu;
+ int ret;
+
+ mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
+ if (!mdp5_kms) {
+ dev_err(dev->dev, "failed to allocate kms\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mdp_kms_init(&mdp5_kms->base, &kms_funcs);
+
+ kms = &mdp5_kms->base.base;
+
+ mdp5_kms->dev = dev;
+ mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
+
+ mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
+ if (IS_ERR(mdp5_kms->mmio)) {
+ ret = PTR_ERR(mdp5_kms->mmio);
+ goto fail;
+ }
+
+ mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+ if (IS_ERR(mdp5_kms->vbif)) {
+ ret = PTR_ERR(mdp5_kms->vbif);
+ goto fail;
+ }
+
+ mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(mdp5_kms->vdd)) {
+ ret = PTR_ERR(mdp5_kms->vdd);
+ goto fail;
+ }
+
+ ret = regulator_enable(mdp5_kms->vdd);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+ goto fail;
+ }
+
+ ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk");
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk");
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src");
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk");
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk");
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
+ if (ret)
+ goto fail;
+
+ ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
+ if (ret)
+ dev_warn(dev->dev, "failed to set core clk rate: %d\n", ret);
+
+ /* make sure things are off before attaching iommu (bootloader could
+ * have left things on, in which case we'll start getting faults if
+ * we don't disable):
+ */
+ mdp5_enable(mdp5_kms);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(0), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(1), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(2), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(3), 0);
+ mdp5_disable(mdp5_kms);
+ mdelay(16);
+
+ if (config->iommu) {
+ mmu = msm_iommu_new(dev, config->iommu);
+ if (IS_ERR(mmu)) {
+ ret = PTR_ERR(mmu);
+ dev_err(dev->dev, "failed to init iommu: %d\n", ret);
+ goto fail;
+ }
+
+ ret = mmu->funcs->attach(mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret) {
+ dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
+ mmu->funcs->destroy(mmu);
+ goto fail;
+ }
+ } else {
+ dev_info(dev->dev, "no iommu, fallback to phys "
+ "contig buffers for scanout\n");
+ mmu = NULL;
+ }
+ mdp5_kms->mmu = mmu;
+
+ mdp5_kms->id = msm_register_mmu(dev, mmu);
+ if (mdp5_kms->id < 0) {
+ ret = mdp5_kms->id;
+ dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
+ goto fail;
+ }
+
+ ret = modeset_init(mdp5_kms);
+ if (ret) {
+ dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+ goto fail;
+ }
+
+ return kms;
+
+fail:
+ if (kms)
+ mdp5_destroy(kms);
+ return ERR_PTR(ret);
+}
+
+static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
+{
+ static struct mdp5_platform_config config = {};
+#ifdef CONFIG_OF
+ /* TODO */
+#endif
+ return &config;
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
new file mode 100644
index 00000000000..6e981b692d1
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP5_KMS_H__
+#define __MDP5_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp/mdp_kms.h"
+#include "mdp5.xml.h"
+#include "mdp5_smp.h"
+
+struct mdp5_kms {
+ struct mdp_kms base;
+
+ struct drm_device *dev;
+
+ int rev;
+
+ /* mapper-id used to request GEM buffer mapped for scanout: */
+ int id;
+ struct msm_mmu *mmu;
+
+ /* for tracking smp allocation amongst pipes: */
+ mdp5_smp_state_t smp_state;
+ struct mdp5_client_smp_state smp_client_state[CID_MAX];
+ int smp_blk_cnt;
+
+ /* io/register spaces: */
+ void __iomem *mmio, *vbif;
+
+ struct regulator *vdd;
+
+ struct clk *axi_clk;
+ struct clk *ahb_clk;
+ struct clk *src_clk;
+ struct clk *core_clk;
+ struct clk *lut_clk;
+ struct clk *vsync_clk;
+
+ struct hdmi *hdmi;
+
+ struct mdp_irq error_handler;
+};
+#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
+
+/* platform config data (ie. from DT, or pdata) */
+struct mdp5_platform_config {
+ struct iommu_domain *iommu;
+ uint32_t max_clk;
+ int smp_blk_cnt;
+};
+
+static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
+{
+ msm_writel(data, mdp5_kms->mmio + reg);
+}
+
+static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
+{
+ return msm_readl(mdp5_kms->mmio + reg);
+}
+
+static inline const char *pipe2name(enum mdp5_pipe pipe)
+{
+ static const char *names[] = {
+#define NAME(n) [SSPP_ ## n] = #n
+ NAME(VIG0), NAME(VIG1), NAME(VIG2),
+ NAME(RGB0), NAME(RGB1), NAME(RGB2),
+ NAME(DMA0), NAME(DMA1),
+#undef NAME
+ };
+ return names[pipe];
+}
+
+static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
+{
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
+ case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
+ case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
+ case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
+ case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
+ case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
+ case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
+ case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+ default: return 0;
+ }
+}
+
+static inline int pipe2nclients(enum mdp5_pipe pipe)
+{
+ switch (pipe) {
+ case SSPP_RGB0:
+ case SSPP_RGB1:
+ case SSPP_RGB2:
+ return 1;
+ default:
+ return 3;
+ }
+}
+
+static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
+{
+ WARN_ON(plane >= pipe2nclients(pipe));
+ switch (pipe) {
+ case SSPP_VIG0: return CID_VIG0_Y + plane;
+ case SSPP_VIG1: return CID_VIG1_Y + plane;
+ case SSPP_VIG2: return CID_VIG2_Y + plane;
+ case SSPP_RGB0: return CID_RGB0;
+ case SSPP_RGB1: return CID_RGB1;
+ case SSPP_RGB2: return CID_RGB2;
+ case SSPP_DMA0: return CID_DMA0_Y + plane;
+ case SSPP_DMA1: return CID_DMA1_Y + plane;
+ default: return CID_UNUSED;
+ }
+}
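+
+/*
+ * Illustrative note (not driver code): per the mapping above, a
+ * three-plane YUV buffer scanned out on VIG1 uses one client per plane:
+ *
+ *	pipe2client(SSPP_VIG1, 0) => CID_VIG1_Y
+ *	pipe2client(SSPP_VIG1, 1) => CID_VIG1_Y + 1
+ *	pipe2client(SSPP_VIG1, 2) => CID_VIG1_Y + 2
+ *
+ * while an RGB pipe always maps to its single client (e.g. CID_RGB0).
+ */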
+
+static inline uint32_t mixer2flush(int lm)
+{
+ switch (lm) {
+ case 0: return MDP5_CTL_FLUSH_LM0;
+ case 1: return MDP5_CTL_FLUSH_LM1;
+ case 2: return MDP5_CTL_FLUSH_LM2;
+ default: return 0;
+ }
+}
+
+static inline uint32_t intf2err(int intf)
+{
+ switch (intf) {
+ case 0: return MDP5_IRQ_INTF0_UNDER_RUN;
+ case 1: return MDP5_IRQ_INTF1_UNDER_RUN;
+ case 2: return MDP5_IRQ_INTF2_UNDER_RUN;
+ case 3: return MDP5_IRQ_INTF3_UNDER_RUN;
+ default: return 0;
+ }
+}
+
+static inline uint32_t intf2vblank(int intf)
+{
+ switch (intf) {
+ case 0: return MDP5_IRQ_INTF0_VSYNC;
+ case 1: return MDP5_IRQ_INTF1_VSYNC;
+ case 2: return MDP5_IRQ_INTF2_VSYNC;
+ case 3: return MDP5_IRQ_INTF3_VSYNC;
+ default: return 0;
+ }
+}
+
+int mdp5_disable(struct mdp5_kms *mdp5_kms);
+int mdp5_enable(struct mdp5_kms *mdp5_kms);
+
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp5_irq_preinstall(struct msm_kms *kms);
+int mdp5_irq_postinstall(struct msm_kms *kms);
+void mdp5_irq_uninstall(struct msm_kms *kms);
+irqreturn_t mdp5_irq(struct msm_kms *kms);
+int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+static inline
+uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
+ uint32_t max_formats)
+{
+ /* TODO when we have YUV, we need to filter supported formats
+ * based on pipe id..
+ */
+ return mdp_get_formats(pixel_formats, max_formats);
+}
+
+void mdp5_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj);
+void mdp5_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+int mdp5_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+void mdp5_plane_complete_flip(struct drm_plane *plane);
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+ enum mdp5_pipe pipe, bool private_plane);
+
+uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
+
+void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
+ enum mdp5_intf intf_id);
+void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
+void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
+struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, int id);
+
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
+ enum mdp5_intf intf_id);
+
+#endif /* __MDP5_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
new file mode 100644
index 00000000000..f3daec4412a
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -0,0 +1,394 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+
+struct mdp5_plane {
+ struct drm_plane base;
+ const char *name;
+
+ enum mdp5_pipe pipe;
+
+ uint32_t nformats;
+ uint32_t formats[32];
+
+ bool enabled;
+};
+#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
+
+static struct mdp5_kms *get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static int mdp5_plane_update(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+ mdp5_plane->enabled = true;
+
+ if (plane->fb)
+ drm_framebuffer_unreference(plane->fb);
+
+ drm_framebuffer_reference(fb);
+
+ return mdp5_plane_mode_set(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+}
+
+static int mdp5_plane_disable(struct drm_plane *plane)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+ int i;
+
+ DBG("%s: disable", mdp5_plane->name);
+
+ /* update our SMP request to zero (release all our blks): */
+ for (i = 0; i < pipe2nclients(pipe); i++)
+ mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);
+
+ /* TODO detaching now will cause us not to get the last
+ * vblank and mdp5_smp_commit().. so other planes will
+ * still see smp blocks previously allocated to us as
+ * in-use..
+ */
+ if (plane->crtc)
+ mdp5_crtc_detach(plane->crtc, plane);
+
+ return 0;
+}
+
+static void mdp5_plane_destroy(struct drm_plane *plane)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct msm_drm_private *priv = plane->dev->dev_private;
+
+ if (priv->kms)
+ mdp5_plane_disable(plane);
+
+ drm_plane_cleanup(plane);
+
+ kfree(mdp5_plane);
+}
+
+/* helper to install properties which are common to planes and crtcs */
+void mdp5_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj)
+{
+ // XXX
+}
+
+static int mdp5_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val)
+{
+ // XXX
+ return -EINVAL;
+}
+
+static const struct drm_plane_funcs mdp5_plane_funcs = {
+ .update_plane = mdp5_plane_update,
+ .disable_plane = mdp5_plane_disable,
+ .destroy = mdp5_plane_destroy,
+ .set_property = mdp5_plane_set_property,
+};
+
+void mdp5_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+ uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
+ uint32_t iova[4];
+ int i;
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
+ msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
+ }
+ for (; i < 4; i++)
+ iova[i] = 0;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
+ MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+ MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
+ MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+ MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
+
+ plane->fb = fb;
+}
+
+/* NOTE: looks like if horizontal decimation is used (if we supported that)
+ * then the width used to calculate SMP block requirements is the post-
+ * decimated width. Ie. SMP buffering sits downstream of decimation (which
+ * presumably happens during the dma from scanout buffer).
+ */
+static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
+ uint32_t nplanes, uint32_t width)
+{
+ struct drm_device *dev = plane->dev;
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+ int i, hsub, nlines, nblks, ret;
+
+ hsub = drm_format_horz_chroma_subsampling(format);
+
+	/* this would be different if BWC (bandwidth compression) were enabled: */
+ nlines = 2;
+
+ for (i = 0, nblks = 0; i < nplanes; i++) {
+ int n, fetch_stride, cpp;
+
+ cpp = drm_format_plane_cpp(format, i);
+ fetch_stride = width * cpp / (i ? hsub : 1);
+
+ n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);
+
+ /* for hw rev v1.00 */
+ if (mdp5_kms->rev == 0)
+ n = roundup_pow_of_two(n);
+
+ DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
+ ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
+ if (ret) {
+ dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
+ n, ret);
+ return ret;
+ }
+
+ nblks += n;
+ }
+
+ /* in success case, return total # of blocks allocated: */
+ return nblks;
+}
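+
+/*
+ * Worked example (illustrative only): a single-plane RGB565 buffer
+ * (cpp = 2) at width 1920 gives fetch_stride = 3840 bytes; with
+ * nlines = 2 that is 7680 bytes, and DIV_ROUND_UP(7680, SMP_BLK_SIZE)
+ * with SMP_BLK_SIZE = 4096 yields 2 blocks (already a power of two,
+ * so hw rev v1.00 needs no extra rounding).
+ */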
+
+static void set_fifo_thresholds(struct drm_plane *plane, int nblks)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+ uint32_t val;
+
+ /* 1/4 of SMP pool that is being fetched */
+ val = (nblks * SMP_ENTRIES_PER_BLK) / 4;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+}
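+
+/*
+ * Illustrative arithmetic (not driver code): with SMP_BLK_SIZE = 4096,
+ * SMP_ENTRIES_PER_BLK = 4096 / 16 = 256.  For nblks = 4 the base value
+ * is (4 * 256) / 4 = 256, so the three watermarks are programmed to
+ * 256, 512 and 768 entries.
+ */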
+
+int mdp5_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+ const struct mdp_format *format;
+ uint32_t nplanes, config = 0;
+ uint32_t phasex_step = 0, phasey_step = 0;
+ uint32_t hdecm = 0, vdecm = 0;
+ int i, nblks;
+
+ nplanes = drm_format_num_planes(fb->pixel_format);
+
+ /* bad formats should already be rejected: */
+ if (WARN_ON(nplanes > pipe2nclients(pipe)))
+ return -EINVAL;
+
+ /* src values are in Q16 fixed point, convert to integer: */
+ src_x = src_x >> 16;
+ src_y = src_y >> 16;
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
+
+ DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp5_plane->name,
+ fb->base.id, src_x, src_y, src_w, src_h,
+ crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
+
+ /*
+ * Calculate and request required # of smp blocks:
+ */
+ nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w);
+ if (nblks < 0)
+ return nblks;
+
+ /*
+ * Currently we update the hw for allocations/requests immediately,
+ * but once atomic modeset/pageflip is in place, the allocation
+ * would move into atomic->check_plane_state(), while updating the
+ * hw would remain here:
+ */
+ for (i = 0; i < pipe2nclients(pipe); i++)
+ mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
+
+ if (src_w != crtc_w) {
+ config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
+ /* TODO calc phasex_step, hdecm */
+ }
+
+ if (src_h != crtc_h) {
+ config |= MDP5_PIPE_SCALE_CONFIG_SCALEY_EN;
+ /* TODO calc phasey_step, vdecm */
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
+ MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
+ MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
+ MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
+ MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
+ MDP5_PIPE_SRC_XY_X(src_x) |
+ MDP5_PIPE_SRC_XY_Y(src_y));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
+ MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
+ MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
+ MDP5_PIPE_OUT_XY_X(crtc_x) |
+ MDP5_PIPE_OUT_XY_Y(crtc_y));
+
+ mdp5_plane_set_scanout(plane, fb);
+
+ format = to_mdp_format(msm_framebuffer_format(fb));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
+ MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
+ MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
+ MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
+ MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+ COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
+ MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+ MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
+ COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
+ MDP5_PIPE_SRC_FORMAT_NUM_PLANES(nplanes - 1) |
+ MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(CHROMA_RGB));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
+ MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
+ MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
+
+ /* not using secure mode: */
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), phasex_step);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), phasey_step);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
+ MDP5_PIPE_DECIMATION_VERT(vdecm) |
+ MDP5_PIPE_DECIMATION_HORZ(hdecm));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(SCALE_FILTER_NEAREST) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(SCALE_FILTER_NEAREST) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(SCALE_FILTER_NEAREST) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(SCALE_FILTER_NEAREST) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
+
+ set_fifo_thresholds(plane, nblks);
+
+ /* TODO detach from old crtc (if we had more than one) */
+ mdp5_crtc_attach(crtc, plane);
+
+ return 0;
+}
+
+void mdp5_plane_complete_flip(struct drm_plane *plane)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
+ int i;
+
+ for (i = 0; i < pipe2nclients(pipe); i++)
+ mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i));
+}
+
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ return mdp5_plane->pipe;
+}
+
+/* initialize plane */
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+ enum mdp5_pipe pipe, bool private_plane)
+{
+ struct drm_plane *plane = NULL;
+ struct mdp5_plane *mdp5_plane;
+ int ret;
+ enum drm_plane_type type;
+
+ mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
+ if (!mdp5_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ plane = &mdp5_plane->base;
+
+ mdp5_plane->pipe = pipe;
+ mdp5_plane->name = pipe2name(pipe);
+
+ mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
+ ARRAY_SIZE(mdp5_plane->formats));
+
+ type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+			mdp5_plane->formats, mdp5_plane->nformats,
+			type);
+	if (ret)
+		goto fail;
+
+ mdp5_plane_install_properties(plane, &plane->base);
+
+ return plane;
+
+fail:
+ if (plane)
+ mdp5_plane_destroy(plane);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
new file mode 100644
index 00000000000..2d0236b963a
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "mdp5_kms.h"
+#include "mdp5_smp.h"
+
+
+/* SMP - Shared Memory Pool
+ *
+ * These are shared between all the clients, where each plane in a
+ * scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on
+ * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
+ *
+ * Based on the size of the attached scanout buffer, a certain # of
+ * blocks must be allocated to that client out of the shared pool.
+ *
+ * For each block, it can be either free, or pending/in-use by a
+ * client. The updates happen in three steps:
+ *
+ * 1) mdp5_smp_request():
+ *    When plane scanout is set up, calculate the number of blocks
+ *    needed per client, and request them.  Blocks not in use or
+ *    pending by any other client are added to the client's pending
+ *    set.
+ *
+ * 2) mdp5_smp_configure():
+ * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
+ * are configured for the union(pending, inuse)
+ *
+ * 3) mdp5_smp_commit():
+ * After next vblank, copy pending -> inuse. Optionally update
+ * MDP5_SMP_ALLOC registers if there are newly unused blocks
+ *
+ * On the next vblank after changes have been committed to hw, the
+ * client's pending blocks become its in-use blocks (and blocks that
+ * are no longer in use become available to other clients).
+ *
+ * btw, hurray for confusing overloaded acronyms! :-/
+ *
+ * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
+ * should happen at (or before?) atomic->check().  And we'd need
+ * an API to discard previous requests if an update is aborted or
+ * test-only.
+ *
+ * TODO would perhaps be nice to have debugfs to dump out kernel
+ * inuse and pending state of all clients..
+ */
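+
+/*
+ * Minimal usage sketch (illustrative only; mdp5_plane.c is the real
+ * user).  For an RGB pipe, which has a single SMP client:
+ *
+ *	ret = mdp5_smp_request(mdp5_kms, CID_RGB0, nblks);  (step #1)
+ *	... program pipe registers ...
+ *	mdp5_smp_configure(mdp5_kms, CID_RGB0);             (step #2)
+ *	... after the next vblank ...
+ *	mdp5_smp_commit(mdp5_kms, CID_RGB0);                (step #3)
+ */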
+
+static DEFINE_SPINLOCK(smp_lock);
+
+
+/* step #1: update # of blocks pending for the client: */
+int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
+ enum mdp5_client_id cid, int nblks)
+{
+ struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
+ int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
+ unsigned long flags;
+
+ spin_lock_irqsave(&smp_lock, flags);
+
+ avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
+ if (nblks > avail) {
+ ret = -ENOSPC;
+ goto fail;
+ }
+
+ cur_nblks = bitmap_weight(ps->pending, cnt);
+ if (nblks > cur_nblks) {
+ /* grow the existing pending reservation: */
+ for (i = cur_nblks; i < nblks; i++) {
+ int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
+ set_bit(blk, ps->pending);
+ set_bit(blk, mdp5_kms->smp_state);
+ }
+ } else {
+ /* shrink the existing pending reservation: */
+ for (i = cur_nblks; i > nblks; i--) {
+ int blk = find_first_bit(ps->pending, cnt);
+ clear_bit(blk, ps->pending);
+ /* don't clear in global smp_state until _commit() */
+ }
+ }
+
+	ret = 0;
+fail:
+	spin_unlock_irqrestore(&smp_lock, flags);
+	return ret;
+}
+
+static void update_smp_state(struct mdp5_kms *mdp5_kms,
+ enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
+{
+ int cnt = mdp5_kms->smp_blk_cnt;
+ uint32_t blk, val;
+
+ for_each_set_bit(blk, *assigned, cnt) {
+ int idx = blk / 3;
+ int fld = blk % 3;
+
+ val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
+
+ switch (fld) {
+ case 0:
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
+ break;
+ case 1:
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
+ break;
+ case 2:
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
+ break;
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
+ }
+}
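+
+/*
+ * Illustrative note (not driver code): each ALLOC register word holds
+ * three client fields, so e.g. block 7 lands in word idx = 7 / 3 = 2,
+ * field fld = 7 % 3 = 1, i.e. the CLIENT1 field of ALLOC_W_REG(2) (and
+ * the mirrored ALLOC_R_REG(2)).
+ */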
+
+/* step #2: configure hw for union(pending, inuse): */
+void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+{
+ struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
+ int cnt = mdp5_kms->smp_blk_cnt;
+ mdp5_smp_state_t assigned;
+
+ bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+ update_smp_state(mdp5_kms, cid, &assigned);
+}
+
+/* step #3: after vblank, copy pending -> inuse: */
+void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+{
+ struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
+ int cnt = mdp5_kms->smp_blk_cnt;
+ mdp5_smp_state_t released;
+
+ /*
+	 * Figure out if there are any blocks we were previously
+ * using, which can be released and made available to other
+ * clients:
+ */
+ if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&smp_lock, flags);
+ /* clear released blocks: */
+ bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
+ released, cnt);
+ spin_unlock_irqrestore(&smp_lock, flags);
+
+ update_smp_state(mdp5_kms, CID_UNUSED, &released);
+ }
+
+ bitmap_copy(ps->inuse, ps->pending, cnt);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
new file mode 100644
index 00000000000..0ab739e1a1d
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP5_SMP_H__
+#define __MDP5_SMP_H__
+
+#include "msm_drv.h"
+
+#define MAX_SMP_BLOCKS 22
+#define SMP_BLK_SIZE 4096
+#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
+
+typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
+
+struct mdp5_client_smp_state {
+ mdp5_smp_state_t inuse;
+ mdp5_smp_state_t pending;
+};
+
+struct mdp5_kms;
+
+int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks);
+void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
+void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
+
+
+#endif /* __MDP5_SMP_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
new file mode 100644
index 00000000000..a9629b85b98
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -0,0 +1,78 @@
+#ifndef MDP_COMMON_XML
+#define MDP_COMMON_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp_mixer_stage_id {
+ STAGE_UNUSED = 0,
+ STAGE_BASE = 1,
+ STAGE0 = 2,
+ STAGE1 = 3,
+ STAGE2 = 4,
+ STAGE3 = 5,
+};
+
+enum mdp_alpha_type {
+ FG_CONST = 0,
+ BG_CONST = 1,
+ FG_PIXEL = 2,
+ BG_PIXEL = 3,
+};
+
+enum mdp_bpc {
+ BPC1 = 0,
+ BPC5 = 1,
+ BPC6 = 2,
+ BPC8 = 3,
+};
+
+enum mdp_bpc_alpha {
+ BPC1A = 0,
+ BPC4A = 1,
+ BPC6A = 2,
+ BPC8A = 3,
+};
+
+
+#endif /* MDP_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
new file mode 100644
index 00000000000..e0a6ffbe6ab
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_format.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp_kms.h"
+
+#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \
+ .base = { .pixel_format = DRM_FORMAT_ ## name }, \
+ .bpc_a = BPC ## a ## A, \
+ .bpc_r = BPC ## r, \
+ .bpc_g = BPC ## g, \
+ .bpc_b = BPC ## b, \
+ .unpack = { e0, e1, e2, e3 }, \
+ .alpha_enable = alpha, \
+ .unpack_tight = tight, \
+ .cpp = c, \
+ .unpack_count = cnt, \
+ }
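+
+/*
+ * For illustration: FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3)
+ * below expands to bpc_a = BPC0A, bpc_r = BPC5, bpc_g = BPC6,
+ * bpc_b = BPC5, unpack order {1, 0, 2, 0}, no alpha, tightly packed,
+ * 2 bytes per pixel, 3 unpacked components.
+ */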
+
+#define BPC0A 0
+
+static const struct mdp_format formats[] = {
+ /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */
+ FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4),
+ FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4),
+ FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3),
+ FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3),
+ FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3),
+ FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3),
+};
+
+uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats)
+{
+ uint32_t i;
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ const struct mdp_format *f = &formats[i];
+
+ if (i == max_formats)
+ break;
+
+ pixel_formats[i] = f->base.pixel_format;
+ }
+
+ return i;
+}
+
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ const struct mdp_format *f = &formats[i];
+ if (f->base.pixel_format == format)
+ return &f->base;
+ }
+ return NULL;
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
new file mode 100644
index 00000000000..03455b64a24
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp_kms.h"
+
+
+struct mdp_irq_wait {
+ struct mdp_irq irq;
+ int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static DEFINE_SPINLOCK(list_lock);
+
+static void update_irq(struct mdp_kms *mdp_kms)
+{
+ struct mdp_irq *irq;
+ uint32_t irqmask = mdp_kms->vblank_mask;
+
+ BUG_ON(!spin_is_locked(&list_lock));
+
+ list_for_each_entry(irq, &mdp_kms->irq_list, node)
+ irqmask |= irq->irqmask;
+
+ mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
+}
+
+static void update_irq_unlocked(struct mdp_kms *mdp_kms)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&list_lock, flags);
+ update_irq(mdp_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
+{
+ struct mdp_irq *handler, *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ mdp_kms->in_irq = true;
+ list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
+ if (handler->irqmask & status) {
+ spin_unlock_irqrestore(&list_lock, flags);
+ handler->irq(handler, handler->irqmask & status);
+ spin_lock_irqsave(&list_lock, flags);
+ }
+ }
+ mdp_kms->in_irq = false;
+ update_irq(mdp_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ if (enable)
+ mdp_kms->vblank_mask |= mask;
+ else
+ mdp_kms->vblank_mask &= ~mask;
+ update_irq(mdp_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp_irq_wait *wait =
+ container_of(irq, struct mdp_irq_wait, irq);
+ wait->count--;
+ wake_up_all(&wait_event);
+}
+
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+ struct mdp_irq_wait wait = {
+ .irq = {
+ .irq = wait_irq,
+ .irqmask = irqmask,
+ },
+ .count = 1,
+ };
+ mdp_irq_register(mdp_kms, &wait.irq);
+ wait_event_timeout(wait_event, (wait.count <= 0),
+ msecs_to_jiffies(100));
+ mdp_irq_unregister(mdp_kms, &wait.irq);
+}
+
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+ unsigned long flags;
+ bool needs_update = false;
+
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (!irq->registered) {
+ irq->registered = true;
+ list_add(&irq->node, &mdp_kms->irq_list);
+ needs_update = !mdp_kms->in_irq;
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ if (needs_update)
+ update_irq_unlocked(mdp_kms);
+}
+
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+ unsigned long flags;
+ bool needs_update = false;
+
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (irq->registered) {
+ irq->registered = false;
+ list_del(&irq->node);
+ needs_update = !mdp_kms->in_irq;
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ if (needs_update)
+ update_irq_unlocked(mdp_kms);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
new file mode 100644
index 00000000000..99557b5ad4f
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP_KMS_H__
+#define __MDP_KMS_H__
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp_common.xml.h"
+
+struct mdp_kms;
+
+struct mdp_kms_funcs {
+ struct msm_kms_funcs base;
+ void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask);
+};
+
+struct mdp_kms {
+ struct msm_kms base;
+
+ const struct mdp_kms_funcs *funcs;
+
+ /* irq handling: */
+ bool in_irq;
+ struct list_head irq_list; /* list of mdp4_irq */
+ uint32_t vblank_mask; /* irq bits set for userspace vblank */
+};
+#define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
+
+static inline void mdp_kms_init(struct mdp_kms *mdp_kms,
+ const struct mdp_kms_funcs *funcs)
+{
+ mdp_kms->funcs = funcs;
+ INIT_LIST_HEAD(&mdp_kms->irq_list);
+ msm_kms_init(&mdp_kms->base, &funcs->base);
+}
+
+/*
+ * irq helpers:
+ */
+
+/* For transiently registering for different MDP irqs that various parts
+ * of the KMS code need during setup/configuration. These are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping related irq usage.
+ */
+struct mdp_irq {
+ struct list_head node;
+ uint32_t irqmask;
+ bool registered;
+ void (*irq)(struct mdp_irq *irq, uint32_t irqstatus);
+};
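+
+/*
+ * Usage sketch (illustrative only; mdp_irq_wait() in mdp_kms.c is a
+ * real user).  The handler and variable names here are hypothetical:
+ *
+ *	static void my_handler(struct mdp_irq *irq, uint32_t irqstatus)
+ *	{
+ *		... handle the irq ...
+ *	}
+ *
+ *	static struct mdp_irq my_irq = {
+ *		.irq = my_handler,
+ *		.irqmask = some_irq_bits,
+ *	};
+ *
+ *	mdp_irq_register(mdp_kms, &my_irq);
+ *	...
+ *	mdp_irq_unregister(mdp_kms, &my_irq);
+ */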
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status);
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable);
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+
+
+/*
+ * pixel format helpers:
+ */
+
+struct mdp_format {
+ struct msm_format base;
+ enum mdp_bpc bpc_r, bpc_g, bpc_b;
+ enum mdp_bpc_alpha bpc_a;
+ uint8_t unpack[4];
+ bool alpha_enable, unpack_tight;
+ uint8_t cpp, unpack_count;
+};
+#define to_mdp_format(x) container_of(x, struct mdp_format, base)
+
+uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats);
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
+
+#endif /* __MDP_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
new file mode 100644
index 00000000000..9a5d87db5c2
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -0,0 +1,1049 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gpu.h"
+#include "msm_kms.h"
+
+static void msm_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ if (priv->fbdev)
+ drm_fb_helper_hotplug_event(priv->fbdev);
+}
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = msm_framebuffer_create,
+ .output_poll_changed = msm_fb_output_poll_changed,
+};
+
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ int idx = priv->num_mmus++;
+
+ if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
+ return -EINVAL;
+
+ priv->mmus[idx] = mmu;
+
+ return idx;
+}
+
+#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
+static bool reglog = false;
+MODULE_PARM_DESC(reglog, "Enable register read/write logging");
+module_param(reglog, bool, 0600);
+#else
+#define reglog 0
+#endif
+
+static char *vram = "16m";
+MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
+module_param(vram, charp, 0);
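+
+/* For example (illustrative, not driver code): booting with msm.vram=16m
+ * carves out 16 MiB for scanout buffers (memparse() accepts the usual
+ * k/m/g suffixes), and, when CONFIG_DRM_MSM_REGISTER_LOGGING is enabled,
+ * msm.reglog=1 enables the register access logging in the helpers below.
+ */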
+
+/*
+ * Util/helpers:
+ */
+
+void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
+ const char *dbgname)
+{
+ struct resource *res;
+ unsigned long size;
+ void __iomem *ptr;
+
+ if (name)
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ else
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ size = resource_size(res);
+
+ ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
+ if (!ptr) {
+ dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (reglog)
+ printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);
+
+ return ptr;
+}
+
+void msm_writel(u32 data, void __iomem *addr)
+{
+ if (reglog)
+ printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
+ writel(data, addr);
+}
+
+u32 msm_readl(const void __iomem *addr)
+{
+ u32 val = readl(addr);
+ if (reglog)
+		printk(KERN_DEBUG "IO:R %08x %08x\n", (u32)addr, val);
+ return val;
+}
+
+/*
+ * DRM operations:
+ */
+
+static int msm_unload(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_gpu *gpu = priv->gpu;
+
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+ drm_vblank_cleanup(dev);
+
+ pm_runtime_get_sync(dev->dev);
+ drm_irq_uninstall(dev);
+ pm_runtime_put_sync(dev->dev);
+
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+
+ if (kms) {
+ pm_runtime_disable(dev->dev);
+ kms->funcs->destroy(kms);
+ }
+
+ if (gpu) {
+ mutex_lock(&dev->struct_mutex);
+ gpu->funcs->pm_suspend(gpu);
+ gpu->funcs->destroy(gpu);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ if (priv->vram.paddr) {
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ drm_mm_takedown(&priv->vram.mm);
+ dma_free_attrs(dev->dev, priv->vram.size, NULL,
+ priv->vram.paddr, &attrs);
+ }
+
+ component_unbind_all(dev->dev, dev);
+
+ dev->dev_private = NULL;
+
+ kfree(priv);
+
+ return 0;
+}
+
+static int get_mdp_ver(struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ static const struct of_device_id match_types[] = { {
+ .compatible = "qcom,mdss_mdp",
+ .data = (void *)5,
+ }, {
+ /* end node */
+ } };
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *match;
+ match = of_match_node(match_types, dev->of_node);
+ if (match)
+		return (int)(unsigned long)match->data;
+#endif
+ return 4;
+}
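+
+/*
+ * Illustrative only: a DT node with compatible = "qcom,mdss_mdp"
+ * matches the table above and selects MDP5; any other (or missing)
+ * node falls back to MDP4.
+ */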
+
+static int msm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_private = priv;
+
+ priv->wq = alloc_ordered_workqueue("msm", 0);
+ init_waitqueue_head(&priv->fence_event);
+
+ INIT_LIST_HEAD(&priv->inactive_list);
+ INIT_LIST_HEAD(&priv->fence_cbs);
+
+ drm_mode_config_init(dev);
+
+ /* if we have no IOMMU, then we need to use carveout allocator.
+ * Grab the entire CMA chunk carved out in early startup in
+ * mach-msm:
+ */
+ if (!iommu_present(&platform_bus_type)) {
+ DEFINE_DMA_ATTRS(attrs);
+ unsigned long size;
+ void *p;
+
+ DBG("using %s VRAM carveout", vram);
+ size = memparse(vram, NULL);
+ priv->vram.size = size;
+
+ drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
+
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+
+ /* note that for no-kernel-mapping, the vaddr returned
+ * is bogus, but non-null if allocation succeeded:
+ */
+ p = dma_alloc_attrs(dev->dev, size,
+ &priv->vram.paddr, GFP_KERNEL, &attrs);
+ if (!p) {
+ dev_err(dev->dev, "failed to allocate VRAM\n");
+ priv->vram.paddr = 0;
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ dev_info(dev->dev, "VRAM: %08x->%08x\n",
+ (uint32_t)priv->vram.paddr,
+ (uint32_t)(priv->vram.paddr + size));
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ /* Bind all our sub-components: */
+ ret = component_bind_all(dev->dev, dev);
+ if (ret)
+ return ret;
+
+ switch (get_mdp_ver(pdev)) {
+ case 4:
+ kms = mdp4_kms_init(dev);
+ break;
+ case 5:
+ kms = mdp5_kms_init(dev);
+ break;
+ default:
+ kms = ERR_PTR(-ENODEV);
+ break;
+ }
+
+ if (IS_ERR(kms)) {
+ /*
+ * NOTE: once we have GPU support, having no kms should not
+ * be considered fatal.. ideally we would still support gpu
+ * and (for example) use dmabuf/prime to share buffers with
+ * imx drm driver on iMX5
+ */
+ dev_err(dev->dev, "failed to load kms\n");
+ ret = PTR_ERR(kms);
+ goto fail;
+ }
+
+ priv->kms = kms;
+
+ if (kms) {
+ pm_runtime_enable(dev->dev);
+ ret = kms->funcs->hw_init(kms);
+ if (ret) {
+ dev_err(dev->dev, "kms hw init failed: %d\n", ret);
+ goto fail;
+ }
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ dev->mode_config.funcs = &mode_config_funcs;
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize vblank\n");
+ goto fail;
+ }
+
+ pm_runtime_get_sync(dev->dev);
+ ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
+ pm_runtime_put_sync(dev->dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to install IRQ handler\n");
+ goto fail;
+ }
+
+#ifdef CONFIG_DRM_MSM_FBDEV
+ priv->fbdev = msm_fbdev_init(dev);
+#endif
+
+ ret = msm_debugfs_late_init(dev);
+ if (ret)
+ goto fail;
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+
+fail:
+ msm_unload(dev);
+ return ret;
+}
+
+static void load_gpu(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu;
+
+ if (priv->gpu)
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+ gpu = a3xx_gpu_init(dev);
+ if (IS_ERR(gpu)) {
+ dev_warn(dev->dev, "failed to load a3xx gpu\n");
+ gpu = NULL;
+ /* not fatal */
+ }
+
+ if (gpu) {
+ int ret;
+ gpu->funcs->pm_resume(gpu);
+ ret = gpu->funcs->hw_init(gpu);
+ if (ret) {
+ dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+ gpu->funcs->destroy(gpu);
+ gpu = NULL;
+ } else {
+ /* give inactive pm a chance to kick in: */
+ msm_gpu_retire(gpu);
+ }
+	}
+
+ priv->gpu = gpu;
+
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static int msm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_file_private *ctx;
+
+ /* For now, load gpu on open.. to avoid the requirement of having
+ * firmware in the initrd.
+ */
+ load_gpu(dev);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ file->driver_priv = ctx;
+
+ return 0;
+}
+
+static void msm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_file_private *ctx = file->driver_priv;
+ struct msm_kms *kms = priv->kms;
+
+ if (kms)
+ kms->funcs->preclose(kms, file);
+
+ mutex_lock(&dev->struct_mutex);
+ if (ctx == priv->lastctx)
+ priv->lastctx = NULL;
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(ctx);
+}
+
+static void msm_lastclose(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ if (priv->fbdev)
+ drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
+}
+
+static irqreturn_t msm_irq(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ BUG_ON(!kms);
+ return kms->funcs->irq(kms);
+}
+
+static void msm_irq_preinstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ BUG_ON(!kms);
+ kms->funcs->irq_preinstall(kms);
+}
+
+static int msm_irq_postinstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ BUG_ON(!kms);
+ return kms->funcs->irq_postinstall(kms);
+}
+
+static void msm_irq_uninstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ BUG_ON(!kms);
+ kms->funcs->irq_uninstall(kms);
+}
+
+static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ if (!kms)
+ return -ENXIO;
+ DBG("dev=%p, crtc=%d", dev, crtc_id);
+ return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
+}
+
+static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ if (!kms)
+ return;
+ DBG("dev=%p, crtc=%d", dev, crtc_id);
+ kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
+}
+
+/*
+ * DRM debugfs:
+ */
+
+#ifdef CONFIG_DEBUG_FS
+static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+
+ if (gpu) {
+ seq_printf(m, "%s Status:\n", gpu->name);
+ gpu->funcs->show(gpu, m);
+ }
+
+ return 0;
+}
+
+static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+
+ if (gpu) {
+ seq_printf(m, "Active Objects (%s):\n", gpu->name);
+ msm_gem_describe_objects(&gpu->active_list, m);
+ }
+
+ seq_printf(m, "Inactive Objects:\n");
+ msm_gem_describe_objects(&priv->inactive_list, m);
+
+ return 0;
+}
+
+static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
+{
+ return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
+}
+
+static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_framebuffer *fb, *fbdev_fb = NULL;
+
+ if (priv->fbdev) {
+ seq_printf(m, "fbcon ");
+ fbdev_fb = priv->fbdev->fb;
+ msm_framebuffer_describe(fbdev_fb, m);
+ }
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ if (fb == fbdev_fb)
+ continue;
+
+ seq_printf(m, "user ");
+ msm_framebuffer_describe(fb, m);
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return 0;
+}
+
+static int show_locked(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ int (*show)(struct drm_device *dev, struct seq_file *m) =
+ node->info_ent->data;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ ret = show(dev, m);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+static struct drm_info_list msm_debugfs_list[] = {
+		{"gpu", show_locked, 0, msm_gpu_show},
+		{"gem", show_locked, 0, msm_gem_show},
+		{"mm", show_locked, 0, msm_mm_show},
+		{"fb", show_locked, 0, msm_fb_show},
+};
+
+static int late_init_minor(struct drm_minor *minor)
+{
+ int ret;
+
+ if (!minor)
+ return 0;
+
+ ret = msm_rd_debugfs_init(minor);
+ if (ret) {
+ dev_err(minor->dev->dev, "could not install rd debugfs\n");
+ return ret;
+ }
+
+ ret = msm_perf_debugfs_init(minor);
+ if (ret) {
+ dev_err(minor->dev->dev, "could not install perf debugfs\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int msm_debugfs_late_init(struct drm_device *dev)
+{
+ int ret;
+ ret = late_init_minor(dev->primary);
+ if (ret)
+ return ret;
+ ret = late_init_minor(dev->render);
+ if (ret)
+ return ret;
+ ret = late_init_minor(dev->control);
+ return ret;
+}
+
+static int msm_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ int ret;
+
+ ret = drm_debugfs_create_files(msm_debugfs_list,
+ ARRAY_SIZE(msm_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install msm_debugfs_list\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void msm_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(msm_debugfs_list,
+ ARRAY_SIZE(msm_debugfs_list), minor);
+ if (!minor->dev->dev_private)
+ return;
+ msm_rd_debugfs_cleanup(minor);
+ msm_perf_debugfs_cleanup(minor);
+}
+#endif
+
+/*
+ * Fences:
+ */
+
+int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
+ struct timespec *timeout)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ int ret;
+
+ if (!priv->gpu)
+ return 0;
+
+ if (fence > priv->gpu->submitted_fence) {
+ DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
+ fence, priv->gpu->submitted_fence);
+ return -EINVAL;
+ }
+
+ if (!timeout) {
+ /* no-wait: */
+ ret = fence_completed(dev, fence) ? 0 : -EBUSY;
+ } else {
+ unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
+ unsigned long start_jiffies = jiffies;
+ unsigned long remaining_jiffies;
+
+ if (time_after(start_jiffies, timeout_jiffies))
+ remaining_jiffies = 0;
+ else
+ remaining_jiffies = timeout_jiffies - start_jiffies;
+
+ ret = wait_event_interruptible_timeout(priv->fence_event,
+ fence_completed(dev, fence),
+ remaining_jiffies);
+
+ if (ret == 0) {
+ DBG("timeout waiting for fence: %u (completed: %u)",
+ fence, priv->completed_fence);
+ ret = -ETIMEDOUT;
+ } else if (ret != -ERESTARTSYS) {
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+/* called from workqueue */
+void msm_update_fence(struct drm_device *dev, uint32_t fence)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ priv->completed_fence = max(fence, priv->completed_fence);
+
+ while (!list_empty(&priv->fence_cbs)) {
+ struct msm_fence_cb *cb;
+
+ cb = list_first_entry(&priv->fence_cbs,
+ struct msm_fence_cb, work.entry);
+
+ if (cb->fence > priv->completed_fence)
+ break;
+
+ list_del_init(&cb->work.entry);
+ queue_work(priv->wq, &cb->work);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ wake_up_all(&priv->fence_event);
+}
+
+void __msm_fence_worker(struct work_struct *work)
+{
+ struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
+ cb->func(cb);
+}
+
+/*
+ * DRM ioctls:
+ */
+
+static int msm_ioctl_get_param(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_param *args = data;
+ struct msm_gpu *gpu;
+
+ /* for now, we just have 3d pipe.. eventually this would need to
+ * be more clever to dispatch to appropriate gpu module:
+ */
+ if (args->pipe != MSM_PIPE_3D0)
+ return -EINVAL;
+
+ gpu = priv->gpu;
+
+ if (!gpu)
+ return -ENXIO;
+
+ return gpu->funcs->get_param(gpu, args->param, &args->value);
+}
+
+static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_new *args = data;
+
+ if (args->flags & ~MSM_BO_FLAGS) {
+ DRM_ERROR("invalid flags: %08x\n", args->flags);
+ return -EINVAL;
+ }
+
+ return msm_gem_new_handle(dev, file, args->size,
+ args->flags, &args->handle);
+}
+
+#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })
+
+static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_cpu_prep *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (args->op & ~MSM_PREP_FLAGS) {
+ DRM_ERROR("invalid op: %08x\n", args->op);
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_cpu_fini *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = msm_gem_cpu_fini(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_info *args = data;
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ args->offset = msm_gem_mmap_offset(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_wait_fence *args = data;
+
+ if (args->pad) {
+ DRM_ERROR("invalid pad: %08x\n", args->pad);
+ return -EINVAL;
+ }
+
+ return msm_wait_fence_interruptable(dev, args->fence,
+ &TS(args->timeout));
+}
+
+static const struct drm_ioctl_desc msm_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+};
+
+static const struct vm_operations_struct vm_ops = {
+ .fault = msm_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = msm_gem_mmap,
+};
+
+static struct drm_driver msm_driver = {
+ .driver_features = DRIVER_HAVE_IRQ |
+ DRIVER_GEM |
+ DRIVER_PRIME |
+ DRIVER_RENDER |
+ DRIVER_MODESET,
+ .load = msm_load,
+ .unload = msm_unload,
+ .open = msm_open,
+ .preclose = msm_preclose,
+ .lastclose = msm_lastclose,
+ .irq_handler = msm_irq,
+ .irq_preinstall = msm_irq_preinstall,
+ .irq_postinstall = msm_irq_postinstall,
+ .irq_uninstall = msm_irq_uninstall,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = msm_enable_vblank,
+ .disable_vblank = msm_disable_vblank,
+ .gem_free_object = msm_gem_free_object,
+ .gem_vm_ops = &vm_ops,
+ .dumb_create = msm_gem_dumb_create,
+ .dumb_map_offset = msm_gem_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = msm_gem_prime_pin,
+ .gem_prime_unpin = msm_gem_prime_unpin,
+ .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
+ .gem_prime_vmap = msm_gem_prime_vmap,
+ .gem_prime_vunmap = msm_gem_prime_vunmap,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = msm_debugfs_init,
+ .debugfs_cleanup = msm_debugfs_cleanup,
+#endif
+ .ioctls = msm_ioctls,
+ .num_ioctls = DRM_MSM_NUM_IOCTLS,
+ .fops = &fops,
+ .name = "msm",
+ .desc = "MSM Snapdragon DRM",
+ .date = "20130625",
+ .major = 1,
+ .minor = 0,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int msm_pm_suspend(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ drm_kms_helper_poll_disable(ddev);
+
+ return 0;
+}
+
+static int msm_pm_resume(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ drm_kms_helper_poll_enable(ddev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops msm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
+};
+
+/*
+ * Componentized driver support:
+ */
+
+#ifdef CONFIG_OF
+/* NOTE: the CONFIG_OF case duplicates much the same code as exynos or
+ * imx (or probably any other driver).. so there is probably room for
+ * some shared helpers
+ */
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int msm_drm_add_components(struct device *master, struct master *m)
+{
+ struct device_node *np = master->of_node;
+ unsigned i;
+ int ret;
+
+ for (i = 0; ; i++) {
+ struct device_node *node;
+
+ node = of_parse_phandle(np, "connectors", i);
+ if (!node)
+ break;
+
+ ret = component_master_add_child(m, compare_of, node);
+ of_node_put(node);
+
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+#else
+static int compare_dev(struct device *dev, void *data)
+{
+ return dev == data;
+}
+
+static int msm_drm_add_components(struct device *master, struct master *m)
+{
+	/* The non-DT case kinda sucks: we don't actually have a way to
+	 * know whether we are still waiting for certain devices, or
+	 * whether they are simply not present. But for non-DT we only
+	 * need to care about apq8064/apq8060/etc (all mdp4/a3xx):
+	 */
+ static const char *devnames[] = {
+ "hdmi_msm.0", "kgsl-3d0.0",
+ };
+ int i;
+
+ DBG("Adding components..");
+
+ for (i = 0; i < ARRAY_SIZE(devnames); i++) {
+ struct device *dev;
+ int ret;
+
+ dev = bus_find_device_by_name(&platform_bus_type,
+ NULL, devnames[i]);
+ if (!dev) {
+ dev_info(master, "still waiting for %s\n", devnames[i]);
+ return -EPROBE_DEFER;
+ }
+
+ ret = component_master_add_child(m, compare_dev, dev);
+ if (ret) {
+ DBG("could not add child: %d", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static int msm_drm_bind(struct device *dev)
+{
+ return drm_platform_init(&msm_driver, to_platform_device(dev));
+}
+
+static void msm_drm_unbind(struct device *dev)
+{
+ drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
+}
+
+static const struct component_master_ops msm_drm_ops = {
+ .add_components = msm_drm_add_components,
+ .bind = msm_drm_bind,
+ .unbind = msm_drm_unbind,
+};
+
+/*
+ * Platform driver:
+ */
+
+static int msm_pdev_probe(struct platform_device *pdev)
+{
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ return component_master_add(&pdev->dev, &msm_drm_ops);
+}
+
+static int msm_pdev_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &msm_drm_ops);
+
+ return 0;
+}
+
+static const struct platform_device_id msm_id[] = {
+ { "mdp", 0 },
+ { }
+};
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,mdss_mdp" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static struct platform_driver msm_platform_driver = {
+ .probe = msm_pdev_probe,
+ .remove = msm_pdev_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "msm",
+ .of_match_table = dt_match,
+ .pm = &msm_pm_ops,
+ },
+ .id_table = msm_id,
+};
+
+static int __init msm_drm_register(void)
+{
+ DBG("init");
+ hdmi_register();
+ a3xx_register();
+ return platform_driver_register(&msm_platform_driver);
+}
+
+static void __exit msm_drm_unregister(void)
+{
+ DBG("fini");
+ platform_driver_unregister(&msm_platform_driver);
+ hdmi_unregister();
+ a3xx_unregister();
+}
+
+module_init(msm_drm_register);
+module_exit(msm_drm_unregister);
+
+MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
+MODULE_DESCRIPTION("MSM DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
new file mode 100644
index 00000000000..8a2c5fd0893
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_DRV_H__
+#define __MSM_DRV_H__
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/component.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/iommu.h>
+#include <linux/types.h>
+#include <asm/sizes.h>
+
+
+#if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_QCOM)
+/* stubs we need for compile-test: */
+static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+ return NULL;
+}
+#endif
+
+#ifndef CONFIG_OF
+#include <mach/board.h>
+#include <mach/socinfo.h>
+#include <mach/iommu_domains.h>
+#endif
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/msm_drm.h>
+
+struct msm_kms;
+struct msm_gpu;
+struct msm_mmu;
+struct msm_rd_state;
+struct msm_perf_state;
+struct msm_gem_submit;
+
+#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
+
+struct msm_file_private {
+ /* currently we don't do anything useful with this.. but when
+ * per-context address spaces are supported we'd keep track of
+ * the context's page-tables here.
+ */
+ int dummy;
+};
+
+struct msm_drm_private {
+
+ struct msm_kms *kms;
+
+ /* subordinate devices, if present: */
+ struct platform_device *hdmi_pdev, *gpu_pdev;
+
+ /* when we have more than one 'msm_gpu' these need to be an array: */
+ struct msm_gpu *gpu;
+ struct msm_file_private *lastctx;
+
+ struct drm_fb_helper *fbdev;
+
+ uint32_t next_fence, completed_fence;
+ wait_queue_head_t fence_event;
+
+ struct msm_rd_state *rd;
+ struct msm_perf_state *perf;
+
+ /* list of GEM objects: */
+ struct list_head inactive_list;
+
+ struct workqueue_struct *wq;
+
+ /* callbacks deferred until bo is inactive: */
+ struct list_head fence_cbs;
+
+ /* registered MMUs: */
+ unsigned int num_mmus;
+ struct msm_mmu *mmus[NUM_DOMAINS];
+
+ unsigned int num_planes;
+ struct drm_plane *planes[8];
+
+ unsigned int num_crtcs;
+ struct drm_crtc *crtcs[8];
+
+ unsigned int num_encoders;
+ struct drm_encoder *encoders[8];
+
+ unsigned int num_bridges;
+ struct drm_bridge *bridges[8];
+
+ unsigned int num_connectors;
+ struct drm_connector *connectors[8];
+
+ /* VRAM carveout, used when no IOMMU: */
+ struct {
+ unsigned long size;
+ dma_addr_t paddr;
+ /* NOTE: mm managed at the page level, size is in # of pages
+ * and position mm_node->start is in # of pages:
+ */
+ struct drm_mm mm;
+ } vram;
+};
+
+struct msm_format {
+ uint32_t pixel_format;
+};
+
+/* callback from wq once fence has passed: */
+struct msm_fence_cb {
+ struct work_struct work;
+ uint32_t fence;
+ void (*func)(struct msm_fence_cb *cb);
+};
+
+void __msm_fence_worker(struct work_struct *work);
+
+#define INIT_FENCE_CB(_cb, _func) do { \
+ INIT_WORK(&(_cb)->work, __msm_fence_worker); \
+ (_cb)->func = _func; \
+ } while (0)
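+
+/* Minimal usage sketch (hypothetical caller code): embed the cb in your
+ * own struct, initialize it once, then queue it against a bo:
+ *
+ *	static void my_done(struct msm_fence_cb *cb) { ... }
+ *	...
+ *	INIT_FENCE_CB(&my_cb, my_done);
+ *	msm_gem_queue_inactive_cb(obj, &my_cb);
+ *
+ * my_done() then runs from priv->wq once the bo's fence has passed.
+ */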
+
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+
+int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
+ struct timespec *timeout);
+void msm_update_fence(struct drm_device *dev, uint32_t fence);
+
+int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
+int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
+ uint32_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
+struct page **msm_gem_get_pages(struct drm_gem_object *obj);
+void msm_gem_put_pages(struct drm_gem_object *obj);
+void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
+void *msm_gem_prime_vmap(struct drm_gem_object *obj);
+void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size, struct sg_table *sg);
+int msm_gem_prime_pin(struct drm_gem_object *obj);
+void msm_gem_prime_unpin(struct drm_gem_object *obj);
+void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
+void *msm_gem_vaddr(struct drm_gem_object *obj);
+int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
+ struct msm_fence_cb *cb);
+void msm_gem_move_to_active(struct drm_gem_object *obj,
+ struct msm_gpu *gpu, bool write, uint32_t fence);
+void msm_gem_move_to_inactive(struct drm_gem_object *obj);
+int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
+ struct timespec *timeout);
+int msm_gem_cpu_fini(struct drm_gem_object *obj);
+void msm_gem_free_object(struct drm_gem_object *obj);
+int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ uint32_t size, uint32_t flags, uint32_t *handle);
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+ uint32_t size, uint32_t flags);
+struct drm_gem_object *msm_gem_import(struct drm_device *dev,
+ uint32_t size, struct sg_table *sgt);
+
+struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
+const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
+struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
+struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
+
+struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
+
+struct hdmi;
+struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
+irqreturn_t hdmi_irq(int irq, void *dev_id);
+void __init hdmi_register(void);
+void __exit hdmi_unregister(void);
+
+#ifdef CONFIG_DEBUG_FS
+void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
+void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
+void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
+int msm_debugfs_late_init(struct drm_device *dev);
+int msm_rd_debugfs_init(struct drm_minor *minor);
+void msm_rd_debugfs_cleanup(struct drm_minor *minor);
+void msm_rd_dump_submit(struct msm_gem_submit *submit);
+int msm_perf_debugfs_init(struct drm_minor *minor);
+void msm_perf_debugfs_cleanup(struct drm_minor *minor);
+#else
+static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
+static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {}
+#endif
+
+void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
+ const char *dbgname);
+void msm_writel(u32 data, void __iomem *addr);
+u32 msm_readl(const void __iomem *addr);
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) do { if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__); } while (0)
+
+static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ return priv->completed_fence >= fence;
+}
+
+static inline int align_pitch(int width, int bpp)
+{
+ int bytespp = (bpp + 7) / 8;
+ /* adreno needs pitch aligned to 32 pixels: */
+ return bytespp * ALIGN(width, 32);
+}
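+
+/* e.g. a 1080-pixel-wide 32bpp buffer: bytespp = (32 + 7) / 8 = 4,
+ * ALIGN(1080, 32) = 1088, so align_pitch(1080, 32) = 4352 bytes.
+ */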
+
+/* for the generated headers: */
+#define INVALID_IDX(idx) ({BUG(); 0;})
+#define fui(x) ({BUG(); 0;})
+#define util_float_to_half(x) ({BUG(); 0;})
+
+
+#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)
+
+/* for conditionally setting boolean flag(s): */
+#define COND(bool, val) ((bool) ? (val) : 0)
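+
+/* e.g. (REG_FOO_BAR/REG_FOO_ENABLE are illustrative names; the real
+ * NAME__MASK/NAME__SHIFT pairs come from the generated headers):
+ *
+ *	bar = FIELD(val, REG_FOO_BAR);
+ *	val |= COND(enable, REG_FOO_ENABLE);
+ */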
+
+
+#endif /* __MSM_DRV_H__ */
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
new file mode 100644
index 00000000000..81bafdf19ab
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+struct msm_framebuffer {
+ struct drm_framebuffer base;
+ const struct msm_format *format;
+ struct drm_gem_object *planes[2];
+};
+#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
+
+
+static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ return drm_gem_handle_create(file_priv,
+ msm_fb->planes[0], handle);
+}
+
+static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ int i, n = drm_format_num_planes(fb->pixel_format);
+
+ DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+
+ drm_framebuffer_cleanup(fb);
+
+ for (i = 0; i < n; i++) {
+ struct drm_gem_object *bo = msm_fb->planes[i];
+ if (bo)
+ drm_gem_object_unreference_unlocked(bo);
+ }
+
+ kfree(msm_fb);
+}
+
+static int msm_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned flags, unsigned color,
+ struct drm_clip_rect *clips, unsigned num_clips)
+{
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
+ .create_handle = msm_framebuffer_create_handle,
+ .destroy = msm_framebuffer_destroy,
+ .dirty = msm_framebuffer_dirty,
+};
+
+#ifdef CONFIG_DEBUG_FS
+void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ int i, n = drm_format_num_planes(fb->pixel_format);
+
+ seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
+ fb->width, fb->height, (char *)&fb->pixel_format,
+ fb->refcount.refcount.counter, fb->base.id);
+
+ for (i = 0; i < n; i++) {
+ seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
+ i, fb->offsets[i], fb->pitches[i]);
+ msm_gem_describe(msm_fb->planes[i], m);
+ }
+}
+#endif
+
+struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ return msm_fb->planes[plane];
+}
+
+const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ return msm_fb->format;
+}
+
+struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *bos[4] = {0};
+ struct drm_framebuffer *fb;
+ int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
+
+ for (i = 0; i < n; i++) {
+ bos[i] = drm_gem_object_lookup(dev, file,
+ mode_cmd->handles[i]);
+ if (!bos[i]) {
+ ret = -ENXIO;
+ goto out_unref;
+ }
+ }
+
+ fb = msm_framebuffer_init(dev, mode_cmd, bos);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
+ goto out_unref;
+ }
+
+ return fb;
+
+out_unref:
+ for (i = 0; i < n; i++)
+ drm_gem_object_unreference_unlocked(bos[i]);
+ return ERR_PTR(ret);
+}
+
+struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_framebuffer *msm_fb;
+ struct drm_framebuffer *fb = NULL;
+ const struct msm_format *format;
+ int ret, i, n;
+ unsigned int hsub, vsub;
+
+ DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
+ dev, mode_cmd, mode_cmd->width, mode_cmd->height,
+ (char *)&mode_cmd->pixel_format);
+
+ n = drm_format_num_planes(mode_cmd->pixel_format);
+ hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+
+ format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
+ if (!format) {
+ dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
+ (char *)&mode_cmd->pixel_format);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ msm_fb = kzalloc(sizeof(*msm_fb), GFP_KERNEL);
+ if (!msm_fb) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ fb = &msm_fb->base;
+
+ msm_fb->format = format;
+
+ for (i = 0; i < n; i++) {
+ unsigned int width = mode_cmd->width / (i ? hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int min_size;
+
+ min_size = (height - 1) * mode_cmd->pitches[i]
+ + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+ + mode_cmd->offsets[i];
+
+ if (bos[i]->size < min_size) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ msm_fb->planes[i] = bos[i];
+ }
+
+ drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+
+ ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
+ if (ret) {
+ dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ goto fail;
+ }
+
+ DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+
+ return fb;
+
+fail:
+ if (fb)
+ msm_framebuffer_destroy(fb);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
new file mode 100644
index 00000000000..5107fc4826b
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+
+/*
+ * fbdev funcs, to implement legacy fbdev interface on top of drm driver
+ */
+
+#define to_msm_fbdev(x) container_of(x, struct msm_fbdev, base)
+
+struct msm_fbdev {
+ struct drm_fb_helper base;
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *bo;
+};
+
+static struct fb_ops msm_fb_ops = {
+ .owner = THIS_MODULE,
+
+ /* Note: to properly handle manual update displays, we wrap the
+ * basic fbdev ops which write to the framebuffer
+ */
+ .fb_read = fb_sys_read,
+ .fb_write = fb_sys_write,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int msm_fbdev_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct msm_fbdev *fbdev = to_msm_fbdev(helper);
+ struct drm_device *dev = helper->dev;
+ struct drm_framebuffer *fb = NULL;
+ struct fb_info *fbi = NULL;
+ struct drm_mode_fb_cmd2 mode_cmd = {0};
+ uint32_t paddr;
+ int ret, size;
+
+ sizes->surface_bpp = 32;
+ sizes->surface_depth = 24;
+
+ DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
+ sizes->surface_height, sizes->surface_bpp,
+ sizes->fb_width, sizes->fb_height);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ mode_cmd.pitches[0] = align_pitch(
+ mode_cmd.width, sizes->surface_bpp);
+
+ /* allocate backing bo */
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ DBG("allocating %d bytes for fb %d", size, dev->primary->index);
+ mutex_lock(&dev->struct_mutex);
+ fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(fbdev->bo)) {
+ ret = PTR_ERR(fbdev->bo);
+ fbdev->bo = NULL;
+ dev_err(dev->dev, "failed to allocate buffer object: %d\n", ret);
+ goto fail;
+ }
+
+ fb = msm_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
+ if (IS_ERR(fb)) {
+ dev_err(dev->dev, "failed to allocate fb\n");
+ /* note: if fb creation failed, we can't rely on fb destroy
+ * to unref the bo:
+ */
+ drm_gem_object_unreference(fbdev->bo);
+ ret = PTR_ERR(fb);
+ goto fail;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* TODO implement our own fb_mmap so we don't need this: */
+ msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
+
+ fbi = framebuffer_alloc(0, dev->dev);
+ if (!fbi) {
+ dev_err(dev->dev, "failed to allocate fb info\n");
+ ret = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ DBG("fbi=%p, dev=%p", fbi, dev);
+
+ fbdev->fb = fb;
+ helper->fb = fb;
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_DEFAULT;
+ fbi->fbops = &msm_fb_ops;
+
+ strcpy(fbi->fix.id, "msm");
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+
+ dev->mode_config.fb_base = paddr;
+
+ fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+ fbi->screen_size = fbdev->bo->size;
+ fbi->fix.smem_start = paddr;
+ fbi->fix.smem_len = fbdev->bo->size;
+
+ DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+ DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+
+fail_unlock:
+ mutex_unlock(&dev->struct_mutex);
+fail:
+
+ if (ret) {
+ if (fbi)
+ framebuffer_release(fbi);
+ if (fb) {
+ drm_framebuffer_unregister_private(fb);
+ drm_framebuffer_remove(fb);
+ }
+ }
+
+ return ret;
+}
+
+static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
+ u16 red, u16 green, u16 blue, int regno)
+{
+ DBG("fbdev: set gamma");
+}
+
+static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue, int regno)
+{
+ DBG("fbdev: get gamma");
+}
+
+static struct drm_fb_helper_funcs msm_fb_helper_funcs = {
+ .gamma_set = msm_crtc_fb_gamma_set,
+ .gamma_get = msm_crtc_fb_gamma_get,
+ .fb_probe = msm_fbdev_create,
+};
+
+/* initialize fbdev helper */
+struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_fbdev *fbdev = NULL;
+ struct drm_fb_helper *helper;
+ int ret = 0;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev)
+ goto fail;
+
+ helper = &fbdev->base;
+
+ helper->funcs = &msm_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, helper,
+ priv->num_crtcs, priv->num_connectors);
+ if (ret) {
+ dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+ goto fail;
+ }
+
+ drm_fb_helper_single_add_all_connectors(helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
+ drm_fb_helper_initial_config(helper, 32);
+
+ priv->fbdev = helper;
+
+ return helper;
+
+fail:
+ kfree(fbdev);
+ return NULL;
+}
+
+void msm_fbdev_free(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_fb_helper *helper = priv->fbdev;
+ struct msm_fbdev *fbdev;
+ struct fb_info *fbi;
+
+ DBG();
+
+ fbi = helper->fbdev;
+
+ /* only cleanup framebuffer if it is present */
+ if (fbi) {
+ unregister_framebuffer(fbi);
+ framebuffer_release(fbi);
+ }
+
+ drm_fb_helper_fini(helper);
+
+ fbdev = to_msm_fbdev(priv->fbdev);
+
+ /* this will free the backing object */
+ if (fbdev->fb) {
+ drm_framebuffer_unregister_private(fbdev->fb);
+ drm_framebuffer_remove(fbdev->fb);
+ }
+
+ kfree(fbdev);
+
+ priv->fbdev = NULL;
+}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
new file mode 100644
index 00000000000..690d7e7b6d1
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_gpu.h"
+#include "msm_mmu.h"
+
+static dma_addr_t physaddr(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
+ priv->vram.paddr;
+}
+
+/* allocate pages from VRAM carveout, used when no IOMMU: */
+static struct page **get_pages_vram(struct drm_gem_object *obj,
+ int npages)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ dma_addr_t paddr;
+ struct page **p;
+ int ret, i;
+
+ p = drm_malloc_ab(npages, sizeof(struct page *));
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
+ npages, 0, DRM_MM_SEARCH_DEFAULT);
+ if (ret) {
+ drm_free_large(p);
+ return ERR_PTR(ret);
+ }
+
+ paddr = physaddr(obj);
+ for (i = 0; i < npages; i++) {
+ p[i] = phys_to_page(paddr);
+ paddr += PAGE_SIZE;
+ }
+
+ return p;
+}
+
+/* called with dev->struct_mutex held */
+static struct page **get_pages(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (!msm_obj->pages) {
+ struct drm_device *dev = obj->dev;
+ struct page **p;
+ int npages = obj->size >> PAGE_SHIFT;
+
+ if (iommu_present(&platform_bus_type))
+ p = drm_gem_get_pages(obj, 0);
+ else
+ p = get_pages_vram(obj, npages);
+
+ if (IS_ERR(p)) {
+ dev_err(dev->dev, "could not get pages: %ld\n",
+ PTR_ERR(p));
+ return p;
+ }
+
+ msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+ if (IS_ERR(msm_obj->sgt)) {
+ dev_err(dev->dev, "failed to allocate sgt\n");
+ return ERR_CAST(msm_obj->sgt);
+ }
+
+ msm_obj->pages = p;
+
+ /* For non-cached buffers, ensure the new pages are clean
+ * because display controller, GPU, etc. are not coherent:
+ */
+ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ dma_map_sg(dev->dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+
+ return msm_obj->pages;
+}
+
+static void put_pages(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (msm_obj->pages) {
+ /* For non-cached buffers, ensure the new pages are clean
+ * because display controller, GPU, etc. are not coherent:
+ */
+ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ sg_free_table(msm_obj->sgt);
+ kfree(msm_obj->sgt);
+
+ if (iommu_present(&platform_bus_type))
+ drm_gem_put_pages(obj, msm_obj->pages, true, false);
+ else {
+ drm_mm_remove_node(msm_obj->vram_node);
+ drm_free_large(msm_obj->pages);
+ }
+
+ msm_obj->pages = NULL;
+ }
+}
+
+struct page **msm_gem_get_pages(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct page **p;
+ mutex_lock(&dev->struct_mutex);
+ p = get_pages(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return p;
+}
+
+void msm_gem_put_pages(struct drm_gem_object *obj)
+{
+ /* when we start tracking the pin count, then do something here */
+}
+
+int msm_gem_mmap_obj(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
+
+ if (msm_obj->flags & MSM_BO_WC) {
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ } else if (msm_obj->flags & MSM_BO_UNCACHED) {
+ vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ } else {
+ /*
+ * Shunt off cached objs to shmem file so they have their own
+ * address_space (so unmap_mapping_range does what we want,
+ * in particular in the case of mmap'd dmabufs)
+ */
+ fput(vma->vm_file);
+ get_file(obj->filp);
+ vma->vm_pgoff = 0;
+ vma->vm_file = obj->filp;
+
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ }
+
+ return 0;
+}
+
+int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret) {
+ DBG("mmap failed: %d", ret);
+ return ret;
+ }
+
+ return msm_gem_mmap_obj(vma->vm_private_data, vma);
+}
+
+int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_device *dev = obj->dev;
+ struct page **pages;
+ unsigned long pfn;
+ pgoff_t pgoff;
+ int ret;
+
+	/* Make sure we don't get a parallel update on a fault, and that
+	 * nothing is moved or removed from beneath our feet:
+	 */
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ goto out;
+
+ /* make sure we have pages attached now */
+ pages = get_pages(obj);
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ goto out_unlock;
+ }
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = ((unsigned long)vmf->virtual_address -
+ vma->vm_start) >> PAGE_SHIFT;
+
+ pfn = page_to_pfn(pages[pgoff]);
+
+ VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ pfn, pfn << PAGE_SHIFT);
+
+ ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+out:
+ switch (ret) {
+ case -EAGAIN:
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ case -EBUSY:
+ /*
+ * EBUSY is ok: this just means that another thread
+ * already did the job.
+ */
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+/* get mmap offset */
+static uint64_t mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ /* Make it mmapable */
+ ret = drm_gem_create_mmap_offset(obj);
+
+ if (ret) {
+ dev_err(dev->dev, "could not allocate mmap offset\n");
+ return 0;
+ }
+
+ return drm_vma_node_offset_addr(&obj->vma_node);
+}
+
+uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
+{
+ uint64_t offset;
+ mutex_lock(&obj->dev->struct_mutex);
+ offset = mmap_offset(obj);
+ mutex_unlock(&obj->dev->struct_mutex);
+ return offset;
+}
+
+/* Should be called under struct_mutex.. although it can be called
+ * from atomic context without struct_mutex to acquire an extra
+ * iova ref if you know one is already held.
+ *
+ * That means that when support for unpinning is eventually added,
+ * the refcnt counter will need to be atomic_t.
+ */
+int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
+ uint32_t *iova)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct drm_device *dev = obj->dev;
+ int ret = 0;
+
+ if (!msm_obj->domain[id].iova) {
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct msm_mmu *mmu = priv->mmus[id];
+ struct page **pages = get_pages(obj);
+
+ if (!mmu) {
+ dev_err(dev->dev, "null MMU pointer\n");
+ return -EINVAL;
+ }
+
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ if (iommu_present(&platform_bus_type)) {
+ uint32_t offset = (uint32_t)mmap_offset(obj);
+ ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
+ obj->size, IOMMU_READ | IOMMU_WRITE);
+ msm_obj->domain[id].iova = offset;
+ } else {
+ msm_obj->domain[id].iova = physaddr(obj);
+ }
+ }
+
+ if (!ret)
+ *iova = msm_obj->domain[id].iova;
+
+ return ret;
+}
+
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret;
+
+ /* this is safe right now because we don't unmap until the
+ * bo is deleted:
+ */
+ if (msm_obj->domain[id].iova) {
+ *iova = msm_obj->domain[id].iova;
+ return 0;
+ }
+
+ mutex_lock(&obj->dev->struct_mutex);
+ ret = msm_gem_get_iova_locked(obj, id, iova);
+ mutex_unlock(&obj->dev->struct_mutex);
+ return ret;
+}
+
+void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+{
+	/* XXX TODO ..
+	 * NOTE: probably don't need a _locked() version.. we wouldn't
+	 * normally unmap here, but instead just mark that it could be
+	 * unmapped (if the iova refcnt drops to zero), but then later
+	 * if another _get_iova_locked() fails we can start unmapping
+	 * things that are no longer needed..
+	 */
+}
+
+int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ args->pitch = align_pitch(args->width, args->bpp);
+ args->size = PAGE_ALIGN(args->pitch * args->height);
+ return msm_gem_new_handle(dev, file, args->size,
+ MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
+}
+
+int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ /* GEM does all our handle to object mapping */
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ *offset = msm_gem_mmap_offset(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+fail:
+ return ret;
+}
+
+void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+ if (!msm_obj->vaddr) {
+ struct page **pages = get_pages(obj);
+ if (IS_ERR(pages))
+ return ERR_CAST(pages);
+ msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+ VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ }
+ return msm_obj->vaddr;
+}
+
+void *msm_gem_vaddr(struct drm_gem_object *obj)
+{
+ void *ret;
+ mutex_lock(&obj->dev->struct_mutex);
+ ret = msm_gem_vaddr_locked(obj);
+ mutex_unlock(&obj->dev->struct_mutex);
+ return ret;
+}
+
+/* Set up a callback for when the bo is no longer busy..
+ * TODO probably want to differentiate read vs write..
+ */
+int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
+ struct msm_fence_cb *cb)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+ if (!list_empty(&cb->work.entry)) {
+ ret = -EINVAL;
+ } else if (is_active(msm_obj)) {
+ cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
+ list_add_tail(&cb->work.entry, &priv->fence_cbs);
+ } else {
+ queue_work(priv->wq, &cb->work);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+void msm_gem_move_to_active(struct drm_gem_object *obj,
+ struct msm_gpu *gpu, bool write, uint32_t fence)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ msm_obj->gpu = gpu;
+ if (write)
+ msm_obj->write_fence = fence;
+ else
+ msm_obj->read_fence = fence;
+ list_del_init(&msm_obj->mm_list);
+ list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+}
+
+void msm_gem_move_to_inactive(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ msm_obj->gpu = NULL;
+ msm_obj->read_fence = 0;
+ msm_obj->write_fence = 0;
+ list_del_init(&msm_obj->mm_list);
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+}
+
+int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
+ struct timespec *timeout)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret = 0;
+
+ if (is_active(msm_obj)) {
+ uint32_t fence = 0;
+
+ if (op & MSM_PREP_READ)
+ fence = msm_obj->write_fence;
+ if (op & MSM_PREP_WRITE)
+ fence = max(fence, msm_obj->read_fence);
+ if (op & MSM_PREP_NOSYNC)
+ timeout = NULL;
+
+ ret = msm_wait_fence_interruptable(dev, fence, timeout);
+ }
+
+ /* TODO cache maintenance */
+
+ return ret;
+}
+
+int msm_gem_cpu_fini(struct drm_gem_object *obj)
+{
+ /* TODO cache maintenance */
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ uint64_t off = drm_vma_node_start(&obj->vma_node);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
+ msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
+ msm_obj->read_fence, msm_obj->write_fence,
+ obj->name, obj->refcount.refcount.counter,
+ off, msm_obj->vaddr, obj->size);
+}
+
+void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
+{
+ struct msm_gem_object *msm_obj;
+ int count = 0;
+ size_t size = 0;
+
+ list_for_each_entry(msm_obj, list, mm_list) {
+ struct drm_gem_object *obj = &msm_obj->base;
+ seq_printf(m, " ");
+ msm_gem_describe(obj, m);
+ count++;
+ size += obj->size;
+ }
+
+ seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
+}
+#endif
+
+void msm_gem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int id;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ /* object should not be on active list: */
+ WARN_ON(is_active(msm_obj));
+
+ list_del(&msm_obj->mm_list);
+
+ for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+ struct msm_mmu *mmu = priv->mmus[id];
+ if (mmu && msm_obj->domain[id].iova) {
+ uint32_t offset = (uint32_t)mmap_offset(obj);
+ mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
+ }
+ }
+
+ drm_gem_free_mmap_offset(obj);
+
+ if (obj->import_attach) {
+ if (msm_obj->vaddr)
+ dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+
+ /* Don't drop the pages for imported dmabuf, as they are not
+ * ours, just free the array we allocated:
+ */
+ if (msm_obj->pages)
+ drm_free_large(msm_obj->pages);
+
+ } else {
+ if (msm_obj->vaddr)
+ vunmap(msm_obj->vaddr);
+ put_pages(obj);
+ }
+
+ if (msm_obj->resv == &msm_obj->_resv)
+ reservation_object_fini(msm_obj->resv);
+
+ drm_gem_object_release(obj);
+
+ kfree(msm_obj);
+}
+
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ uint32_t size, uint32_t flags, uint32_t *handle)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ obj = msm_gem_new(dev, size, flags);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ ret = drm_gem_handle_create(file, obj, handle);
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int msm_gem_new_impl(struct drm_device *dev,
+ uint32_t size, uint32_t flags,
+ struct drm_gem_object **obj)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj;
+ unsigned sz;
+
+ switch (flags & MSM_BO_CACHE_MASK) {
+ case MSM_BO_UNCACHED:
+ case MSM_BO_CACHED:
+ case MSM_BO_WC:
+ break;
+ default:
+ dev_err(dev->dev, "invalid cache flag: %x\n",
+ (flags & MSM_BO_CACHE_MASK));
+ return -EINVAL;
+ }
+
+ sz = sizeof(*msm_obj);
+ if (!iommu_present(&platform_bus_type))
+ sz += sizeof(struct drm_mm_node);
+
+ msm_obj = kzalloc(sz, GFP_KERNEL);
+ if (!msm_obj)
+ return -ENOMEM;
+
+ if (!iommu_present(&platform_bus_type))
+ msm_obj->vram_node = (void *)&msm_obj[1];
+
+ msm_obj->flags = flags;
+
+ msm_obj->resv = &msm_obj->_resv;
+ reservation_object_init(msm_obj->resv);
+
+ INIT_LIST_HEAD(&msm_obj->submit_entry);
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+
+ *obj = &msm_obj->base;
+
+ return 0;
+}
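+
+/* Note: in the no-IOMMU case, msm_gem_new_impl() above tail-allocates a
+ * drm_mm_node behind the object and points vram_node at it, so the VRAM
+ * carveout bookkeeping needs no separate allocation.
+ */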
+
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+ uint32_t size, uint32_t flags)
+{
+ struct drm_gem_object *obj = NULL;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ size = PAGE_ALIGN(size);
+
+ ret = msm_gem_new_impl(dev, size, flags, &obj);
+ if (ret)
+ goto fail;
+
+ if (iommu_present(&platform_bus_type)) {
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto fail;
+ } else {
+ drm_gem_private_object_init(dev, obj, size);
+ }
+
+ return obj;
+
+fail:
+ if (obj)
+ drm_gem_object_unreference(obj);
+
+ return ERR_PTR(ret);
+}
+
+struct drm_gem_object *msm_gem_import(struct drm_device *dev,
+ uint32_t size, struct sg_table *sgt)
+{
+ struct msm_gem_object *msm_obj;
+ struct drm_gem_object *obj;
+ int ret, npages;
+
+ /* if we don't have IOMMU, don't bother pretending we can import: */
+ if (!iommu_present(&platform_bus_type)) {
+ dev_err(dev->dev, "cannot import without IOMMU\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ size = PAGE_ALIGN(size);
+
+ ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
+ if (ret)
+ goto fail;
+
+ drm_gem_private_object_init(dev, obj, size);
+
+ npages = size / PAGE_SIZE;
+
+ msm_obj = to_msm_bo(obj);
+ msm_obj->sgt = sgt;
+ msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (!msm_obj->pages) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
+ if (ret)
+ goto fail;
+
+ return obj;
+
+fail:
+ if (obj)
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
new file mode 100644
index 00000000000..bfb052688f8
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_GEM_H__
+#define __MSM_GEM_H__
+
+#include <linux/reservation.h>
+#include "msm_drv.h"
+
+struct msm_gem_object {
+ struct drm_gem_object base;
+
+ uint32_t flags;
+
+	/* An object is either:
+	 *  inactive - on priv->inactive_list
+	 *  active   - on one of the gpu's active_lists.. well, at
+	 *    least for now we don't have (I don't think) hw sync between
+	 *    2d and 3d on devices which have both, meaning we need to
+	 *    block on submit if a bo is already on the other ring
+	 */
+ struct list_head mm_list;
+ struct msm_gpu *gpu; /* non-null if active */
+ uint32_t read_fence, write_fence;
+
+ /* Transiently in the process of submit ioctl, objects associated
+ * with the submit are on submit->bo_list.. this only lasts for
+ * the duration of the ioctl, so one bo can never be on multiple
+ * submit lists.
+ */
+ struct list_head submit_entry;
+
+ struct page **pages;
+ struct sg_table *sgt;
+ void *vaddr;
+
+ struct {
+		/* XXX */
+ uint32_t iova;
+ } domain[NUM_DOMAINS];
+
+ /* normally (resv == &_resv) except for imported bo's */
+ struct reservation_object *resv;
+ struct reservation_object _resv;
+
+ /* For physically contiguous buffers. Used when we don't have
+ * an IOMMU.
+ */
+ struct drm_mm_node *vram_node;
+};
+#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
+
+static inline bool is_active(struct msm_gem_object *msm_obj)
+{
+ return msm_obj->gpu != NULL;
+}
+
+#define MAX_CMDS 4
+
+/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
+ * associated with the cmdstream submission for synchronization (and
+ * make it easier to unwind when things go wrong, etc). This only
+ * lasts for the duration of the submit-ioctl.
+ */
+struct msm_gem_submit {
+ struct drm_device *dev;
+ struct msm_gpu *gpu;
+ struct list_head bo_list;
+ struct ww_acquire_ctx ticket;
+ uint32_t fence;
+ bool valid;
+ unsigned int nr_cmds;
+ unsigned int nr_bos;
+ struct {
+ uint32_t type;
+ uint32_t size; /* in dwords */
+ uint32_t iova;
+ uint32_t idx; /* cmdstream buffer idx in bos[] */
+ } cmd[MAX_CMDS];
+ struct {
+ uint32_t flags;
+ struct msm_gem_object *obj;
+ uint32_t iova;
+ } bos[0];
+};
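+
+/* Note: bos[] is a variable-length tail; submit_create() in
+ * msm_gem_submit.c allocates sizeof(*submit) + nr * sizeof(bos[0])
+ * in a single kmalloc.
+ */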
+
+#endif /* __MSM_GEM_H__ */
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
new file mode 100644
index 00000000000..d48f9fc5129
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+
+
+struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ BUG_ON(!msm_obj->sgt); /* should have already pinned! */
+ return msm_obj->sgt;
+}
+
+void *msm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ return msm_gem_vaddr(obj);
+}
+
+void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ /* TODO msm_gem_vunmap() */
+}
+
+struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size, struct sg_table *sg)
+{
+ return msm_gem_import(dev, size, sg);
+}
+
+int msm_gem_prime_pin(struct drm_gem_object *obj)
+{
+ if (!obj->import_attach)
+ msm_gem_get_pages(obj);
+ return 0;
+}
+
+void msm_gem_prime_unpin(struct drm_gem_object *obj)
+{
+ if (!obj->import_attach)
+ msm_gem_put_pages(obj);
+}
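+
+/* pin/unpin keep the backing pages alive while an exported dma-buf is
+ * attached; for imported objects the exporter handles pinning, so they
+ * are skipped here.
+ */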
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
new file mode 100644
index 00000000000..cd0554f6831
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -0,0 +1,427 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gpu.h"
+#include "msm_gem.h"
+
+/*
+ * Cmdstream submission:
+ */
+
+/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
+#define BO_VALID 0x8000
+#define BO_LOCKED 0x4000
+#define BO_PINNED 0x2000
+
+static inline void __user *to_user_ptr(u64 address)
+{
+ return (void __user *)(uintptr_t)address;
+}
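+
+/* (user pointers travel as u64 in the ioctl structs so the layout is
+ * identical for 32- and 64-bit userspace; the uintptr_t cast converts
+ * back to a pointer without warnings on 32-bit kernels)
+ */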
+
+static struct msm_gem_submit *submit_create(struct drm_device *dev,
+ struct msm_gpu *gpu, int nr)
+{
+ struct msm_gem_submit *submit;
+ int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
+
+ submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ if (submit) {
+ submit->dev = dev;
+ submit->gpu = gpu;
+
+ /* initially, until copy_from_user() and bo lookup succeeds: */
+ submit->nr_bos = 0;
+ submit->nr_cmds = 0;
+
+ INIT_LIST_HEAD(&submit->bo_list);
+ ww_acquire_init(&submit->ticket, &reservation_ww_class);
+ }
+
+ return submit;
+}
+
+static int submit_lookup_objects(struct msm_gem_submit *submit,
+ struct drm_msm_gem_submit *args, struct drm_file *file)
+{
+ unsigned i;
+ int ret = 0;
+
+ spin_lock(&file->table_lock);
+
+ for (i = 0; i < args->nr_bos; i++) {
+ struct drm_msm_gem_submit_bo submit_bo;
+ struct drm_gem_object *obj;
+ struct msm_gem_object *msm_obj;
+ void __user *userptr =
+ to_user_ptr(args->bos + (i * sizeof(submit_bo)));
+
+ ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+ if (ret) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
+ DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ submit->bos[i].flags = submit_bo.flags;
+ /* in validate_objects() we figure out if this is true: */
+ submit->bos[i].iova = submit_bo.presumed;
+
+ /* normally use drm_gem_object_lookup(), but for bulk lookup
+ * all under single table_lock just hit object_idr directly:
+ */
+ obj = idr_find(&file->object_idr, submit_bo.handle);
+ if (!obj) {
+ DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ msm_obj = to_msm_bo(obj);
+
+ if (!list_empty(&msm_obj->submit_entry)) {
+ DRM_ERROR("handle %u at index %u already on submit list\n",
+ submit_bo.handle, i);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ drm_gem_object_reference(obj);
+
+ submit->bos[i].obj = msm_obj;
+
+ list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
+ }
+
+out_unlock:
+ submit->nr_bos = i;
+ spin_unlock(&file->table_lock);
+
+ return ret;
+}
+
+static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
+{
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+
+ if (submit->bos[i].flags & BO_PINNED)
+ msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+
+ if (submit->bos[i].flags & BO_LOCKED)
+ ww_mutex_unlock(&msm_obj->resv->lock);
+
+ if (!(submit->bos[i].flags & BO_VALID))
+ submit->bos[i].iova = 0;
+
+ submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
+}
+
+/* This is where we make sure all the bo's are reserved and pin'd: */
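+/* Locking follows the ww_mutex protocol: on -EDEADLK we drop every lock
+ * taken so far, take the contended lock in the slow path, and then
+ * retry the whole list (see the fail: path below).
+ */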
+static int submit_validate_objects(struct msm_gem_submit *submit)
+{
+ int contended, slow_locked = -1, i, ret = 0;
+
+retry:
+ submit->valid = true;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ uint32_t iova;
+
+ if (slow_locked == i)
+ slow_locked = -1;
+
+ contended = i;
+
+ if (!(submit->bos[i].flags & BO_LOCKED)) {
+ ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
+ &submit->ticket);
+ if (ret)
+ goto fail;
+ submit->bos[i].flags |= BO_LOCKED;
+ }
+
+ /* if locking succeeded, pin bo: */
+ ret = msm_gem_get_iova_locked(&msm_obj->base,
+ submit->gpu->id, &iova);
+
+ /* this would break the logic in the fail path.. there is no
+ * reason for this to happen, but just to be on the safe side
+ * let's notice if this starts happening in the future:
+ */
+ WARN_ON(ret == -EDEADLK);
+
+ if (ret)
+ goto fail;
+
+ submit->bos[i].flags |= BO_PINNED;
+
+ if (iova == submit->bos[i].iova) {
+ submit->bos[i].flags |= BO_VALID;
+ } else {
+ submit->bos[i].iova = iova;
+ submit->bos[i].flags &= ~BO_VALID;
+ submit->valid = false;
+ }
+ }
+
+ ww_acquire_done(&submit->ticket);
+
+ return 0;
+
+fail:
+ for (; i >= 0; i--)
+ submit_unlock_unpin_bo(submit, i);
+
+ if (slow_locked > 0)
+ submit_unlock_unpin_bo(submit, slow_locked);
+
+ if (ret == -EDEADLK) {
+ struct msm_gem_object *msm_obj = submit->bos[contended].obj;
+ /* we lost out in a seqno race, lock and retry.. */
+ ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
+ &submit->ticket);
+ if (!ret) {
+ submit->bos[contended].flags |= BO_LOCKED;
+ slow_locked = contended;
+ goto retry;
+ }
+ }
+
+ return ret;
+}
+
+static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
+ struct msm_gem_object **obj, uint32_t *iova, bool *valid)
+{
+ if (idx >= submit->nr_bos) {
+ DRM_ERROR("invalid buffer index: %u (out of %u)\n",
+ idx, submit->nr_bos);
+ return -EINVAL;
+ }
+
+ if (obj)
+ *obj = submit->bos[idx].obj;
+ if (iova)
+ *iova = submit->bos[idx].iova;
+ if (valid)
+ *valid = !!(submit->bos[idx].flags & BO_VALID);
+
+ return 0;
+}
+
+/* process the reloc's and patch up the cmdstream as needed: */
+static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
+ uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
+{
+ uint32_t i, last_offset = 0;
+ uint32_t *ptr;
+ int ret;
+
+ if (offset % 4) {
+ DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
+ return -EINVAL;
+ }
+
+	/* For now, just map the entire thing. Eventually we probably
+	 * want to do it page-by-page, w/ kmap() if not vmap()d..
+	 */
+ ptr = msm_gem_vaddr_locked(&obj->base);
+
+ if (IS_ERR(ptr)) {
+ ret = PTR_ERR(ptr);
+ DBG("failed to map: %d", ret);
+ return ret;
+ }
+
+ for (i = 0; i < nr_relocs; i++) {
+ struct drm_msm_gem_submit_reloc submit_reloc;
+ void __user *userptr =
+ to_user_ptr(relocs + (i * sizeof(submit_reloc)));
+ uint32_t iova, off;
+ bool valid;
+
+ ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
+ if (ret)
+ return -EFAULT;
+
+ if (submit_reloc.submit_offset % 4) {
+ DRM_ERROR("non-aligned reloc offset: %u\n",
+ submit_reloc.submit_offset);
+ return -EINVAL;
+ }
+
+ /* offset in dwords: */
+ off = submit_reloc.submit_offset / 4;
+
+ if ((off >= (obj->base.size / 4)) ||
+ (off < last_offset)) {
+ DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
+ return -EINVAL;
+ }
+
+ ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
+ if (ret)
+ return ret;
+
+ if (valid)
+ continue;
+
+ iova += submit_reloc.reloc_offset;
+
+ if (submit_reloc.shift < 0)
+ iova >>= -submit_reloc.shift;
+ else
+ iova <<= submit_reloc.shift;
+
+ ptr[off] = iova | submit_reloc.or;
+
+ last_offset = off;
+ }
+
+ return 0;
+}
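+
+/* Worked example (illustrative values): a reloc with reloc_offset=8,
+ * shift=-2, or=0x1 against a bo pinned at iova 0x10000 patches the
+ * dword at submit_offset/4 to ((0x10000 + 8) >> 2) | 0x1 = 0x4003.
+ */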
+
+static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
+{
+ unsigned i;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ submit_unlock_unpin_bo(submit, i);
+ list_del_init(&msm_obj->submit_entry);
+ drm_gem_object_unreference(&msm_obj->base);
+ }
+
+ ww_acquire_fini(&submit->ticket);
+ kfree(submit);
+}
+
+int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_gem_submit *args = data;
+ struct msm_file_private *ctx = file->driver_priv;
+ struct msm_gem_submit *submit;
+ struct msm_gpu *gpu;
+ unsigned i;
+ int ret;
+
+ /* for now, we just have 3d pipe.. eventually this would need to
+ * be more clever to dispatch to appropriate gpu module:
+ */
+ if (args->pipe != MSM_PIPE_3D0)
+ return -EINVAL;
+
+ gpu = priv->gpu;
+
+ if (args->nr_cmds > MAX_CMDS)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+
+ submit = submit_create(dev, gpu, args->nr_bos);
+ if (!submit) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = submit_lookup_objects(submit, args, file);
+ if (ret)
+ goto out;
+
+ ret = submit_validate_objects(submit);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < args->nr_cmds; i++) {
+ struct drm_msm_gem_submit_cmd submit_cmd;
+ void __user *userptr =
+ to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
+ struct msm_gem_object *msm_obj;
+ uint32_t iova;
+
+ ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* validate input from userspace: */
+ switch (submit_cmd.type) {
+ case MSM_SUBMIT_CMD_BUF:
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ break;
+ default:
+ DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = submit_bo(submit, submit_cmd.submit_idx,
+ &msm_obj, &iova, NULL);
+ if (ret)
+ goto out;
+
+ if (submit_cmd.size % 4) {
+ DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
+ submit_cmd.size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if ((submit_cmd.size + submit_cmd.submit_offset) >=
+ msm_obj->base.size) {
+ DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ submit->cmd[i].type = submit_cmd.type;
+ submit->cmd[i].size = submit_cmd.size / 4;
+ submit->cmd[i].iova = iova + submit_cmd.submit_offset;
+ submit->cmd[i].idx = submit_cmd.submit_idx;
+
+ if (submit->valid)
+ continue;
+
+ ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
+ submit_cmd.nr_relocs, submit_cmd.relocs);
+ if (ret)
+ goto out;
+ }
+
+ submit->nr_cmds = i;
+
+ ret = msm_gpu_submit(gpu, submit, ctx);
+
+ args->fence = submit->fence;
+
+out:
+ if (submit)
+ submit_cleanup(submit, !!ret);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
new file mode 100644
index 00000000000..c6322197db8
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -0,0 +1,655 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_gpu.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+
+/*
+ * Power Management:
+ */
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+static void bs_init(struct msm_gpu *gpu)
+{
+ if (gpu->bus_scale_table) {
+ gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
+ DBG("bus scale client: %08x", gpu->bsc);
+ }
+}
+
+static void bs_fini(struct msm_gpu *gpu)
+{
+ if (gpu->bsc) {
+ msm_bus_scale_unregister_client(gpu->bsc);
+ gpu->bsc = 0;
+ }
+}
+
+static void bs_set(struct msm_gpu *gpu, int idx)
+{
+ if (gpu->bsc) {
+ DBG("set bus scaling: %d", idx);
+ msm_bus_scale_client_update_request(gpu->bsc, idx);
+ }
+}
+#else
+static void bs_init(struct msm_gpu *gpu) {}
+static void bs_fini(struct msm_gpu *gpu) {}
+static void bs_set(struct msm_gpu *gpu, int idx) {}
+#endif
+
+static int enable_pwrrail(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ int ret = 0;
+
+ if (gpu->gpu_reg) {
+ ret = regulator_enable(gpu->gpu_reg);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (gpu->gpu_cx) {
+ ret = regulator_enable(gpu->gpu_cx);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int disable_pwrrail(struct msm_gpu *gpu)
+{
+ if (gpu->gpu_cx)
+ regulator_disable(gpu->gpu_cx);
+ if (gpu->gpu_reg)
+ regulator_disable(gpu->gpu_reg);
+ return 0;
+}
+
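+/* All group clocks are prepared first and the rate clock bumped to
+ * fast_rate before any of them are enabled; disable_clk() mirrors this
+ * in reverse, parking the rate clock at slow_rate before unpreparing.
+ */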
+static int enable_clk(struct msm_gpu *gpu)
+{
+ struct clk *rate_clk = NULL;
+ int i;
+
+ /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
+ if (gpu->grp_clks[i]) {
+ clk_prepare(gpu->grp_clks[i]);
+ rate_clk = gpu->grp_clks[i];
+ }
+ }
+
+ if (rate_clk && gpu->fast_rate)
+ clk_set_rate(rate_clk, gpu->fast_rate);
+
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ if (gpu->grp_clks[i])
+ clk_enable(gpu->grp_clks[i]);
+
+ return 0;
+}
+
+static int disable_clk(struct msm_gpu *gpu)
+{
+ struct clk *rate_clk = NULL;
+ int i;
+
+ /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
+ if (gpu->grp_clks[i]) {
+ clk_disable(gpu->grp_clks[i]);
+ rate_clk = gpu->grp_clks[i];
+ }
+ }
+
+ if (rate_clk && gpu->slow_rate)
+ clk_set_rate(rate_clk, gpu->slow_rate);
+
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ if (gpu->grp_clks[i])
+ clk_unprepare(gpu->grp_clks[i]);
+
+ return 0;
+}
+
+static int enable_axi(struct msm_gpu *gpu)
+{
+ if (gpu->ebi1_clk)
+ clk_prepare_enable(gpu->ebi1_clk);
+ if (gpu->bus_freq)
+ bs_set(gpu, gpu->bus_freq);
+ return 0;
+}
+
+static int disable_axi(struct msm_gpu *gpu)
+{
+ if (gpu->ebi1_clk)
+ clk_disable_unprepare(gpu->ebi1_clk);
+ if (gpu->bus_freq)
+ bs_set(gpu, 0);
+ return 0;
+}
+
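+/* Power up on the first active reference: rails, then clocks, then the
+ * axi bus.  msm_gpu_pm_suspend() tears things down in the reverse order
+ * on the last reference; both rely on struct_mutex to serialize the
+ * active_cnt refcount.
+ */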
+int msm_gpu_pm_resume(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ int ret;
+
+ DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (gpu->active_cnt++ > 0)
+ return 0;
+
+ if (WARN_ON(gpu->active_cnt <= 0))
+ return -EINVAL;
+
+ ret = enable_pwrrail(gpu);
+ if (ret)
+ return ret;
+
+ ret = enable_clk(gpu);
+ if (ret)
+ return ret;
+
+ ret = enable_axi(gpu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int msm_gpu_pm_suspend(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ int ret;
+
+ DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (--gpu->active_cnt > 0)
+ return 0;
+
+ if (WARN_ON(gpu->active_cnt < 0))
+ return -EINVAL;
+
+ ret = disable_axi(gpu);
+ if (ret)
+ return ret;
+
+ ret = disable_clk(gpu);
+ if (ret)
+ return ret;
+
+ ret = disable_pwrrail(gpu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Inactivity detection (for suspend):
+ */
+
+static void inactive_worker(struct work_struct *work)
+{
+ struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
+ struct drm_device *dev = gpu->dev;
+
+ if (gpu->inactive)
+ return;
+
+ DBG("%s: inactive!\n", gpu->name);
+ mutex_lock(&dev->struct_mutex);
+ if (!(msm_gpu_active(gpu) || gpu->inactive)) {
+ disable_axi(gpu);
+ disable_clk(gpu);
+ gpu->inactive = true;
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void inactive_handler(unsigned long data)
+{
+ struct msm_gpu *gpu = (struct msm_gpu *)data;
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+
+ queue_work(priv->wq, &gpu->inactive_work);
+}
+
+/* cancel inactive timer and make sure we are awake: */
+static void inactive_cancel(struct msm_gpu *gpu)
+{
+ DBG("%s", gpu->name);
+ del_timer(&gpu->inactive_timer);
+ if (gpu->inactive) {
+ enable_clk(gpu);
+ enable_axi(gpu);
+ gpu->inactive = false;
+ }
+}
+
+static void inactive_start(struct msm_gpu *gpu)
+{
+ DBG("%s", gpu->name);
+ mod_timer(&gpu->inactive_timer,
+ round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
+}
+
+/*
+ * Hangcheck detection for locked gpu:
+ */
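+/*
+ * The timer fires every DRM_MSM_HANGCHECK_PERIOD; if the last completed
+ * fence has not advanced while work is still outstanding, the gpu is
+ * considered hung and recover_worker is scheduled to reset it.
+ */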
+
+static void recover_worker(struct work_struct *work)
+{
+ struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
+ struct drm_device *dev = gpu->dev;
+
+ dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+
+ mutex_lock(&dev->struct_mutex);
+ if (msm_gpu_active(gpu)) {
+ inactive_cancel(gpu);
+ gpu->funcs->recover(gpu);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ msm_gpu_retire(gpu);
+}
+
+static void hangcheck_timer_reset(struct msm_gpu *gpu)
+{
+ DBG("%s", gpu->name);
+ mod_timer(&gpu->hangcheck_timer,
+ round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
+}
+
+static void hangcheck_handler(unsigned long data)
+{
+ struct msm_gpu *gpu = (struct msm_gpu *)data;
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ uint32_t fence = gpu->funcs->last_fence(gpu);
+
+ if (fence != gpu->hangcheck_fence) {
+ /* some progress has been made.. ya! */
+ gpu->hangcheck_fence = fence;
+ } else if (fence < gpu->submitted_fence) {
+ /* no progress and not done.. hung! */
+ gpu->hangcheck_fence = fence;
+ dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
+ gpu->name);
+ dev_err(dev->dev, "%s: completed fence: %u\n",
+ gpu->name, fence);
+ dev_err(dev->dev, "%s: submitted fence: %u\n",
+ gpu->name, gpu->submitted_fence);
+ queue_work(priv->wq, &gpu->recover_work);
+ }
+
+ /* if still more pending work, reset the hangcheck timer: */
+ if (gpu->submitted_fence > gpu->hangcheck_fence)
+ hangcheck_timer_reset(gpu);
+
+ /* workaround for missing irq: */
+ queue_work(priv->wq, &gpu->retire_work);
+}
+
+/*
+ * Performance Counters:
+ */
+
+/* called under perf_lock */
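+/* Sample the hw counters, returning in 'cntrs' the deltas since the
+ * previous sample (at most ncntrs of them); the raw register values are
+ * cached in last_cntrs so each call reports only the new counts.
+ */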
+static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
+{
+ uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
+ int i, n = min(ncntrs, gpu->num_perfcntrs);
+
+ /* read current values: */
+ for (i = 0; i < gpu->num_perfcntrs; i++)
+ current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);
+
+ /* update cntrs: */
+ for (i = 0; i < n; i++)
+ cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];
+
+ /* save current values: */
+ for (i = 0; i < gpu->num_perfcntrs; i++)
+ gpu->last_cntrs[i] = current_cntrs[i];
+
+ return n;
+}
+
+static void update_sw_cntrs(struct msm_gpu *gpu)
+{
+ ktime_t time;
+ uint32_t elapsed;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpu->perf_lock, flags);
+ if (!gpu->perfcntr_active)
+ goto out;
+
+ time = ktime_get();
+ elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));
+
+ gpu->totaltime += elapsed;
+ if (gpu->last_sample.active)
+ gpu->activetime += elapsed;
+
+ gpu->last_sample.active = msm_gpu_active(gpu);
+ gpu->last_sample.time = time;
+
+out:
+ spin_unlock_irqrestore(&gpu->perf_lock, flags);
+}
+
+void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpu->perf_lock, flags);
+ /* we could dynamically enable/disable perfcntr registers too.. */
+ gpu->last_sample.active = msm_gpu_active(gpu);
+ gpu->last_sample.time = ktime_get();
+ gpu->activetime = gpu->totaltime = 0;
+ gpu->perfcntr_active = true;
+ update_hw_cntrs(gpu, 0, NULL);
+ spin_unlock_irqrestore(&gpu->perf_lock, flags);
+}
+
+void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
+{
+ gpu->perfcntr_active = false;
+}
+
+/* returns -errno or # of cntrs sampled */
+int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
+ uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&gpu->perf_lock, flags);
+
+ if (!gpu->perfcntr_active) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *activetime = gpu->activetime;
+ *totaltime = gpu->totaltime;
+
+ gpu->activetime = gpu->totaltime = 0;
+
+ ret = update_hw_cntrs(gpu, ncntrs, cntrs);
+
+out:
+ spin_unlock_irqrestore(&gpu->perf_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Cmdstream submission/retirement:
+ */
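+/*
+ * retire_worker runs off the workqueue (kicked from the irq path via
+ * msm_gpu_retire()) and moves bos whose read and write fences have both
+ * completed off the active list, dropping the iova and the reference
+ * taken at submit time.
+ */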
+
+static void retire_worker(struct work_struct *work)
+{
+ struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
+ struct drm_device *dev = gpu->dev;
+ uint32_t fence = gpu->funcs->last_fence(gpu);
+
+ msm_update_fence(gpu->dev, fence);
+
+ mutex_lock(&dev->struct_mutex);
+
+ while (!list_empty(&gpu->active_list)) {
+ struct msm_gem_object *obj;
+
+ obj = list_first_entry(&gpu->active_list,
+ struct msm_gem_object, mm_list);
+
+ if ((obj->read_fence <= fence) &&
+ (obj->write_fence <= fence)) {
+ /* move to inactive: */
+ msm_gem_move_to_inactive(&obj->base);
+ msm_gem_put_iova(&obj->base, gpu->id);
+ drm_gem_object_unreference(&obj->base);
+ } else {
+ break;
+ }
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (!msm_gpu_active(gpu))
+ inactive_start(gpu);
+}
+
+/* call from irq handler to schedule work to retire bo's */
+void msm_gpu_retire(struct msm_gpu *gpu)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ queue_work(priv->wq, &gpu->retire_work);
+ update_sw_cntrs(gpu);
+}
+
+/* add bo's to gpu's ring, and kick gpu: */
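+/* Fences are simple monotonic sequence numbers: each submit takes the
+ * next one, and submitted_fence is what hangcheck_handler() compares
+ * against the gpu's last completed fence to detect progress.
+ */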
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ int i, ret;
+
+ submit->fence = ++priv->next_fence;
+
+ gpu->submitted_fence = submit->fence;
+
+ inactive_cancel(gpu);
+
+ msm_rd_dump_submit(submit);
+
+ update_sw_cntrs(gpu);
+
+ ret = gpu->funcs->submit(gpu, submit, ctx);
+ priv->lastctx = ctx;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+
+ /* can't happen yet.. but when we add 2d support we'll have
+ * to deal w/ cross-ring synchronization:
+ */
+ WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
+
+ if (!is_active(msm_obj)) {
+ uint32_t iova;
+
+ /* ring takes a reference to the bo and iova: */
+ drm_gem_object_reference(&msm_obj->base);
+ msm_gem_get_iova_locked(&msm_obj->base,
+ submit->gpu->id, &iova);
+ }
+
+ if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
+ msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
+
+ if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+ msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
+ }
+ hangcheck_timer_reset(gpu);
+
+ return ret;
+}
+
+/*
+ * Init/Cleanup:
+ */
+
+static irqreturn_t irq_handler(int irq, void *data)
+{
+ struct msm_gpu *gpu = data;
+ return gpu->funcs->irq(gpu);
+}
+
+static const char *clk_names[] = {
+ "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
+};
+
+int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
+ const char *name, const char *ioname, const char *irqname, int ringsz)
+{
+ struct iommu_domain *iommu;
+ int i, ret;
+
+ if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
+ gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
+
+ gpu->dev = drm;
+ gpu->funcs = funcs;
+ gpu->name = name;
+ gpu->inactive = true;
+
+ INIT_LIST_HEAD(&gpu->active_list);
+ INIT_WORK(&gpu->retire_work, retire_worker);
+ INIT_WORK(&gpu->inactive_work, inactive_worker);
+ INIT_WORK(&gpu->recover_work, recover_worker);
+
+ setup_timer(&gpu->inactive_timer, inactive_handler,
+ (unsigned long)gpu);
+ setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
+ (unsigned long)gpu);
+
+ spin_lock_init(&gpu->perf_lock);
+
+ BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
+
+ /* Map registers: */
+ gpu->mmio = msm_ioremap(pdev, ioname, name);
+ if (IS_ERR(gpu->mmio)) {
+ ret = PTR_ERR(gpu->mmio);
+ goto fail;
+ }
+
+ /* Get Interrupt: */
+ gpu->irq = platform_get_irq_byname(pdev, irqname);
+ if (gpu->irq < 0) {
+ ret = gpu->irq;
+ dev_err(drm->dev, "failed to get irq: %d\n", ret);
+ goto fail;
+ }
+
+ ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
+ IRQF_TRIGGER_HIGH, gpu->name, gpu);
+ if (ret) {
+ dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
+ goto fail;
+ }
+
+ /* Acquire clocks: */
+ for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
+ gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
+ DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
+ if (IS_ERR(gpu->grp_clks[i]))
+ gpu->grp_clks[i] = NULL;
+ }
+
+ gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
+ DBG("ebi1_clk: %p", gpu->ebi1_clk);
+ if (IS_ERR(gpu->ebi1_clk))
+ gpu->ebi1_clk = NULL;
+
+ /* Acquire regulators: */
+ gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
+ DBG("gpu_reg: %p", gpu->gpu_reg);
+ if (IS_ERR(gpu->gpu_reg))
+ gpu->gpu_reg = NULL;
+
+ gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
+ DBG("gpu_cx: %p", gpu->gpu_cx);
+ if (IS_ERR(gpu->gpu_cx))
+ gpu->gpu_cx = NULL;
+
+ /* Setup IOMMU.. eventually we will (I think) do this once per context
+ * and have separate page tables per context. For now, to keep things
+ * simple and to get something working, just use a single address space:
+ */
+ iommu = iommu_domain_alloc(&platform_bus_type);
+ if (iommu) {
+ dev_info(drm->dev, "%s: using IOMMU\n", name);
+ gpu->mmu = msm_iommu_new(drm, iommu);
+ } else {
+ dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
+ }
+ gpu->id = msm_register_mmu(drm, gpu->mmu);
+
+ /* Create ringbuffer: */
+ gpu->rb = msm_ringbuffer_new(gpu, ringsz);
+ if (IS_ERR(gpu->rb)) {
+ ret = PTR_ERR(gpu->rb);
+ gpu->rb = NULL;
+ dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
+ goto fail;
+ }
+
+ ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+ if (ret) {
+ gpu->rb_iova = 0;
+ dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
+ goto fail;
+ }
+
+ bs_init(gpu);
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+void msm_gpu_cleanup(struct msm_gpu *gpu)
+{
+ DBG("%s", gpu->name);
+
+ WARN_ON(!list_empty(&gpu->active_list));
+
+ bs_fini(gpu);
+
+ if (gpu->rb) {
+ if (gpu->rb_iova)
+ msm_gem_put_iova(gpu->rb->bo, gpu->id);
+ msm_ringbuffer_destroy(gpu->rb);
+ }
+
+ if (gpu->mmu)
+ gpu->mmu->funcs->destroy(gpu->mmu);
+}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
new file mode 100644
index 00000000000..9b579b79284
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_GPU_H__
+#define __MSM_GPU_H__
+
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+#include "msm_ringbuffer.h"
+
+struct msm_gem_submit;
+struct msm_gpu_perfcntr;
+
+/* So far, with hardware that I've seen to date, we can have:
+ * + zero, one, or two z180 2d cores
+ * + a3xx or a2xx 3d core, which share a common CP (the firmware
+ * for the CP seems to implement some different PM4 packet types
+ * but the basics of cmdstream submission are the same)
+ *
+ * Which means that the eventual complete "class" hierarchy, once
+ * support for all past and present hw is in place, becomes:
+ * + msm_gpu
+ * + adreno_gpu
+ * + a3xx_gpu
+ * + a2xx_gpu
+ * + z180_gpu
+ */
+struct msm_gpu_funcs {
+ int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
+ int (*hw_init)(struct msm_gpu *gpu);
+ int (*pm_suspend)(struct msm_gpu *gpu);
+ int (*pm_resume)(struct msm_gpu *gpu);
+ int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx);
+ void (*flush)(struct msm_gpu *gpu);
+ void (*idle)(struct msm_gpu *gpu);
+	irqreturn_t (*irq)(struct msm_gpu *gpu);
+ uint32_t (*last_fence)(struct msm_gpu *gpu);
+ void (*recover)(struct msm_gpu *gpu);
+ void (*destroy)(struct msm_gpu *gpu);
+#ifdef CONFIG_DEBUG_FS
+ /* show GPU status in debugfs: */
+ void (*show)(struct msm_gpu *gpu, struct seq_file *m);
+#endif
+};
+
+struct msm_gpu {
+ const char *name;
+ struct drm_device *dev;
+ const struct msm_gpu_funcs *funcs;
+
+ /* performance counters (hw & sw): */
+ spinlock_t perf_lock;
+ bool perfcntr_active;
+ struct {
+ bool active;
+ ktime_t time;
+ } last_sample;
+ uint32_t totaltime, activetime; /* sw counters */
+ uint32_t last_cntrs[5]; /* hw counters */
+ const struct msm_gpu_perfcntr *perfcntrs;
+ uint32_t num_perfcntrs;
+
+ struct msm_ringbuffer *rb;
+ uint32_t rb_iova;
+
+ /* list of GEM active objects: */
+ struct list_head active_list;
+
+ uint32_t submitted_fence;
+
+ /* is gpu powered/active? */
+ int active_cnt;
+ bool inactive;
+
+ /* worker for handling active-list retiring: */
+ struct work_struct retire_work;
+
+ void __iomem *mmio;
+ int irq;
+
+ struct msm_mmu *mmu;
+ int id;
+
+ /* Power Control: */
+ struct regulator *gpu_reg, *gpu_cx;
+ struct clk *ebi1_clk, *grp_clks[5];
+ uint32_t fast_rate, slow_rate, bus_freq;
+
+#ifdef CONFIG_MSM_BUS_SCALING
+ struct msm_bus_scale_pdata *bus_scale_table;
+ uint32_t bsc;
+#endif
+
+ /* Hang and Inactivity Detection:
+ */
+#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
+#define DRM_MSM_INACTIVE_JIFFIES msecs_to_jiffies(DRM_MSM_INACTIVE_PERIOD)
+ struct timer_list inactive_timer;
+ struct work_struct inactive_work;
+#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
+#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
+ struct timer_list hangcheck_timer;
+ uint32_t hangcheck_fence;
+ struct work_struct recover_work;
+};
+
+static inline bool msm_gpu_active(struct msm_gpu *gpu)
+{
+ return gpu->submitted_fence > gpu->funcs->last_fence(gpu);
+}
+
+/* Perf-Counters:
+ * The select_reg and select_val are just there for the benefit of the child
+ * class that actually enables the perf counter.. but msm_gpu base class
+ * will handle sampling/displaying the counters.
+ */
+
+struct msm_gpu_perfcntr {
+ uint32_t select_reg;
+ uint32_t sample_reg;
+ uint32_t select_val;
+ const char *name;
+};
+
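+/* Register offsets are given in dwords; the shift by 2 converts them to
+ * byte offsets into the mmio region.
+ */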
+static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
+{
+ msm_writel(data, gpu->mmio + (reg << 2));
+}
+
+static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
+{
+ return msm_readl(gpu->mmio + (reg << 2));
+}
+
+int msm_gpu_pm_suspend(struct msm_gpu *gpu);
+int msm_gpu_pm_resume(struct msm_gpu *gpu);
+
+void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
+void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
+int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
+ uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
+
+void msm_gpu_retire(struct msm_gpu *gpu);
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx);
+
+int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
+ const char *name, const char *ioname, const char *irqname, int ringsz);
+void msm_gpu_cleanup(struct msm_gpu *gpu);
+
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+void __init a3xx_register(void);
+void __exit a3xx_unregister(void);
+
+#endif /* __MSM_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
new file mode 100644
index 00000000000..4b2ad9181ed
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+
+struct msm_iommu {
+ struct msm_mmu base;
+ struct iommu_domain *domain;
+};
+#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+
+static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
+ unsigned long iova, int flags, void *arg)
+{
+ DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
+ return -ENOSYS;
+}
+
+static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+ struct drm_device *dev = mmu->dev;
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int i, ret;
+
+ for (i = 0; i < cnt; i++) {
+ struct device *msm_iommu_get_ctx(const char *ctx_name);
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+ if (IS_ERR_OR_NULL(ctx)) {
+ dev_warn(dev->dev, "couldn't get %s context", names[i]);
+ continue;
+ }
+ ret = iommu_attach_device(iommu->domain, ctx);
+ if (ret) {
+ dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ struct device *msm_iommu_get_ctx(const char *ctx_name);
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+ if (IS_ERR_OR_NULL(ctx))
+ continue;
+ iommu_detach_device(iommu->domain, ctx);
+ }
+}
+
+static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
+ struct sg_table *sgt, unsigned len, int prot)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ struct iommu_domain *domain = iommu->domain;
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ unsigned int i, j;
+ int ret;
+
+ if (!domain || !sgt)
+ return -EINVAL;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ u32 pa = sg_phys(sg) - sg->offset;
+ size_t bytes = sg->length + sg->offset;
+
+ VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
+
+ ret = iommu_map(domain, da, pa, bytes, prot);
+ if (ret)
+ goto fail;
+
+ da += bytes;
+ }
+
+ return 0;
+
+fail:
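+	/* unwind whatever was mapped before the failure: */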
+ da = iova;
+
+ for_each_sg(sgt->sgl, sg, i, j) {
+ size_t bytes = sg->length + sg->offset;
+ iommu_unmap(domain, da, bytes);
+ da += bytes;
+ }
+ return ret;
+}
+
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
+ struct sg_table *sgt, unsigned len)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ struct iommu_domain *domain = iommu->domain;
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ int i;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes = sg->length + sg->offset;
+ size_t unmapped;
+
+ unmapped = iommu_unmap(domain, da, bytes);
+ if (unmapped < bytes)
+ return unmapped;
+
+ VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
+
+ BUG_ON(!PAGE_ALIGNED(bytes));
+
+ da += bytes;
+ }
+
+ return 0;
+}
+
+static void msm_iommu_destroy(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ iommu_domain_free(iommu->domain);
+ kfree(iommu);
+}
+
+static const struct msm_mmu_funcs funcs = {
+ .attach = msm_iommu_attach,
+ .detach = msm_iommu_detach,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .destroy = msm_iommu_destroy,
+};
+
+struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain)
+{
+ struct msm_iommu *iommu;
+
+ iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return ERR_PTR(-ENOMEM);
+
+ iommu->domain = domain;
+ msm_mmu_init(&iommu->base, dev, &funcs);
+ iommu_set_fault_handler(domain, msm_fault_handler, dev);
+
+ return &iommu->base;
+}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
new file mode 100644
index 00000000000..06437745bc2
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_KMS_H__
+#define __MSM_KMS_H__
+
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+
+/* As there are different display controller blocks depending on the
+ * snapdragon version, the kms support is split out and the appropriate
+ * implementation is loaded at runtime. The kms module is responsible
+ * for constructing the appropriate planes/crtcs/encoders/connectors.
+ */
+struct msm_kms_funcs {
+ /* hw initialization: */
+ int (*hw_init)(struct msm_kms *kms);
+ /* irq handling: */
+ void (*irq_preinstall)(struct msm_kms *kms);
+ int (*irq_postinstall)(struct msm_kms *kms);
+ void (*irq_uninstall)(struct msm_kms *kms);
+ irqreturn_t (*irq)(struct msm_kms *kms);
+ int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+ void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+ /* misc: */
+ const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
+ long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder);
+ /* cleanup: */
+ void (*preclose)(struct msm_kms *kms, struct drm_file *file);
+ void (*destroy)(struct msm_kms *kms);
+};
+
+struct msm_kms {
+ const struct msm_kms_funcs *funcs;
+
+ /* irq handling: */
+ bool in_irq;
+ struct list_head irq_list; /* list of mdp4_irq */
+ uint32_t vblank_mask; /* irq bits set for userspace vblank */
+};
+
+static inline void msm_kms_init(struct msm_kms *kms,
+ const struct msm_kms_funcs *funcs)
+{
+ kms->funcs = funcs;
+}
+
+struct msm_kms *mdp4_kms_init(struct drm_device *dev);
+struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+
+#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
new file mode 100644
index 00000000000..21da6d154f7
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_MMU_H__
+#define __MSM_MMU_H__
+
+#include <linux/iommu.h>
+
+struct msm_mmu_funcs {
+ int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
+ void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
+ int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+ unsigned len, int prot);
+ int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+ unsigned len);
+ void (*destroy)(struct msm_mmu *mmu);
+};
+
+struct msm_mmu {
+ const struct msm_mmu_funcs *funcs;
+ struct drm_device *dev;
+};
+
+static inline void msm_mmu_init(struct msm_mmu *mmu, struct drm_device *dev,
+ const struct msm_mmu_funcs *funcs)
+{
+ mmu->dev = dev;
+ mmu->funcs = funcs;
+}
+
+struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain);
+struct msm_mmu *msm_gpummu_new(struct drm_device *dev, struct msm_gpu *gpu);
+
+#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
new file mode 100644
index 00000000000..830857c47c8
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* For profiling, userspace can:
+ *
+ *   tail -f /sys/kernel/debug/dri/<minor>/perf
+ *
+ * This will enable performance counters/profiling to track the busy time
+ * and any gpu specific performance counters that are supported.
+ */
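+/* Output is tab-separated: a "%BUSY <counter names>" header line is
+ * emitted every 32 samples, then one line per SAMPLE_TIME (HZ/4) with
+ * the gpu busy percentage followed by the per-counter rates.
+ */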
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+
+#include "msm_drv.h"
+#include "msm_gpu.h"
+
+struct msm_perf_state {
+ struct drm_device *dev;
+
+ bool open;
+ int cnt;
+ struct mutex read_lock;
+
+ char buf[256];
+ int buftot, bufpos;
+
+ unsigned long next_jiffies;
+
+ struct dentry *ent;
+ struct drm_info_node *node;
+};
+
+#define SAMPLE_TIME (HZ/4)
+
+/* wait for next sample time: */
+static int wait_sample(struct msm_perf_state *perf)
+{
+ unsigned long start_jiffies = jiffies;
+
+ if (time_after(perf->next_jiffies, start_jiffies)) {
+ unsigned long remaining_jiffies =
+ perf->next_jiffies - start_jiffies;
+ int ret = schedule_timeout_interruptible(remaining_jiffies);
+ if (ret > 0) {
+ /* interrupted */
+ return -ERESTARTSYS;
+ }
+ }
+ perf->next_jiffies += SAMPLE_TIME;
+ return 0;
+}
+
+static int refill_buf(struct msm_perf_state *perf)
+{
+ struct msm_drm_private *priv = perf->dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ char *ptr = perf->buf;
+ int rem = sizeof(perf->buf);
+ int i, n;
+
+ if ((perf->cnt++ % 32) == 0) {
+ /* Header line: */
+ n = snprintf(ptr, rem, "%%BUSY");
+ ptr += n;
+ rem -= n;
+
+ for (i = 0; i < gpu->num_perfcntrs; i++) {
+ const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
+ n = snprintf(ptr, rem, "\t%s", perfcntr->name);
+ ptr += n;
+ rem -= n;
+ }
+ } else {
+ /* Sample line: */
+ uint32_t activetime = 0, totaltime = 0;
+ uint32_t cntrs[5];
+ uint32_t val;
+ int ret;
+
+ /* sleep until next sample time: */
+ ret = wait_sample(perf);
+ if (ret)
+ return ret;
+
+ ret = msm_gpu_perfcntr_sample(gpu, &activetime, &totaltime,
+ ARRAY_SIZE(cntrs), cntrs);
+ if (ret < 0)
+ return ret;
+
+ val = totaltime ? 1000 * activetime / totaltime : 0;
+ n = snprintf(ptr, rem, "%3d.%d%%", val / 10, val % 10);
+ ptr += n;
+ rem -= n;
+
+ for (i = 0; i < ret; i++) {
+ /* cycle counters (I think).. convert to MHz.. */
+ val = cntrs[i] / 10000;
+ n = snprintf(ptr, rem, "\t%5d.%02d",
+ val / 100, val % 100);
+ ptr += n;
+ rem -= n;
+ }
+ }
+
+ n = snprintf(ptr, rem, "\n");
+ ptr += n;
+ rem -= n;
+
+ perf->bufpos = 0;
+ perf->buftot = ptr - perf->buf;
+
+ return 0;
+}
+
+static ssize_t perf_read(struct file *file, char __user *buf,
+ size_t sz, loff_t *ppos)
+{
+ struct msm_perf_state *perf = file->private_data;
+ int n = 0, ret;
+
+ mutex_lock(&perf->read_lock);
+
+ if (perf->bufpos >= perf->buftot) {
+ ret = refill_buf(perf);
+ if (ret)
+ goto out;
+ }
+
+ n = min((int)sz, perf->buftot - perf->bufpos);
+ ret = copy_to_user(buf, &perf->buf[perf->bufpos], n);
+ if (ret)
+ goto out;
+
+ perf->bufpos += n;
+ *ppos += n;
+
+out:
+ mutex_unlock(&perf->read_lock);
+ if (ret)
+ return ret;
+ return n;
+}
+
+static int perf_open(struct inode *inode, struct file *file)
+{
+ struct msm_perf_state *perf = inode->i_private;
+ struct drm_device *dev = perf->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (perf->open || !gpu) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ file->private_data = perf;
+ perf->open = true;
+ perf->cnt = 0;
+ perf->buftot = 0;
+ perf->bufpos = 0;
+ msm_gpu_perfcntr_start(gpu);
+ perf->next_jiffies = jiffies + SAMPLE_TIME;
+
+out:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+static int perf_release(struct inode *inode, struct file *file)
+{
+ struct msm_perf_state *perf = inode->i_private;
+ struct msm_drm_private *priv = perf->dev->dev_private;
+ msm_gpu_perfcntr_stop(priv->gpu);
+ perf->open = false;
+ return 0;
+}
+
+
+static const struct file_operations perf_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = perf_open,
+ .read = perf_read,
+ .llseek = no_llseek,
+ .release = perf_release,
+};
+
+int msm_perf_debugfs_init(struct drm_minor *minor)
+{
+ struct msm_drm_private *priv = minor->dev->dev_private;
+ struct msm_perf_state *perf;
+
+ /* only create on first minor: */
+ if (priv->perf)
+ return 0;
+
+ perf = kzalloc(sizeof(*perf), GFP_KERNEL);
+ if (!perf)
+ return -ENOMEM;
+
+ perf->dev = minor->dev;
+
+ mutex_init(&perf->read_lock);
+ priv->perf = perf;
+
+ perf->node = kzalloc(sizeof(*perf->node), GFP_KERNEL);
+ if (!perf->node)
+ goto fail;
+
+ perf->ent = debugfs_create_file("perf", S_IFREG | S_IRUGO,
+ minor->debugfs_root, perf, &perf_debugfs_fops);
+ if (!perf->ent) {
+ DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/perf\n",
+ minor->debugfs_root->d_name.name);
+ goto fail;
+ }
+
+ perf->node->minor = minor;
+ perf->node->dent = perf->ent;
+ perf->node->info_ent = NULL;
+
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&perf->node->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
+
+ return 0;
+
+fail:
+ msm_perf_debugfs_cleanup(minor);
+ return -1;
+}
+
+void msm_perf_debugfs_cleanup(struct drm_minor *minor)
+{
+ struct msm_drm_private *priv = minor->dev->dev_private;
+ struct msm_perf_state *perf = priv->perf;
+
+ if (!perf)
+ return;
+
+ priv->perf = NULL;
+
+ debugfs_remove(perf->ent);
+
+ if (perf->node) {
+ mutex_lock(&minor->debugfs_lock);
+ list_del(&perf->node->list);
+ mutex_unlock(&minor->debugfs_lock);
+ kfree(perf->node);
+ }
+
+ mutex_destroy(&perf->read_lock);
+
+ kfree(perf);
+}
+
+#endif
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
new file mode 100644
index 00000000000..9a78c48817c
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* For debugging crashes, userspace can:
+ *
+ * tail -f /sys/kernel/debug/dri/<minor>/rd > logfile.rd
+ *
+ * To log the cmdstream in a format that is understood by freedreno/cffdump
+ * utility. By comparing the last successfully completed fence #, to the
+ * cmdstream for the next fence, you can narrow down which process and submit
+ * caused the gpu crash/lockup.
+ *
+ * This bypasses drm_debugfs_create_files() mainly because we need to use
+ * our own fops for a bit more control. In particular, we don't want to
+ * do anything if userspace doesn't have the debugfs file open.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/kfifo.h>
+#include <linux/debugfs.h>
+#include <linux/circ_buf.h>
+#include <linux/wait.h>
+
+#include "msm_drv.h"
+#include "msm_gpu.h"
+#include "msm_gem.h"
+
+enum rd_sect_type {
+ RD_NONE,
+ RD_TEST, /* ascii text */
+ RD_CMD, /* ascii text */
+ RD_GPUADDR, /* u32 gpuaddr, u32 size */
+ RD_CONTEXT, /* raw dump */
+ RD_CMDSTREAM, /* raw dump */
+ RD_CMDSTREAM_ADDR, /* gpu addr of cmdstream */
+ RD_PARAM, /* u32 param_type, u32 param_val, u32 bitlen */
+ RD_FLUSH, /* empty, clear previous params */
+ RD_PROGRAM, /* shader program, raw dump */
+ RD_VERT_SHADER,
+ RD_FRAG_SHADER,
+ RD_BUFFER_CONTENTS,
+ RD_GPU_ID,
+};
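+/* Each section in the stream is encoded as: u32 type, u32 size, then
+ * 'size' bytes of payload (see rd_write_section()).
+ */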
+
+#define BUF_SZ 512 /* should be power of 2 */
+
+/* space used: */
+#define circ_count(circ) \
+ (CIRC_CNT((circ)->head, (circ)->tail, BUF_SZ))
+#define circ_count_to_end(circ) \
+ (CIRC_CNT_TO_END((circ)->head, (circ)->tail, BUF_SZ))
+/* space available: */
+#define circ_space(circ) \
+ (CIRC_SPACE((circ)->head, (circ)->tail, BUF_SZ))
+#define circ_space_to_end(circ) \
+ (CIRC_SPACE_TO_END((circ)->head, (circ)->tail, BUF_SZ))
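+/* head and tail are masked with (BUF_SZ - 1) inside the CIRC_* macros,
+ * hence the power-of-2 requirement on BUF_SZ.
+ */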
+
+struct msm_rd_state {
+ struct drm_device *dev;
+
+ bool open;
+
+ struct dentry *ent;
+ struct drm_info_node *node;
+
+ /* current submit to read out: */
+ struct msm_gem_submit *submit;
+
+ /* fifo access is synchronized on the producer side by
+ * struct_mutex held by submit code (otherwise we could
+ * end up w/ cmds logged in different order than they
+ * were executed). And read_lock synchronizes the reads
+ */
+ struct mutex read_lock;
+
+ wait_queue_head_t fifo_event;
+ struct circ_buf fifo;
+
+ char buf[BUF_SZ];
+};
+
+static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
+{
+ struct circ_buf *fifo = &rd->fifo;
+ const char *ptr = buf;
+
+ while (sz > 0) {
+ char *fptr = &fifo->buf[fifo->head];
+ int n;
+
+ wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
+
+ n = min(sz, circ_space_to_end(&rd->fifo));
+ memcpy(fptr, ptr, n);
+
+ fifo->head = (fifo->head + n) & (BUF_SZ - 1);
+ sz -= n;
+ ptr += n;
+
+ wake_up_all(&rd->fifo_event);
+ }
+}
+
+static void rd_write_section(struct msm_rd_state *rd,
+ enum rd_sect_type type, const void *buf, int sz)
+{
+ rd_write(rd, &type, 4);
+ rd_write(rd, &sz, 4);
+ rd_write(rd, buf, sz);
+}
+
+static ssize_t rd_read(struct file *file, char __user *buf,
+ size_t sz, loff_t *ppos)
+{
+ struct msm_rd_state *rd = file->private_data;
+ struct circ_buf *fifo = &rd->fifo;
+ const char *fptr = &fifo->buf[fifo->tail];
+ int n = 0, ret = 0;
+
+ mutex_lock(&rd->read_lock);
+
+ ret = wait_event_interruptible(rd->fifo_event,
+ circ_count(&rd->fifo) > 0);
+ if (ret)
+ goto out;
+
+ n = min_t(int, sz, circ_count_to_end(&rd->fifo));
+ ret = copy_to_user(buf, fptr, n);
+ if (ret)
+ goto out;
+
+ fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
+ *ppos += n;
+
+ wake_up_all(&rd->fifo_event);
+
+out:
+ mutex_unlock(&rd->read_lock);
+ if (ret)
+ return ret;
+ return n;
+}
+
+static int rd_open(struct inode *inode, struct file *file)
+{
+ struct msm_rd_state *rd = inode->i_private;
+ struct drm_device *dev = rd->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ uint64_t val;
+ uint32_t gpu_id;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (rd->open || !gpu) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ file->private_data = rd;
+ rd->open = true;
+
+ /* the parsing tools need to know gpu-id to know which
+ * register database to load.
+ */
+ gpu->funcs->get_param(gpu, MSM_PARAM_GPU_ID, &val);
+ gpu_id = val;
+
+ rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));
+
+out:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+static int rd_release(struct inode *inode, struct file *file)
+{
+ struct msm_rd_state *rd = inode->i_private;
+ rd->open = false;
+ return 0;
+}
+
+
+static const struct file_operations rd_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = rd_open,
+ .read = rd_read,
+ .llseek = no_llseek,
+ .release = rd_release,
+};
+
+int msm_rd_debugfs_init(struct drm_minor *minor)
+{
+ struct msm_drm_private *priv = minor->dev->dev_private;
+ struct msm_rd_state *rd;
+
+ /* only create on first minor: */
+ if (priv->rd)
+ return 0;
+
+ rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+
+ rd->dev = minor->dev;
+ rd->fifo.buf = rd->buf;
+
+ mutex_init(&rd->read_lock);
+ priv->rd = rd;
+
+ init_waitqueue_head(&rd->fifo_event);
+
+ rd->node = kzalloc(sizeof(*rd->node), GFP_KERNEL);
+ if (!rd->node)
+ goto fail;
+
+ rd->ent = debugfs_create_file("rd", S_IFREG | S_IRUGO,
+ minor->debugfs_root, rd, &rd_debugfs_fops);
+ if (!rd->ent) {
+ DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/rd\n",
+ minor->debugfs_root->d_name.name);
+ goto fail;
+ }
+
+ rd->node->minor = minor;
+ rd->node->dent = rd->ent;
+ rd->node->info_ent = NULL;
+
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&rd->node->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
+
+ return 0;
+
+fail:
+ msm_rd_debugfs_cleanup(minor);
+ return -1;
+}
+
+void msm_rd_debugfs_cleanup(struct drm_minor *minor)
+{
+ struct msm_drm_private *priv = minor->dev->dev_private;
+ struct msm_rd_state *rd = priv->rd;
+
+ if (!rd)
+ return;
+
+ priv->rd = NULL;
+
+ debugfs_remove(rd->ent);
+
+ if (rd->node) {
+ mutex_lock(&minor->debugfs_lock);
+ list_del(&rd->node->list);
+ mutex_unlock(&minor->debugfs_lock);
+ kfree(rd->node);
+ }
+
+ mutex_destroy(&rd->read_lock);
+
+ kfree(rd);
+}
+
+/* called under struct_mutex */
+void msm_rd_dump_submit(struct msm_gem_submit *submit)
+{
+ struct drm_device *dev = submit->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_rd_state *rd = priv->rd;
+ char msg[128];
+ int i, n;
+
+ if (!rd->open)
+ return;
+
+ /* writing into fifo is serialized by caller, and
+ * rd->read_lock is used to serialize the reads
+ */
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
+ TASK_COMM_LEN, current->comm, task_pid_nr(current),
+ submit->fence);
+
+ rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
+
+ /* could be nice to have an option (module-param?) to snapshot
+ * all the bo's associated with the submit. Handy to see vtx
+ * buffers, etc. For now just the cmdstream bo's is enough.
+ */
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ uint32_t idx = submit->cmd[i].idx;
+ uint32_t iova = submit->cmd[i].iova;
+ uint32_t szd = submit->cmd[i].size; /* in dwords */
+ struct msm_gem_object *obj = submit->bos[idx].obj;
+ const char *buf = msm_gem_vaddr_locked(&obj->base);
+
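+		/* the cmd iova lies inside this bo; turn it into an offset
+		 * from the bo's cpu mapping:
+		 */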
+ buf += iova - submit->bos[idx].iova;
+
+ rd_write_section(rd, RD_GPUADDR,
+ (uint32_t[2]){ iova, szd * 4 }, 8);
+ rd_write_section(rd, RD_BUFFER_CONTENTS,
+ buf, szd * 4);
+
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ /* ignore IB-targets, we've logged the buffer, the
+ * parser tool will follow the IB based on the logged
+ * buffer/gpuaddr, so nothing more to do.
+ */
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ case MSM_SUBMIT_CMD_BUF:
+ rd_write_section(rd, RD_CMDSTREAM_ADDR,
+ (uint32_t[2]){ iova, szd }, 8);
+ break;
+ }
+ }
+}
+#endif
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
new file mode 100644
index 00000000000..8171537dd7d
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_ringbuffer.h"
+#include "msm_gpu.h"
+
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
+{
+ struct msm_ringbuffer *ring;
+ int ret;
+
+ size = ALIGN(size, 4); /* size should be dword aligned */
+
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ring->gpu = gpu;
+ ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
+ if (IS_ERR(ring->bo)) {
+ ret = PTR_ERR(ring->bo);
+ ring->bo = NULL;
+ goto fail;
+ }
+
+ ring->start = msm_gem_vaddr_locked(ring->bo);
+ ring->end = ring->start + (size / 4);
+ ring->cur = ring->start;
+
+ ring->size = size;
+
+ return ring;
+
+fail:
+ if (ring)
+ msm_ringbuffer_destroy(ring);
+ return ERR_PTR(ret);
+}
+
+void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
+{
+ if (ring->bo)
+ drm_gem_object_unreference(ring->bo);
+ kfree(ring);
+}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
new file mode 100644
index 00000000000..6e0e1049fa4
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_RINGBUFFER_H__
+#define __MSM_RINGBUFFER_H__
+
+#include "msm_drv.h"
+
+struct msm_ringbuffer {
+ struct msm_gpu *gpu;
+ int size;
+ struct drm_gem_object *bo;
+ uint32_t *start, *end, *cur;
+};
+
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size);
+void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
+
+/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
+
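+/* Write one dword into the ring, wrapping back to the start when the
+ * end is reached.  No check is made against overwriting data the gpu
+ * has not yet consumed; that is left to the caller.
+ */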
+static inline void
+OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
+{
+ if (ring->cur == ring->end)
+ ring->cur = ring->start;
+ *(ring->cur++) = data;
+}
+
+#endif /* __MSM_RINGBUFFER_H__ */
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index b1bc1ea182b..637c29a3312 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -1,45 +1,62 @@
config DRM_NOUVEAU
tristate "Nouveau (nVidia) cards"
- depends on DRM
+ depends on DRM && PCI
select FW_LOADER
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select DRM_TTM
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB
- select FRAMEBUFFER_CONSOLE if !EMBEDDED
+ select FRAMEBUFFER_CONSOLE if !EXPERT
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
+ select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
+ select X86_PLATFORM_DEVICES if ACPI && X86
+ select ACPI_WMI if ACPI && X86
+ select MXM_WMI if ACPI && X86
+ select POWER_SUPPLY
+	# Similar to i915, we need to select ACPI_VIDEO and its dependencies
+ select BACKLIGHT_LCD_SUPPORT if ACPI && X86
+ select BACKLIGHT_CLASS_DEVICE if ACPI && X86
+ select INPUT if ACPI && X86
+ select THERMAL if ACPI && X86
+ select ACPI_VIDEO if ACPI && X86
help
Choose this option for open-source nVidia support.
-config DRM_NOUVEAU_BACKLIGHT
- bool "Support for backlight control"
+config NOUVEAU_DEBUG
+ int "Maximum debug level"
depends on DRM_NOUVEAU
- default y
+ range 0 7
+ default 5
help
- Say Y here if you want to control the backlight of your display
- (e.g. a laptop panel).
+ Selects the maximum debug level to compile support for.
-config DRM_NOUVEAU_DEBUG
- bool "Build in Nouveau's debugfs support"
- depends on DRM_NOUVEAU && DEBUG_FS
- default y
- help
- Say Y here if you want Nouveau to output debugging information
- via debugfs.
+ 0 - fatal
+ 1 - error
+ 2 - warning
+ 3 - info
+ 4 - debug
+ 5 - trace (recommended)
+ 6 - paranoia
+ 7 - spam
-menu "I2C encoder or helper chips"
- depends on DRM && I2C
+ The paranoia and spam levels will add a lot of extra checks which
+ may potentially slow down driver operation.
-config DRM_I2C_CH7006
- tristate "Chrontel ch7006 TV encoder"
+config NOUVEAU_DEBUG_DEFAULT
+ int "Default debug level"
depends on DRM_NOUVEAU
- default m
+ range 0 7
+ default 3
help
- Support for Chrontel ch7006 and similar TV encoders, found
- on some nVidia video cards.
+	  Selects the default debug level.
- This driver is currently only useful if you're also using
- the nouveau driver.
-endmenu
+config DRM_NOUVEAU_BACKLIGHT
+ bool "Support for backlight control"
+ depends on DRM_NOUVEAU
+ default y
+ help
+ Say Y here if you want to control the backlight of your display
+ (e.g. a laptop panel).
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 48c290b5da8..8b307e14363 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -3,30 +3,349 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
-nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
- nouveau_object.o nouveau_irq.o nouveau_notifier.o \
- nouveau_sgdma.o nouveau_dma.o \
- nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
- nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
- nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
- nouveau_dp.o nouveau_grctx.o \
- nv04_timer.o \
- nv04_mc.o nv40_mc.o nv50_mc.o \
- nv04_fb.o nv10_fb.o nv40_fb.o \
- nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
- nv04_graph.o nv10_graph.o nv20_graph.o \
- nv40_graph.o nv50_graph.o \
- nv40_grctx.o \
- nv04_instmem.o nv50_instmem.o \
- nv50_crtc.o nv50_dac.o nv50_sor.o \
- nv50_cursor.o nv50_display.o nv50_fbcon.o \
- nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
- nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
- nv17_gpio.o
-
-nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
+ccflags-y += -I$(src)/core/include
+ccflags-y += -I$(src)/core
+ccflags-y += -I$(src)
+
+nouveau-y := core/core/client.o
+nouveau-y += core/core/engctx.o
+nouveau-y += core/core/engine.o
+nouveau-y += core/core/enum.o
+nouveau-y += core/core/event.o
+nouveau-y += core/core/gpuobj.o
+nouveau-y += core/core/handle.o
+nouveau-y += core/core/mm.o
+nouveau-y += core/core/namedb.o
+nouveau-y += core/core/object.o
+nouveau-y += core/core/option.o
+nouveau-y += core/core/parent.o
+nouveau-y += core/core/printk.o
+nouveau-y += core/core/ramht.o
+nouveau-y += core/core/subdev.o
+
+nouveau-y += core/subdev/bar/base.o
+nouveau-y += core/subdev/bar/nv50.o
+nouveau-y += core/subdev/bar/nvc0.o
+nouveau-y += core/subdev/bios/base.o
+nouveau-y += core/subdev/bios/bit.o
+nouveau-y += core/subdev/bios/boost.o
+nouveau-y += core/subdev/bios/conn.o
+nouveau-y += core/subdev/bios/cstep.o
+nouveau-y += core/subdev/bios/dcb.o
+nouveau-y += core/subdev/bios/disp.o
+nouveau-y += core/subdev/bios/dp.o
+nouveau-y += core/subdev/bios/extdev.o
+nouveau-y += core/subdev/bios/gpio.o
+nouveau-y += core/subdev/bios/i2c.o
+nouveau-y += core/subdev/bios/init.o
+nouveau-y += core/subdev/bios/mxm.o
+nouveau-y += core/subdev/bios/perf.o
+nouveau-y += core/subdev/bios/pll.o
+nouveau-y += core/subdev/bios/ramcfg.o
+nouveau-y += core/subdev/bios/rammap.o
+nouveau-y += core/subdev/bios/timing.o
+nouveau-y += core/subdev/bios/therm.o
+nouveau-y += core/subdev/bios/vmap.o
+nouveau-y += core/subdev/bios/volt.o
+nouveau-y += core/subdev/bios/xpio.o
+nouveau-y += core/subdev/bios/P0260.o
+nouveau-y += core/subdev/bus/hwsq.o
+nouveau-y += core/subdev/bus/nv04.o
+nouveau-y += core/subdev/bus/nv31.o
+nouveau-y += core/subdev/bus/nv50.o
+nouveau-y += core/subdev/bus/nv94.o
+nouveau-y += core/subdev/bus/nvc0.o
+nouveau-y += core/subdev/clock/base.o
+nouveau-y += core/subdev/clock/nv04.o
+nouveau-y += core/subdev/clock/nv40.o
+nouveau-y += core/subdev/clock/nv50.o
+nouveau-y += core/subdev/clock/nv84.o
+nouveau-y += core/subdev/clock/nva3.o
+nouveau-y += core/subdev/clock/nvaa.o
+nouveau-y += core/subdev/clock/nvc0.o
+nouveau-y += core/subdev/clock/nve0.o
+nouveau-y += core/subdev/clock/pllnv04.o
+nouveau-y += core/subdev/clock/pllnva3.o
+nouveau-y += core/subdev/devinit/base.o
+nouveau-y += core/subdev/devinit/nv04.o
+nouveau-y += core/subdev/devinit/nv05.o
+nouveau-y += core/subdev/devinit/nv10.o
+nouveau-y += core/subdev/devinit/nv1a.o
+nouveau-y += core/subdev/devinit/nv20.o
+nouveau-y += core/subdev/devinit/nv50.o
+nouveau-y += core/subdev/devinit/nv84.o
+nouveau-y += core/subdev/devinit/nv98.o
+nouveau-y += core/subdev/devinit/nva3.o
+nouveau-y += core/subdev/devinit/nvaf.o
+nouveau-y += core/subdev/devinit/nvc0.o
+nouveau-y += core/subdev/devinit/gm107.o
+nouveau-y += core/subdev/fb/base.o
+nouveau-y += core/subdev/fb/nv04.o
+nouveau-y += core/subdev/fb/nv10.o
+nouveau-y += core/subdev/fb/nv1a.o
+nouveau-y += core/subdev/fb/nv20.o
+nouveau-y += core/subdev/fb/nv25.o
+nouveau-y += core/subdev/fb/nv30.o
+nouveau-y += core/subdev/fb/nv35.o
+nouveau-y += core/subdev/fb/nv36.o
+nouveau-y += core/subdev/fb/nv40.o
+nouveau-y += core/subdev/fb/nv41.o
+nouveau-y += core/subdev/fb/nv44.o
+nouveau-y += core/subdev/fb/nv46.o
+nouveau-y += core/subdev/fb/nv47.o
+nouveau-y += core/subdev/fb/nv49.o
+nouveau-y += core/subdev/fb/nv4e.o
+nouveau-y += core/subdev/fb/nv50.o
+nouveau-y += core/subdev/fb/nv84.o
+nouveau-y += core/subdev/fb/nva3.o
+nouveau-y += core/subdev/fb/nvaa.o
+nouveau-y += core/subdev/fb/nvaf.o
+nouveau-y += core/subdev/fb/nvc0.o
+nouveau-y += core/subdev/fb/nve0.o
+nouveau-y += core/subdev/fb/gk20a.o
+nouveau-y += core/subdev/fb/gm107.o
+nouveau-y += core/subdev/fb/ramnv04.o
+nouveau-y += core/subdev/fb/ramnv10.o
+nouveau-y += core/subdev/fb/ramnv1a.o
+nouveau-y += core/subdev/fb/ramnv20.o
+nouveau-y += core/subdev/fb/ramnv40.o
+nouveau-y += core/subdev/fb/ramnv41.o
+nouveau-y += core/subdev/fb/ramnv44.o
+nouveau-y += core/subdev/fb/ramnv49.o
+nouveau-y += core/subdev/fb/ramnv4e.o
+nouveau-y += core/subdev/fb/ramnv50.o
+nouveau-y += core/subdev/fb/ramnva3.o
+nouveau-y += core/subdev/fb/ramnvaa.o
+nouveau-y += core/subdev/fb/ramnvc0.o
+nouveau-y += core/subdev/fb/ramnve0.o
+nouveau-y += core/subdev/fb/ramgk20a.o
+nouveau-y += core/subdev/fb/ramgm107.o
+nouveau-y += core/subdev/fb/sddr3.o
+nouveau-y += core/subdev/fb/gddr5.o
+nouveau-y += core/subdev/gpio/base.o
+nouveau-y += core/subdev/gpio/nv10.o
+nouveau-y += core/subdev/gpio/nv50.o
+nouveau-y += core/subdev/gpio/nv92.o
+nouveau-y += core/subdev/gpio/nvd0.o
+nouveau-y += core/subdev/gpio/nve0.o
+nouveau-y += core/subdev/i2c/base.o
+nouveau-y += core/subdev/i2c/anx9805.o
+nouveau-y += core/subdev/i2c/aux.o
+nouveau-y += core/subdev/i2c/bit.o
+nouveau-y += core/subdev/i2c/pad.o
+nouveau-y += core/subdev/i2c/padnv04.o
+nouveau-y += core/subdev/i2c/padnv94.o
+nouveau-y += core/subdev/i2c/nv04.o
+nouveau-y += core/subdev/i2c/nv4e.o
+nouveau-y += core/subdev/i2c/nv50.o
+nouveau-y += core/subdev/i2c/nv94.o
+nouveau-y += core/subdev/i2c/nvd0.o
+nouveau-y += core/subdev/i2c/gf117.o
+nouveau-y += core/subdev/i2c/nve0.o
+nouveau-y += core/subdev/ibus/nvc0.o
+nouveau-y += core/subdev/ibus/nve0.o
+nouveau-y += core/subdev/ibus/gk20a.o
+nouveau-y += core/subdev/instmem/base.o
+nouveau-y += core/subdev/instmem/nv04.o
+nouveau-y += core/subdev/instmem/nv40.o
+nouveau-y += core/subdev/instmem/nv50.o
+nouveau-y += core/subdev/ltcg/gf100.o
+nouveau-y += core/subdev/ltcg/gm107.o
+nouveau-y += core/subdev/mc/base.o
+nouveau-y += core/subdev/mc/nv04.o
+nouveau-y += core/subdev/mc/nv40.o
+nouveau-y += core/subdev/mc/nv44.o
+nouveau-y += core/subdev/mc/nv4c.o
+nouveau-y += core/subdev/mc/nv50.o
+nouveau-y += core/subdev/mc/nv94.o
+nouveau-y += core/subdev/mc/nv98.o
+nouveau-y += core/subdev/mc/nvc0.o
+nouveau-y += core/subdev/mc/nvc3.o
+nouveau-y += core/subdev/mxm/base.o
+nouveau-y += core/subdev/mxm/mxms.o
+nouveau-y += core/subdev/mxm/nv50.o
+nouveau-y += core/subdev/pwr/base.o
+nouveau-y += core/subdev/pwr/memx.o
+nouveau-y += core/subdev/pwr/nva3.o
+nouveau-y += core/subdev/pwr/nvc0.o
+nouveau-y += core/subdev/pwr/nvd0.o
+nouveau-y += core/subdev/pwr/nv108.o
+nouveau-y += core/subdev/therm/base.o
+nouveau-y += core/subdev/therm/fan.o
+nouveau-y += core/subdev/therm/fannil.o
+nouveau-y += core/subdev/therm/fanpwm.o
+nouveau-y += core/subdev/therm/fantog.o
+nouveau-y += core/subdev/therm/ic.o
+nouveau-y += core/subdev/therm/temp.o
+nouveau-y += core/subdev/therm/nv40.o
+nouveau-y += core/subdev/therm/nv50.o
+nouveau-y += core/subdev/therm/nv84.o
+nouveau-y += core/subdev/therm/nva3.o
+nouveau-y += core/subdev/therm/nvd0.o
+nouveau-y += core/subdev/timer/base.o
+nouveau-y += core/subdev/timer/nv04.o
+nouveau-y += core/subdev/timer/gk20a.o
+nouveau-y += core/subdev/vm/base.o
+nouveau-y += core/subdev/vm/nv04.o
+nouveau-y += core/subdev/vm/nv41.o
+nouveau-y += core/subdev/vm/nv44.o
+nouveau-y += core/subdev/vm/nv50.o
+nouveau-y += core/subdev/vm/nvc0.o
+nouveau-y += core/subdev/volt/base.o
+nouveau-y += core/subdev/volt/gpio.o
+nouveau-y += core/subdev/volt/nv40.o
+
+nouveau-y += core/engine/falcon.o
+nouveau-y += core/engine/xtensa.o
+nouveau-y += core/engine/dmaobj/base.o
+nouveau-y += core/engine/dmaobj/nv04.o
+nouveau-y += core/engine/dmaobj/nv50.o
+nouveau-y += core/engine/dmaobj/nvc0.o
+nouveau-y += core/engine/dmaobj/nvd0.o
+nouveau-y += core/engine/bsp/nv84.o
+nouveau-y += core/engine/bsp/nv98.o
+nouveau-y += core/engine/bsp/nvc0.o
+nouveau-y += core/engine/bsp/nve0.o
+nouveau-y += core/engine/copy/nva3.o
+nouveau-y += core/engine/copy/nvc0.o
+nouveau-y += core/engine/copy/nve0.o
+nouveau-y += core/engine/crypt/nv84.o
+nouveau-y += core/engine/crypt/nv98.o
+nouveau-y += core/engine/device/base.o
+nouveau-y += core/engine/device/ctrl.o
+nouveau-y += core/engine/device/nv04.o
+nouveau-y += core/engine/device/nv10.o
+nouveau-y += core/engine/device/nv20.o
+nouveau-y += core/engine/device/nv30.o
+nouveau-y += core/engine/device/nv40.o
+nouveau-y += core/engine/device/nv50.o
+nouveau-y += core/engine/device/nvc0.o
+nouveau-y += core/engine/device/nve0.o
+nouveau-y += core/engine/device/gm100.o
+nouveau-y += core/engine/disp/base.o
+nouveau-y += core/engine/disp/conn.o
+nouveau-y += core/engine/disp/outp.o
+nouveau-y += core/engine/disp/outpdp.o
+nouveau-y += core/engine/disp/nv04.o
+nouveau-y += core/engine/disp/nv50.o
+nouveau-y += core/engine/disp/nv84.o
+nouveau-y += core/engine/disp/nv94.o
+nouveau-y += core/engine/disp/nva0.o
+nouveau-y += core/engine/disp/nva3.o
+nouveau-y += core/engine/disp/nvd0.o
+nouveau-y += core/engine/disp/nve0.o
+nouveau-y += core/engine/disp/nvf0.o
+nouveau-y += core/engine/disp/gm107.o
+nouveau-y += core/engine/disp/dacnv50.o
+nouveau-y += core/engine/disp/dport.o
+nouveau-y += core/engine/disp/hdanva3.o
+nouveau-y += core/engine/disp/hdanvd0.o
+nouveau-y += core/engine/disp/hdminv84.o
+nouveau-y += core/engine/disp/hdminva3.o
+nouveau-y += core/engine/disp/hdminvd0.o
+nouveau-y += core/engine/disp/piornv50.o
+nouveau-y += core/engine/disp/sornv50.o
+nouveau-y += core/engine/disp/sornv94.o
+nouveau-y += core/engine/disp/sornvd0.o
+nouveau-y += core/engine/disp/vga.o
+nouveau-y += core/engine/fifo/base.o
+nouveau-y += core/engine/fifo/nv04.o
+nouveau-y += core/engine/fifo/nv10.o
+nouveau-y += core/engine/fifo/nv17.o
+nouveau-y += core/engine/fifo/nv40.o
+nouveau-y += core/engine/fifo/nv50.o
+nouveau-y += core/engine/fifo/nv84.o
+nouveau-y += core/engine/fifo/nvc0.o
+nouveau-y += core/engine/fifo/nve0.o
+nouveau-y += core/engine/fifo/gk20a.o
+nouveau-y += core/engine/fifo/nv108.o
+nouveau-y += core/engine/graph/ctxnv40.o
+nouveau-y += core/engine/graph/ctxnv50.o
+nouveau-y += core/engine/graph/ctxnvc0.o
+nouveau-y += core/engine/graph/ctxnvc1.o
+nouveau-y += core/engine/graph/ctxnvc4.o
+nouveau-y += core/engine/graph/ctxnvc8.o
+nouveau-y += core/engine/graph/ctxnvd7.o
+nouveau-y += core/engine/graph/ctxnvd9.o
+nouveau-y += core/engine/graph/ctxnve4.o
+nouveau-y += core/engine/graph/ctxgk20a.o
+nouveau-y += core/engine/graph/ctxnvf0.o
+nouveau-y += core/engine/graph/ctxnv108.o
+nouveau-y += core/engine/graph/ctxgm107.o
+nouveau-y += core/engine/graph/nv04.o
+nouveau-y += core/engine/graph/nv10.o
+nouveau-y += core/engine/graph/nv20.o
+nouveau-y += core/engine/graph/nv25.o
+nouveau-y += core/engine/graph/nv2a.o
+nouveau-y += core/engine/graph/nv30.o
+nouveau-y += core/engine/graph/nv34.o
+nouveau-y += core/engine/graph/nv35.o
+nouveau-y += core/engine/graph/nv40.o
+nouveau-y += core/engine/graph/nv50.o
+nouveau-y += core/engine/graph/nvc0.o
+nouveau-y += core/engine/graph/nvc1.o
+nouveau-y += core/engine/graph/nvc4.o
+nouveau-y += core/engine/graph/nvc8.o
+nouveau-y += core/engine/graph/nvd7.o
+nouveau-y += core/engine/graph/nvd9.o
+nouveau-y += core/engine/graph/nve4.o
+nouveau-y += core/engine/graph/gk20a.o
+nouveau-y += core/engine/graph/nvf0.o
+nouveau-y += core/engine/graph/nv108.o
+nouveau-y += core/engine/graph/gm107.o
+nouveau-y += core/engine/mpeg/nv31.o
+nouveau-y += core/engine/mpeg/nv40.o
+nouveau-y += core/engine/mpeg/nv44.o
+nouveau-y += core/engine/mpeg/nv50.o
+nouveau-y += core/engine/mpeg/nv84.o
+nouveau-y += core/engine/perfmon/base.o
+nouveau-y += core/engine/perfmon/daemon.o
+nouveau-y += core/engine/perfmon/nv40.o
+nouveau-y += core/engine/perfmon/nv50.o
+nouveau-y += core/engine/perfmon/nv84.o
+nouveau-y += core/engine/perfmon/nva3.o
+nouveau-y += core/engine/perfmon/nvc0.o
+nouveau-y += core/engine/perfmon/nve0.o
+nouveau-y += core/engine/perfmon/nvf0.o
+nouveau-y += core/engine/ppp/nv98.o
+nouveau-y += core/engine/ppp/nvc0.o
+nouveau-y += core/engine/software/nv04.o
+nouveau-y += core/engine/software/nv10.o
+nouveau-y += core/engine/software/nv50.o
+nouveau-y += core/engine/software/nvc0.o
+nouveau-y += core/engine/vp/nv84.o
+nouveau-y += core/engine/vp/nv98.o
+nouveau-y += core/engine/vp/nvc0.o
+nouveau-y += core/engine/vp/nve0.o
+
+# drm/core
+nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
+nouveau-y += nouveau_vga.o nouveau_agp.o
+nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
+nouveau-y += nouveau_prime.o nouveau_abi16.o
+nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o
+nouveau-y += nv50_fence.o nv84_fence.o nvc0_fence.o
+
+# drm/kms
+nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
+nouveau-y += nouveau_connector.o nouveau_dp.o
+nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
+
+# drm/kms/nv04:nv50
+include $(src)/dispnv04/Makefile
+
+# drm/kms/nv50-
+nouveau-y += nv50_display.o
+
+# drm/pm
+nouveau-y += nouveau_hwmon.o nouveau_sysfs.o
+
+# other random bits
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
-nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
+ifdef CONFIG_X86
nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
+endif
+nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
+nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
new file mode 100644
index 00000000000..9079c0ac58e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/option.h>
+
+#include <engine/device.h>
+
+static void
+nouveau_client_dtor(struct nouveau_object *object)
+{
+ struct nouveau_client *client = (void *)object;
+ nouveau_object_ref(NULL, &client->device);
+ nouveau_handle_destroy(client->root);
+ nouveau_namedb_destroy(&client->base);
+}
+
+static struct nouveau_oclass
+nouveau_client_oclass = {
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .dtor = nouveau_client_dtor,
+ },
+};
+
+int
+nouveau_client_create_(const char *name, u64 devname, const char *cfg,
+ const char *dbg, int length, void **pobject)
+{
+ struct nouveau_object *device;
+ struct nouveau_client *client;
+ int ret;
+
+ device = (void *)nouveau_device_find(devname);
+ if (!device)
+ return -ENODEV;
+
+ ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass,
+ NV_CLIENT_CLASS, NULL,
+ (1ULL << NVDEV_ENGINE_DEVICE),
+ length, pobject);
+ client = *pobject;
+ if (ret)
+ return ret;
+
+ ret = nouveau_handle_create(nv_object(client), ~0, ~0,
+ nv_object(client), &client->root);
+ if (ret)
+ return ret;
+
+ /* prevent init/fini from being called; the os is in charge of this */
+ atomic_set(&nv_object(client)->usecount, 2);
+
+ nouveau_object_ref(device, &client->device);
+ snprintf(client->name, sizeof(client->name), "%s", name);
+ client->debug = nouveau_dbgopt(dbg, "CLIENT");
+ return 0;
+}
+
+int
+nouveau_client_init(struct nouveau_client *client)
+{
+ int ret;
+ nv_debug(client, "init running\n");
+ ret = nouveau_handle_init(client->root);
+ nv_debug(client, "init completed with %d\n", ret);
+ return ret;
+}
+
+int
+nouveau_client_fini(struct nouveau_client *client, bool suspend)
+{
+ const char *name[2] = { "fini", "suspend" };
+ int ret;
+
+ nv_debug(client, "%s running\n", name[suspend]);
+ ret = nouveau_handle_fini(client->root, suspend);
+ nv_debug(client, "%s completed with %d\n", name[suspend], ret);
+ return ret;
+}
+
+const char *
+nouveau_client_name(void *obj)
+{
+ const char *client_name = "unknown";
+ struct nouveau_client *client = nouveau_client(obj);
+ if (client)
+ client_name = client->name;
+ return client_name;
+}
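
One convention in nouveau_client_create_() above is worth calling out:
*pobject is stored before ret is tested, so a caller can unwind a partially
constructed object through its ordinary destructor. A minimal userspace
sketch of the same convention; obj_create/obj_destroy are hypothetical
names, not driver API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct obj {
	char name[16];
};

static int obj_create(const char *name, struct obj **pobj)
{
	struct obj *obj = *pobj = calloc(1, sizeof(*obj));

	if (!obj)
		return -1;	/* *pobj is NULL; destroy remains safe */
	if (strlen(name) >= sizeof(obj->name))
		return -1;	/* partially built: caller must destroy */
	strcpy(obj->name, name);
	return 0;
}

static void obj_destroy(struct obj **pobj)
{
	free(*pobj);		/* free(NULL) is a no-op */
	*pobj = NULL;
}

int main(void)
{
	struct obj *obj;
	int ret = obj_create("client0", &obj);

	if (!ret)
		printf("created %s\n", obj->name);
	obj_destroy(&obj);	/* correct on success and on failure */
	return ret ? 1 : 0;
}
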
diff --git a/drivers/gpu/drm/nouveau/core/core/engctx.c b/drivers/gpu/drm/nouveau/core/core/engctx.c
new file mode 100644
index 00000000000..84c71fad2b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/engctx.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/client.h>
+#include <core/engctx.h>
+
+#include <subdev/vm.h>
+
+static inline int
+nouveau_engctx_exists(struct nouveau_object *parent,
+ struct nouveau_engine *engine, void **pobject)
+{
+ struct nouveau_engctx *engctx;
+ struct nouveau_object *parctx;
+
+ list_for_each_entry(engctx, &engine->contexts, head) {
+ parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS);
+ if (parctx == parent) {
+ atomic_inc(&nv_object(engctx)->refcount);
+ *pobject = engctx;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+int
+nouveau_engctx_create_(struct nouveau_object *parent,
+ struct nouveau_object *engobj,
+ struct nouveau_oclass *oclass,
+ struct nouveau_object *pargpu,
+ u32 size, u32 align, u32 flags,
+ int length, void **pobject)
+{
+ struct nouveau_client *client = nouveau_client(parent);
+ struct nouveau_engine *engine = nv_engine(engobj);
+ struct nouveau_object *engctx;
+ unsigned long save;
+ int ret;
+
+ /* check if this engine already has a context for the parent object,
+ * and reference it instead of creating a new one
+ */
+ spin_lock_irqsave(&engine->lock, save);
+ ret = nouveau_engctx_exists(parent, engine, pobject);
+ spin_unlock_irqrestore(&engine->lock, save);
+ if (ret)
+ return ret;
+
+ /* create the new context, supports creating both raw objects and
+ * objects backed by instance memory
+ */
+ if (size) {
+ ret = nouveau_gpuobj_create_(parent, engobj, oclass,
+ NV_ENGCTX_CLASS,
+ pargpu, size, align, flags,
+ length, pobject);
+ } else {
+ ret = nouveau_object_create_(parent, engobj, oclass,
+ NV_ENGCTX_CLASS, length, pobject);
+ }
+
+ engctx = *pobject;
+ if (ret)
+ return ret;
+
+ /* must take the lock again and re-check a context doesn't already
+ * exist (in case of a race) - the lock had to be dropped before as
+ * it's not possible to allocate the object with it held.
+ */
+ spin_lock_irqsave(&engine->lock, save);
+ ret = nouveau_engctx_exists(parent, engine, pobject);
+ if (ret) {
+ spin_unlock_irqrestore(&engine->lock, save);
+ nouveau_object_ref(NULL, &engctx);
+ return ret;
+ }
+
+ if (client->vm)
+ atomic_inc(&client->vm->engref[nv_engidx(engobj)]);
+ list_add(&nv_engctx(engctx)->head, &engine->contexts);
+ nv_engctx(engctx)->addr = ~0ULL;
+ spin_unlock_irqrestore(&engine->lock, save);
+ return 0;
+}
+
+void
+nouveau_engctx_destroy(struct nouveau_engctx *engctx)
+{
+ struct nouveau_object *engobj = nv_object(engctx)->engine;
+ struct nouveau_engine *engine = nv_engine(engobj);
+ struct nouveau_client *client = nouveau_client(engctx);
+ unsigned long save;
+
+ nouveau_gpuobj_unmap(&engctx->vma);
+ spin_lock_irqsave(&engine->lock, save);
+ list_del(&engctx->head);
+ spin_unlock_irqrestore(&engine->lock, save);
+
+ if (client->vm)
+ atomic_dec(&client->vm->engref[nv_engidx(engobj)]);
+
+ if (engctx->base.size)
+ nouveau_gpuobj_destroy(&engctx->base);
+ else
+ nouveau_object_destroy(&engctx->base.base);
+}
+
+int
+nouveau_engctx_init(struct nouveau_engctx *engctx)
+{
+ struct nouveau_object *object = nv_object(engctx);
+ struct nouveau_subdev *subdev = nv_subdev(object->engine);
+ struct nouveau_object *parent;
+ struct nouveau_subdev *pardev;
+ int ret;
+
+ ret = nouveau_gpuobj_init(&engctx->base);
+ if (ret)
+ return ret;
+
+ parent = nv_pclass(object->parent, NV_PARENT_CLASS);
+ pardev = nv_subdev(parent->engine);
+ if (nv_parent(parent)->context_attach) {
+ mutex_lock(&pardev->mutex);
+ ret = nv_parent(parent)->context_attach(parent, object);
+ mutex_unlock(&pardev->mutex);
+ }
+
+ if (ret) {
+ nv_error(parent, "failed to attach %s context, %d\n",
+ subdev->name, ret);
+ return ret;
+ }
+
+ nv_debug(parent, "attached %s context\n", subdev->name);
+ return 0;
+}
+
+int
+nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
+{
+ struct nouveau_object *object = nv_object(engctx);
+ struct nouveau_subdev *subdev = nv_subdev(object->engine);
+ struct nouveau_object *parent;
+ struct nouveau_subdev *pardev;
+ int ret = 0;
+
+ parent = nv_pclass(object->parent, NV_PARENT_CLASS);
+ pardev = nv_subdev(parent->engine);
+ if (nv_parent(parent)->context_detach) {
+ mutex_lock(&pardev->mutex);
+ ret = nv_parent(parent)->context_detach(parent, suspend, object);
+ mutex_unlock(&pardev->mutex);
+ }
+
+ if (ret) {
+ nv_error(parent, "failed to detach %s context, %d\n",
+ subdev->name, ret);
+ return ret;
+ }
+
+ nv_debug(parent, "detached %s context\n", subdev->name);
+ return nouveau_gpuobj_fini(&engctx->base, suspend);
+}
+
+int
+_nouveau_engctx_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_engctx *engctx;
+ int ret;
+
+ ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256,
+ NVOBJ_FLAG_ZERO_ALLOC, &engctx);
+ *pobject = nv_object(engctx);
+ return ret;
+}
+
+void
+_nouveau_engctx_dtor(struct nouveau_object *object)
+{
+ nouveau_engctx_destroy(nv_engctx(object));
+}
+
+int
+_nouveau_engctx_init(struct nouveau_object *object)
+{
+ return nouveau_engctx_init(nv_engctx(object));
+}
+
+int
+_nouveau_engctx_fini(struct nouveau_object *object, bool suspend)
+{
+ return nouveau_engctx_fini(nv_engctx(object), suspend);
+}
+
+struct nouveau_object *
+nouveau_engctx_get(struct nouveau_engine *engine, u64 addr)
+{
+ struct nouveau_engctx *engctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->lock, flags);
+ list_for_each_entry(engctx, &engine->contexts, head) {
+ if (engctx->addr == addr) {
+ engctx->save = flags;
+ return nv_object(engctx);
+ }
+ }
+ spin_unlock_irqrestore(&engine->lock, flags);
+ return NULL;
+}
+
+void
+nouveau_engctx_put(struct nouveau_object *object)
+{
+ if (object) {
+ struct nouveau_engine *engine = nv_engine(object->engine);
+ struct nouveau_engctx *engctx = nv_engctx(object);
+ spin_unlock_irqrestore(&engine->lock, engctx->save);
+ }
+}
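
The locking shape of nouveau_engctx_create_() above (look up under the
lock, drop it to allocate, retake it and re-check for a racing creator) is
the standard pattern when allocation may sleep. A self-contained pthread
sketch of the same dance; ctx_get and friends are hypothetical names:

#include <pthread.h>
#include <stdlib.h>

struct ctx {
	int key;
	int refs;
	struct ctx *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct ctx *contexts;

static struct ctx *ctx_lookup(int key)	/* caller holds the lock */
{
	struct ctx *c;

	for (c = contexts; c; c = c->next) {
		if (c->key == key) {
			c->refs++;
			return c;
		}
	}
	return NULL;
}

static struct ctx *ctx_get(int key)
{
	struct ctx *c, *n;

	pthread_mutex_lock(&lock);
	c = ctx_lookup(key);
	pthread_mutex_unlock(&lock);
	if (c)
		return c;

	n = calloc(1, sizeof(*n));	/* allocate with the lock dropped */
	if (!n)
		return NULL;
	n->key = key;
	n->refs = 1;

	pthread_mutex_lock(&lock);
	c = ctx_lookup(key);		/* re-check: someone may have raced us */
	if (c) {
		free(n);
	} else {
		n->next = contexts;
		contexts = n;
		c = n;
	}
	pthread_mutex_unlock(&lock);
	return c;
}

int main(void)
{
	struct ctx *a = ctx_get(7), *b = ctx_get(7);

	return !(a && a == b && a->refs == 2);
}
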
diff --git a/drivers/gpu/drm/nouveau/core/core/engine.c b/drivers/gpu/drm/nouveau/core/core/engine.c
new file mode 100644
index 00000000000..1f6954ae9dd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/engine.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+#include <core/engine.h>
+#include <core/option.h>
+
+int
+nouveau_engine_create_(struct nouveau_object *parent,
+ struct nouveau_object *engobj,
+ struct nouveau_oclass *oclass, bool enable,
+ const char *iname, const char *fname,
+ int length, void **pobject)
+{
+ struct nouveau_engine *engine;
+ int ret;
+
+ ret = nouveau_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS,
+ iname, fname, length, pobject);
+ engine = *pobject;
+ if (ret)
+ return ret;
+
+ if (parent) {
+ struct nouveau_device *device = nv_device(parent);
+ int engidx = nv_engidx(nv_object(engine));
+
+ if (device->disable_mask & (1ULL << engidx)) {
+ if (!nouveau_boolopt(device->cfgopt, iname, false)) {
+ nv_debug(engine, "engine disabled by hw/fw\n");
+ return -ENODEV;
+ }
+
+ nv_warn(engine, "ignoring hw/fw engine disable\n");
+ }
+
+ if (!nouveau_boolopt(device->cfgopt, iname, enable)) {
+ if (!enable)
+ nv_warn(engine, "disabled, %s=1 to enable\n", iname);
+ return -ENODEV;
+ }
+ }
+
+ INIT_LIST_HEAD(&engine->contexts);
+ spin_lock_init(&engine->lock);
+ return 0;
+}
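
nouveau_engine_create_() above gives the config string the last word: a
hw/fw-disabled engine stays off unless the user forces it on, and an engine
that defaults to off stays off unless requested. A toy standalone version
of that decision; boolopt() is a stand-in for nouveau_boolopt() and does
none of its real parsing:

#include <stdio.h>
#include <string.h>

static int boolopt(const char *cfg, const char *name, int dflt)
{
	const char *p = cfg ? strstr(cfg, name) : NULL;

	if (p && p[strlen(name)] == '=')
		return p[strlen(name) + 1] == '1';
	return dflt;
}

static int engine_usable(const char *cfg, const char *name,
			 int hw_disabled, int enable_by_default)
{
	if (hw_disabled && !boolopt(cfg, name, 0))
		return 0;	/* honour the hw/fw disable */
	/* an explicit NAME=1 overrides the disable; fall through */
	return boolopt(cfg, name, enable_by_default);
}

int main(void)
{
	printf("%d\n", engine_usable("PGRAPH=1", "PGRAPH", 1, 1)); /* 1 */
	printf("%d\n", engine_usable(NULL, "PGRAPH", 1, 1));       /* 0 */
	printf("%d\n", engine_usable(NULL, "PMPEG", 0, 0));        /* 0 */
	return 0;
}
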
diff --git a/drivers/gpu/drm/nouveau/core/core/enum.c b/drivers/gpu/drm/nouveau/core/core/enum.c
new file mode 100644
index 00000000000..dd434790ccc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/enum.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2010 Nouveau Project
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <core/os.h>
+#include <core/enum.h>
+
+const struct nouveau_enum *
+nouveau_enum_find(const struct nouveau_enum *en, u32 value)
+{
+ while (en->name) {
+ if (en->value == value)
+ return en;
+ en++;
+ }
+
+ return NULL;
+}
+
+const struct nouveau_enum *
+nouveau_enum_print(const struct nouveau_enum *en, u32 value)
+{
+ en = nouveau_enum_find(en, value);
+ if (en)
+ pr_cont("%s", en->name);
+ else
+ pr_cont("(unknown enum 0x%08x)", value);
+ return en;
+}
+
+void
+nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
+{
+ while (bf->name) {
+ if (value & bf->mask) {
+ pr_cont(" %s", bf->name);
+ value &= ~bf->mask;
+ }
+
+ bf++;
+ }
+
+ if (value)
+ pr_cont(" (unknown bits 0x%08x)", value);
+}
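
Both helpers above walk sentinel-terminated tables: entries end at
.name == NULL, and any bits not claimed by a table entry are reported as
unknown. The same idea in standalone form, with an illustrative table:

#include <stdio.h>

struct bitfield {
	unsigned mask;
	const char *name;
};

static const struct bitfield intr_names[] = {
	{ 0x00000001, "FIFO" },
	{ 0x00001000, "GRAPH" },
	{ 0x01000000, "DISP" },
	{ 0, NULL }	/* sentinel */
};

static void bitfield_print(const struct bitfield *bf, unsigned value)
{
	for (; bf->name; bf++) {
		if (value & bf->mask) {
			printf(" %s", bf->name);
			value &= ~bf->mask;
		}
	}
	if (value)
		printf(" (unknown bits 0x%08x)", value);
	printf("\n");
}

int main(void)
{
	bitfield_print(intr_names, 0x01001002);
	/* prints: GRAPH DISP (unknown bits 0x00000002) */
	return 0;
}
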
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
new file mode 100644
index 00000000000..ae81d3b5d8b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/os.h>
+#include <core/event.h>
+
+void
+nouveau_event_put(struct nouveau_eventh *handler)
+{
+ struct nouveau_event *event = handler->event;
+ unsigned long flags;
+ u32 m, t;
+
+ if (!__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags))
+ return;
+
+ spin_lock_irqsave(&event->refs_lock, flags);
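+ /* visit each set type bit: __ffs() yields the lowest, the mask update clears it */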
+ for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
+ if (!--event->refs[handler->index * event->types_nr + t]) {
+ if (event->disable)
+ event->disable(event, 1 << t, handler->index);
+ }
+ }
+ spin_unlock_irqrestore(&event->refs_lock, flags);
+}
+
+void
+nouveau_event_get(struct nouveau_eventh *handler)
+{
+ struct nouveau_event *event = handler->event;
+ unsigned long flags;
+ u32 m, t;
+
+ if (__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags))
+ return;
+
+ spin_lock_irqsave(&event->refs_lock, flags);
+ for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
+ if (!event->refs[handler->index * event->types_nr + t]++) {
+ if (event->enable)
+ event->enable(event, 1 << t, handler->index);
+ }
+ }
+ spin_unlock_irqrestore(&event->refs_lock, flags);
+}
+
+static void
+nouveau_event_fini(struct nouveau_eventh *handler)
+{
+ struct nouveau_event *event = handler->event;
+ unsigned long flags;
+ nouveau_event_put(handler);
+ spin_lock_irqsave(&event->list_lock, flags);
+ list_del(&handler->head);
+ spin_unlock_irqrestore(&event->list_lock, flags);
+}
+
+static int
+nouveau_event_init(struct nouveau_event *event, u32 types, int index,
+ int (*func)(void *, u32, int), void *priv,
+ struct nouveau_eventh *handler)
+{
+ unsigned long flags;
+
+ if (types & ~((1 << event->types_nr) - 1))
+ return -EINVAL;
+ if (index >= event->index_nr)
+ return -EINVAL;
+
+ handler->event = event;
+ handler->flags = 0;
+ handler->types = types;
+ handler->index = index;
+ handler->func = func;
+ handler->priv = priv;
+
+ spin_lock_irqsave(&event->list_lock, flags);
+ list_add_tail(&handler->head, &event->list[index]);
+ spin_unlock_irqrestore(&event->list_lock, flags);
+ return 0;
+}
+
+int
+nouveau_event_new(struct nouveau_event *event, u32 types, int index,
+ int (*func)(void *, u32, int), void *priv,
+ struct nouveau_eventh **phandler)
+{
+ struct nouveau_eventh *handler;
+ int ret = -ENOMEM;
+
+ if (event->check) {
+ ret = event->check(event, types, index);
+ if (ret)
+ return ret;
+ }
+
+ handler = *phandler = kmalloc(sizeof(*handler), GFP_KERNEL);
+ if (handler) {
+ ret = nouveau_event_init(event, types, index, func, priv, handler);
+ if (ret)
+ kfree(handler);
+ }
+
+ return ret;
+}
+
+void
+nouveau_event_ref(struct nouveau_eventh *handler, struct nouveau_eventh **ref)
+{
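+ /* only dropping a reference is supported, so the new handler must be NULL */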
+ BUG_ON(handler != NULL);
+ if (*ref) {
+ nouveau_event_fini(*ref);
+ kfree(*ref);
+ }
+ *ref = handler;
+}
+
+void
+nouveau_event_trigger(struct nouveau_event *event, u32 types, int index)
+{
+ struct nouveau_eventh *handler;
+ unsigned long flags;
+
+ if (WARN_ON(index >= event->index_nr))
+ return;
+
+ spin_lock_irqsave(&event->list_lock, flags);
+ list_for_each_entry(handler, &event->list[index], head) {
+ if (!test_bit(NVKM_EVENT_ENABLE, &handler->flags))
+ continue;
+ if (!(handler->types & types))
+ continue;
+ if (handler->func(handler->priv, handler->types & types, index)
+ != NVKM_EVENT_DROP)
+ continue;
+ nouveau_event_put(handler);
+ }
+ spin_unlock_irqrestore(&event->list_lock, flags);
+}
+
+void
+nouveau_event_destroy(struct nouveau_event **pevent)
+{
+ struct nouveau_event *event = *pevent;
+ if (event) {
+ kfree(event);
+ *pevent = NULL;
+ }
+}
+
+int
+nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **pevent)
+{
+ struct nouveau_event *event;
+ int i;
+
+ event = *pevent = kzalloc(sizeof(*event) + (index_nr * types_nr) *
+ sizeof(event->refs[0]), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ event->list = kmalloc(sizeof(*event->list) * index_nr, GFP_KERNEL);
+ if (!event->list) {
+ kfree(event);
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&event->list_lock);
+ spin_lock_init(&event->refs_lock);
+ for (i = 0; i < index_nr; i++)
+ INIT_LIST_HEAD(&event->list[i]);
+ event->types_nr = types_nr;
+ event->index_nr = index_nr;
+ return 0;
+}
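
The heart of nouveau_event_get()/put() is transition counting: the
expensive enable()/disable() callbacks run only on the 0->1 and 1->0 edges
of a per-source reference count, however many handlers share the event.
A stripped-down sketch; locking and the per-handler NVKM_EVENT_ENABLE
idempotence bit are deliberately omitted:

#include <stdio.h>

struct event {
	int refs;
	void (*enable)(void);
	void (*disable)(void);
};

static void evt_get(struct event *e)
{
	if (!e->refs++ && e->enable)
		e->enable();	/* 0 -> 1: turn the source on */
}

static void evt_put(struct event *e)
{
	if (!--e->refs && e->disable)
		e->disable();	/* 1 -> 0: turn the source off */
}

static void hw_on(void)  { puts("irq enabled"); }
static void hw_off(void) { puts("irq disabled"); }

int main(void)
{
	struct event e = { 0, hw_on, hw_off };

	evt_get(&e);	/* irq enabled */
	evt_get(&e);	/* no-op: already on */
	evt_put(&e);	/* no-op: still referenced */
	evt_put(&e);	/* irq disabled */
	return 0;
}
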
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
new file mode 100644
index 00000000000..560b2214cf1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/gpuobj.h>
+
+#include <subdev/instmem.h>
+#include <subdev/bar.h>
+#include <subdev/vm.h>
+
+void
+nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj)
+{
+ int i;
+
+ if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
+ for (i = 0; i < gpuobj->size; i += 4)
+ nv_wo32(gpuobj, i, 0x00000000);
+ }
+
+ if (gpuobj->node) {
+ nouveau_mm_free(&nv_gpuobj(gpuobj->parent)->heap,
+ &gpuobj->node);
+ }
+
+ if (gpuobj->heap.block_size)
+ nouveau_mm_fini(&gpuobj->heap);
+
+ nouveau_object_destroy(&gpuobj->base);
+}
+
+int
+nouveau_gpuobj_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 pclass,
+ struct nouveau_object *pargpu,
+ u32 size, u32 align, u32 flags,
+ int length, void **pobject)
+{
+ struct nouveau_instmem *imem = nouveau_instmem(parent);
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nouveau_gpuobj *gpuobj;
+ struct nouveau_mm *heap = NULL;
+ int ret, i;
+ u64 addr;
+
+ *pobject = NULL;
+
+ if (pargpu) {
+ while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
+ if (nv_gpuobj(pargpu)->heap.block_size)
+ break;
+ pargpu = pargpu->parent;
+ }
+
+ if (unlikely(pargpu == NULL)) {
+ nv_error(parent, "no gpuobj heap\n");
+ return -EINVAL;
+ }
+
+ addr = nv_gpuobj(pargpu)->addr;
+ heap = &nv_gpuobj(pargpu)->heap;
+ atomic_inc(&parent->refcount);
+ } else {
+ ret = imem->alloc(imem, parent, size, align, &parent);
+ pargpu = parent;
+ if (ret)
+ return ret;
+
+ addr = nv_memobj(pargpu)->addr;
+ size = nv_memobj(pargpu)->size;
+
+ if (bar && bar->alloc) {
+ struct nouveau_instobj *iobj = (void *)parent;
+ struct nouveau_mem **mem = (void *)(iobj + 1);
+ struct nouveau_mem *node = *mem;
+ if (!bar->alloc(bar, parent, node, &pargpu)) {
+ nouveau_object_ref(NULL, &parent);
+ parent = pargpu;
+ }
+ }
+ }
+
+ ret = nouveau_object_create_(parent, engine, oclass, pclass |
+ NV_GPUOBJ_CLASS, length, pobject);
+ nouveau_object_ref(NULL, &parent);
+ gpuobj = *pobject;
+ if (ret)
+ return ret;
+
+ gpuobj->parent = pargpu;
+ gpuobj->flags = flags;
+ gpuobj->addr = addr;
+ gpuobj->size = size;
+
+ if (heap) {
+ ret = nouveau_mm_head(heap, 1, size, size,
+ max(align, (u32)1), &gpuobj->node);
+ if (ret)
+ return ret;
+
+ gpuobj->addr += gpuobj->node->offset;
+ }
+
+ if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
+ ret = nouveau_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
+ if (ret)
+ return ret;
+ }
+
+ if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
+ for (i = 0; i < gpuobj->size; i += 4)
+ nv_wo32(gpuobj, i, 0x00000000);
+ }
+
+ return ret;
+}
+
+struct nouveau_gpuobj_class {
+ struct nouveau_object *pargpu;
+ u64 size;
+ u32 align;
+ u32 flags;
+};
+
+static int
+_nouveau_gpuobj_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_gpuobj_class *args = data;
+ struct nouveau_gpuobj *object;
+ int ret;
+
+ ret = nouveau_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
+ args->size, args->align, args->flags,
+ &object);
+ *pobject = nv_object(object);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void
+_nouveau_gpuobj_dtor(struct nouveau_object *object)
+{
+ nouveau_gpuobj_destroy(nv_gpuobj(object));
+}
+
+int
+_nouveau_gpuobj_init(struct nouveau_object *object)
+{
+ return nouveau_gpuobj_init(nv_gpuobj(object));
+}
+
+int
+_nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
+{
+ return nouveau_gpuobj_fini(nv_gpuobj(object), suspend);
+}
+
+u32
+_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr)
+{
+ struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
+ struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
+ if (gpuobj->node)
+ addr += gpuobj->node->offset;
+ return pfuncs->rd32(gpuobj->parent, addr);
+}
+
+void
+_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
+ struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
+ if (gpuobj->node)
+ addr += gpuobj->node->offset;
+ pfuncs->wr32(gpuobj->parent, addr, data);
+}
+
+static struct nouveau_oclass
+_nouveau_gpuobj_oclass = {
+ .handle = 0x00000000,
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_gpuobj_ctor,
+ .dtor = _nouveau_gpuobj_dtor,
+ .init = _nouveau_gpuobj_init,
+ .fini = _nouveau_gpuobj_fini,
+ .rd32 = _nouveau_gpuobj_rd32,
+ .wr32 = _nouveau_gpuobj_wr32,
+ },
+};
+
+int
+nouveau_gpuobj_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
+ u32 size, u32 align, u32 flags,
+ struct nouveau_gpuobj **pgpuobj)
+{
+ struct nouveau_object *engine = parent;
+ struct nouveau_gpuobj_class args = {
+ .pargpu = pargpu,
+ .size = size,
+ .align = align,
+ .flags = flags,
+ };
+
+ if (!nv_iclass(engine, NV_SUBDEV_CLASS))
+ engine = engine->engine;
+ BUG_ON(engine == NULL);
+
+ return nouveau_object_ctor(parent, engine, &_nouveau_gpuobj_oclass,
+ &args, sizeof(args),
+ (struct nouveau_object **)pgpuobj);
+}
+
+int
+nouveau_gpuobj_map(struct nouveau_gpuobj *gpuobj, u32 access,
+ struct nouveau_vma *vma)
+{
+ struct nouveau_bar *bar = nouveau_bar(gpuobj);
+ int ret = -EINVAL;
+
+ if (bar && bar->umap) {
+ struct nouveau_instobj *iobj = (void *)
+ nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
+ struct nouveau_mem **mem = (void *)(iobj + 1);
+ ret = bar->umap(bar, *mem, access, vma);
+ }
+
+ return ret;
+}
+
+int
+nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm,
+ u32 access, struct nouveau_vma *vma)
+{
+ struct nouveau_instobj *iobj = (void *)
+ nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
+ struct nouveau_mem **mem = (void *)(iobj + 1);
+ int ret;
+
+ ret = nouveau_vm_get(vm, gpuobj->size, 12, access, vma);
+ if (ret)
+ return ret;
+
+ nouveau_vm_map(vma, *mem);
+ return 0;
+}
+
+void
+nouveau_gpuobj_unmap(struct nouveau_vma *vma)
+{
+ if (vma->node) {
+ nouveau_vm_unmap(vma);
+ nouveau_vm_put(vma);
+ }
+}
+
+/* the below is basically only here to support sharing the paged dma object
+ * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
+ * anywhere else.
+ */
+
+static void
+nouveau_gpudup_dtor(struct nouveau_object *object)
+{
+ struct nouveau_gpuobj *gpuobj = (void *)object;
+ nouveau_object_ref(NULL, &gpuobj->parent);
+ nouveau_object_destroy(&gpuobj->base);
+}
+
+static struct nouveau_oclass
+nouveau_gpudup_oclass = {
+ .handle = NV_GPUOBJ_CLASS,
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .dtor = nouveau_gpudup_dtor,
+ .init = nouveau_object_init,
+ .fini = nouveau_object_fini,
+ },
+};
+
+int
+nouveau_gpuobj_dup(struct nouveau_object *parent, struct nouveau_gpuobj *base,
+ struct nouveau_gpuobj **pgpuobj)
+{
+ struct nouveau_gpuobj *gpuobj;
+ int ret;
+
+ ret = nouveau_object_create(parent, parent->engine,
+ &nouveau_gpudup_oclass, 0, &gpuobj);
+ *pgpuobj = gpuobj;
+ if (ret)
+ return ret;
+
+ nouveau_object_ref(nv_object(base), &gpuobj->parent);
+ gpuobj->addr = base->addr;
+ gpuobj->size = base->size;
+ return 0;
+}
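
_nouveau_gpuobj_rd32()/_wr32() above show how a suballocated object is
addressed: add the node offset, then forward the access to the parent's
accessors. The same idea in plain userspace C, with a byte buffer standing
in for the parent object:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct gpuobj {
	uint8_t *parent;	/* backing storage */
	uint32_t offset;	/* suballocation offset within the parent */
};

static uint32_t obj_rd32(const struct gpuobj *obj, uint32_t addr)
{
	uint32_t v;

	memcpy(&v, obj->parent + obj->offset + addr, 4);
	return v;
}

static void obj_wr32(struct gpuobj *obj, uint32_t addr, uint32_t data)
{
	memcpy(obj->parent + obj->offset + addr, &data, 4);
}

int main(void)
{
	uint8_t heap[64] = { 0 };
	struct gpuobj obj = { heap, 16 };

	obj_wr32(&obj, 0, 0xdeadbeef);	/* lands at heap[16..19] */
	printf("0x%08x\n", obj_rd32(&obj, 0));
	return 0;
}
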
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
new file mode 100644
index 00000000000..264c2b338ac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/handle.h>
+#include <core/client.h>
+
+#define hprintk(h,l,f,a...) do { \
+ struct nouveau_client *c = nouveau_client((h)->object); \
+ struct nouveau_handle *p = (h)->parent; u32 n = p ? p->name : ~0; \
+ nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a); \
+} while (0)
+
+int
+nouveau_handle_init(struct nouveau_handle *handle)
+{
+ struct nouveau_handle *item;
+ int ret;
+
+ hprintk(handle, TRACE, "init running\n");
+ ret = nouveau_object_inc(handle->object);
+ if (ret)
+ return ret;
+
+ hprintk(handle, TRACE, "init children\n");
+ list_for_each_entry(item, &handle->tree, head) {
+ ret = nouveau_handle_init(item);
+ if (ret)
+ goto fail;
+ }
+
+ hprintk(handle, TRACE, "init completed\n");
+ return 0;
+fail:
+ hprintk(handle, ERROR, "init failed with %d\n", ret);
+ list_for_each_entry_continue_reverse(item, &handle->tree, head) {
+ nouveau_handle_fini(item, false);
+ }
+
+ nouveau_object_dec(handle->object, false);
+ return ret;
+}
+
+int
+nouveau_handle_fini(struct nouveau_handle *handle, bool suspend)
+{
+ static const char *name[2] = { "fini", "suspend" };
+ struct nouveau_handle *item;
+ int ret;
+
+ hprintk(handle, TRACE, "%s children\n", name[suspend]);
+ list_for_each_entry(item, &handle->tree, head) {
+ ret = nouveau_handle_fini(item, suspend);
+ if (ret && suspend)
+ goto fail;
+ }
+
+ hprintk(handle, TRACE, "%s running\n", name[suspend]);
+ if (handle->object) {
+ ret = nouveau_object_dec(handle->object, suspend);
+ if (ret && suspend)
+ goto fail;
+ }
+
+ hprintk(handle, TRACE, "%s completed\n", name[suspend]);
+ return 0;
+fail:
+ hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret);
+ list_for_each_entry_continue_reverse(item, &handle->tree, head) {
+ int rret = nouveau_handle_init(item);
+ if (rret)
+ hprintk(handle, FATAL, "failed to restart, %d\n", rret);
+ }
+
+ return ret;
+}
+
+int
+nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
+ struct nouveau_object *object,
+ struct nouveau_handle **phandle)
+{
+ struct nouveau_object *namedb;
+ struct nouveau_handle *handle;
+ int ret;
+
+ namedb = parent;
+ while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
+ namedb = namedb->parent;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&handle->head);
+ INIT_LIST_HEAD(&handle->tree);
+ handle->name = _handle;
+ handle->priv = ~0;
+
+ ret = nouveau_namedb_insert(nv_namedb(namedb), _handle, object, handle);
+ if (ret) {
+ kfree(handle);
+ return ret;
+ }
+
+ if (nv_parent(parent)->object_attach) {
+ ret = nv_parent(parent)->object_attach(parent, object, _handle);
+ if (ret < 0) {
+ nouveau_handle_destroy(handle);
+ return ret;
+ }
+
+ handle->priv = ret;
+ }
+
+ if (object != namedb) {
+ while (!nv_iclass(namedb, NV_CLIENT_CLASS))
+ namedb = namedb->parent;
+
+ handle->parent = nouveau_namedb_get(nv_namedb(namedb), _parent);
+ if (handle->parent) {
+ list_add(&handle->head, &handle->parent->tree);
+ nouveau_namedb_put(handle->parent);
+ }
+ }
+
+ hprintk(handle, TRACE, "created\n");
+
+ *phandle = handle;
+
+ return 0;
+}
+
+void
+nouveau_handle_destroy(struct nouveau_handle *handle)
+{
+ struct nouveau_handle *item, *temp;
+
+ hprintk(handle, TRACE, "destroy running\n");
+ list_for_each_entry_safe(item, temp, &handle->tree, head) {
+ nouveau_handle_destroy(item);
+ }
+ list_del(&handle->head);
+
+ if (handle->priv != ~0) {
+ struct nouveau_object *parent = handle->parent->object;
+ nv_parent(parent)->object_detach(parent, handle->priv);
+ }
+
+ hprintk(handle, TRACE, "destroy completed\n");
+ nouveau_namedb_remove(handle);
+ kfree(handle);
+}
+
+struct nouveau_object *
+nouveau_handle_ref(struct nouveau_object *parent, u32 name)
+{
+ struct nouveau_object *object = NULL;
+ struct nouveau_handle *handle;
+
+ while (!nv_iclass(parent, NV_NAMEDB_CLASS))
+ parent = parent->parent;
+
+ handle = nouveau_namedb_get(nv_namedb(parent), name);
+ if (handle) {
+ nouveau_object_ref(handle->object, &object);
+ nouveau_namedb_put(handle);
+ }
+
+ return object;
+}
+
+struct nouveau_handle *
+nouveau_handle_get_class(struct nouveau_object *engctx, u16 oclass)
+{
+ struct nouveau_namedb *namedb;
+ if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
+ return nouveau_namedb_get_class(namedb, oclass);
+ return NULL;
+}
+
+struct nouveau_handle *
+nouveau_handle_get_vinst(struct nouveau_object *engctx, u64 vinst)
+{
+ struct nouveau_namedb *namedb;
+ if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
+ return nouveau_namedb_get_vinst(namedb, vinst);
+ return NULL;
+}
+
+struct nouveau_handle *
+nouveau_handle_get_cinst(struct nouveau_object *engctx, u32 cinst)
+{
+ struct nouveau_namedb *namedb;
+ if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
+ return nouveau_namedb_get_cinst(namedb, cinst);
+ return NULL;
+}
+
+void
+nouveau_handle_put(struct nouveau_handle *handle)
+{
+ if (handle)
+ nouveau_namedb_put(handle);
+}
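
nouveau_handle_init() and _fini() above both unwind on failure with
list_for_each_entry_continue_reverse(), undoing exactly the children that
were already processed. The same rollback shape with a plain array;
child_init/child_fini are hypothetical:

#include <stdio.h>

#define NCHILD 4

static int child_init(int i)
{
	if (i == 2)
		return -1;	/* simulate a failure part-way through */
	printf("init %d\n", i);
	return 0;
}

static void child_fini(int i)
{
	printf("fini %d\n", i);
}

static int init_all(void)
{
	int i, ret = 0;

	for (i = 0; i < NCHILD; i++) {
		ret = child_init(i);
		if (ret)
			break;
	}
	if (ret) {
		while (--i >= 0)	/* unwind only what succeeded */
			child_fini(i);
	}
	return ret;
}

int main(void)
{
	return init_all() ? 1 : 0;
}
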
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
new file mode 100644
index 00000000000..7a4e0891c5f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "core/os.h"
+#include "core/mm.h"
+
+#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
+ list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
+
+void
+nouveau_mm_free(struct nouveau_mm *mm, struct nouveau_mm_node **pthis)
+{
+ struct nouveau_mm_node *this = *pthis;
+
+ if (this) {
+ struct nouveau_mm_node *prev = node(this, prev);
+ struct nouveau_mm_node *next = node(this, next);
+
+ if (prev && prev->type == 0) {
+ prev->length += this->length;
+ list_del(&this->nl_entry);
+ kfree(this);
+ this = prev;
+ }
+
+ if (next && next->type == 0) {
+ next->offset = this->offset;
+ next->length += this->length;
+ if (this->type == 0)
+ list_del(&this->fl_entry);
+ list_del(&this->nl_entry);
+ kfree(this);
+ this = NULL;
+ }
+
+ if (this && this->type != 0) {
+ list_for_each_entry(prev, &mm->free, fl_entry) {
+ if (this->offset < prev->offset)
+ break;
+ }
+
+ list_add_tail(&this->fl_entry, &prev->fl_entry);
+ this->type = 0;
+ }
+ }
+
+ *pthis = NULL;
+}
+
+static struct nouveau_mm_node *
+region_head(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
+{
+ struct nouveau_mm_node *b;
+
+ if (a->length == size)
+ return a;
+
+ b = kmalloc(sizeof(*b), GFP_KERNEL);
+ if (unlikely(b == NULL))
+ return NULL;
+
+ b->offset = a->offset;
+ b->length = size;
+ b->type = a->type;
+ a->offset += size;
+ a->length -= size;
+ list_add_tail(&b->nl_entry, &a->nl_entry);
+ if (b->type == 0)
+ list_add_tail(&b->fl_entry, &a->fl_entry);
+ return b;
+}
+
+int
+nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
+ u32 align, struct nouveau_mm_node **pnode)
+{
+ struct nouveau_mm_node *prev, *this, *next;
+ u32 mask = align - 1;
+ u32 splitoff;
+ u32 s, e;
+
+ BUG_ON(!type);
+
+ list_for_each_entry(this, &mm->free, fl_entry) {
+ e = this->offset + this->length;
+ s = this->offset;
+
+ prev = node(this, prev);
+ if (prev && prev->type != type)
+ s = roundup(s, mm->block_size);
+
+ next = node(this, next);
+ if (next && next->type != type)
+ e = rounddown(e, mm->block_size);
+
+ s = (s + mask) & ~mask;
+ e &= ~mask;
+ if (s > e || e - s < size_min)
+ continue;
+
+ splitoff = s - this->offset;
+ if (splitoff && !region_head(mm, this, splitoff))
+ return -ENOMEM;
+
+ this = region_head(mm, this, min(size_max, e - s));
+ if (!this)
+ return -ENOMEM;
+
+ this->type = type;
+ list_del(&this->fl_entry);
+ *pnode = this;
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static struct nouveau_mm_node *
+region_tail(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
+{
+ struct nouveau_mm_node *b;
+
+ if (a->length == size)
+ return a;
+
+ b = kmalloc(sizeof(*b), GFP_KERNEL);
+ if (unlikely(b == NULL))
+ return NULL;
+
+ a->length -= size;
+ b->offset = a->offset + a->length;
+ b->length = size;
+ b->type = a->type;
+
+ list_add(&b->nl_entry, &a->nl_entry);
+ if (b->type == 0)
+ list_add(&b->fl_entry, &a->fl_entry);
+ return b;
+}
+
+int
+nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
+ u32 align, struct nouveau_mm_node **pnode)
+{
+ struct nouveau_mm_node *prev, *this, *next;
+ u32 mask = align - 1;
+
+ BUG_ON(!type);
+
+ list_for_each_entry_reverse(this, &mm->free, fl_entry) {
+ u32 e = this->offset + this->length;
+ u32 s = this->offset;
+ u32 c = 0, a;
+
+ prev = node(this, prev);
+ if (prev && prev->type != type)
+ s = roundup(s, mm->block_size);
+
+ next = node(this, next);
+ if (next && next->type != type) {
+ e = rounddown(e, mm->block_size);
+ c = next->offset - e;
+ }
+
+ s = (s + mask) & ~mask;
+ a = e - s;
+ if (s > e || a < size_min)
+ continue;
+
+ a = min(a, size_max);
+ s = (e - a) & ~mask;
+ c += (e - s) - a;
+
+ if (c && !region_tail(mm, this, c))
+ return -ENOMEM;
+
+ this = region_tail(mm, this, a);
+ if (!this)
+ return -ENOMEM;
+
+ this->type = type;
+ list_del(&this->fl_entry);
+ *pnode = this;
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+int
+nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
+{
+ struct nouveau_mm_node *node;
+
+ if (block) {
+ INIT_LIST_HEAD(&mm->nodes);
+ INIT_LIST_HEAD(&mm->free);
+ mm->block_size = block;
+ mm->heap_nodes = 0;
+ }
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ if (length) {
+ node->offset = roundup(offset, mm->block_size);
+ node->length = rounddown(offset + length, mm->block_size);
+ node->length -= node->offset;
+ }
+
+ list_add_tail(&node->nl_entry, &mm->nodes);
+ list_add_tail(&node->fl_entry, &mm->free);
+ mm->heap_nodes++;
+ return 0;
+}
+
+int
+nouveau_mm_fini(struct nouveau_mm *mm)
+{
+ if (nouveau_mm_initialised(mm)) {
+ struct nouveau_mm_node *node, *heap =
+ list_first_entry(&mm->nodes, typeof(*heap), nl_entry);
+ int nodes = 0;
+
+ list_for_each_entry(node, &mm->nodes, nl_entry) {
+ if (WARN_ON(nodes++ == mm->heap_nodes))
+ return -EBUSY;
+ }
+
+ kfree(heap);
+ }
+
+ return 0;
+}
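
The core arithmetic in nouveau_mm_head() above is the aligned-span test:
with a power-of-two align, mask = align - 1, the candidate start rounds up
with (s + mask) & ~mask, the end rounds down with e & ~mask, and the region
is viable only if the remainder still covers size_min. Isolated below with
illustrative numbers:

#include <stdio.h>
#include <stdint.h>

static int fit(uint32_t s, uint32_t e, uint32_t align, uint32_t size_min,
	       uint32_t *start)
{
	uint32_t mask = align - 1;

	s = (s + mask) & ~mask;		/* round the start up */
	e &= ~mask;			/* round the end down */
	if (s > e || e - s < size_min)
		return 0;
	*start = s;
	return 1;
}

int main(void)
{
	uint32_t start;

	if (fit(0x1003, 0x2000, 0x100, 0x800, &start))
		printf("allocate at 0x%x\n", start);	/* 0x1100 */
	return 0;
}
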
diff --git a/drivers/gpu/drm/nouveau/core/core/namedb.c b/drivers/gpu/drm/nouveau/core/core/namedb.c
new file mode 100644
index 00000000000..0594a599f6f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/namedb.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/gpuobj.h>
+
+static struct nouveau_handle *
+nouveau_namedb_lookup(struct nouveau_namedb *namedb, u32 name)
+{
+ struct nouveau_handle *handle;
+
+ list_for_each_entry(handle, &namedb->list, node) {
+ if (handle->name == name)
+ return handle;
+ }
+
+ return NULL;
+}
+
+static struct nouveau_handle *
+nouveau_namedb_lookup_class(struct nouveau_namedb *namedb, u16 oclass)
+{
+ struct nouveau_handle *handle;
+
+ list_for_each_entry(handle, &namedb->list, node) {
+ if (nv_mclass(handle->object) == oclass)
+ return handle;
+ }
+
+ return NULL;
+}
+
+static struct nouveau_handle *
+nouveau_namedb_lookup_vinst(struct nouveau_namedb *namedb, u64 vinst)
+{
+ struct nouveau_handle *handle;
+
+ list_for_each_entry(handle, &namedb->list, node) {
+ if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
+ if (nv_gpuobj(handle->object)->addr == vinst)
+ return handle;
+ }
+ }
+
+ return NULL;
+}
+
+static struct nouveau_handle *
+nouveau_namedb_lookup_cinst(struct nouveau_namedb *namedb, u32 cinst)
+{
+ struct nouveau_handle *handle;
+
+ list_for_each_entry(handle, &namedb->list, node) {
+ if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
+ if (nv_gpuobj(handle->object)->node &&
+ nv_gpuobj(handle->object)->node->offset == cinst)
+ return handle;
+ }
+ }
+
+ return NULL;
+}
+
+int
+nouveau_namedb_insert(struct nouveau_namedb *namedb, u32 name,
+ struct nouveau_object *object,
+ struct nouveau_handle *handle)
+{
+ int ret = -EEXIST;
+ write_lock_irq(&namedb->lock);
+ if (!nouveau_namedb_lookup(namedb, name)) {
+ nouveau_object_ref(object, &handle->object);
+ handle->namedb = namedb;
+ list_add(&handle->node, &namedb->list);
+ ret = 0;
+ }
+ write_unlock_irq(&namedb->lock);
+ return ret;
+}
+
+void
+nouveau_namedb_remove(struct nouveau_handle *handle)
+{
+ struct nouveau_namedb *namedb = handle->namedb;
+ struct nouveau_object *object = handle->object;
+ write_lock_irq(&namedb->lock);
+ list_del(&handle->node);
+ write_unlock_irq(&namedb->lock);
+ nouveau_object_ref(NULL, &object);
+}
+
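+/* on a hit, the *_get() functions below return with the read lock held;
+ * nouveau_namedb_put() releases it
+ */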
+struct nouveau_handle *
+nouveau_namedb_get(struct nouveau_namedb *namedb, u32 name)
+{
+ struct nouveau_handle *handle;
+ read_lock(&namedb->lock);
+ handle = nouveau_namedb_lookup(namedb, name);
+ if (handle == NULL)
+ read_unlock(&namedb->lock);
+ return handle;
+}
+
+struct nouveau_handle *
+nouveau_namedb_get_class(struct nouveau_namedb *namedb, u16 oclass)
+{
+ struct nouveau_handle *handle;
+ read_lock(&namedb->lock);
+ handle = nouveau_namedb_lookup_class(namedb, oclass);
+ if (handle == NULL)
+ read_unlock(&namedb->lock);
+ return handle;
+}
+
+struct nouveau_handle *
+nouveau_namedb_get_vinst(struct nouveau_namedb *namedb, u64 vinst)
+{
+ struct nouveau_handle *handle;
+ read_lock(&namedb->lock);
+ handle = nouveau_namedb_lookup_vinst(namedb, vinst);
+ if (handle == NULL)
+ read_unlock(&namedb->lock);
+ return handle;
+}
+
+struct nouveau_handle *
+nouveau_namedb_get_cinst(struct nouveau_namedb *namedb, u32 cinst)
+{
+ struct nouveau_handle *handle;
+ read_lock(&namedb->lock);
+ handle = nouveau_namedb_lookup_cinst(namedb, cinst);
+ if (handle == NULL)
+ read_unlock(&namedb->lock);
+ return handle;
+}
+
+void
+nouveau_namedb_put(struct nouveau_handle *handle)
+{
+ if (handle)
+ read_unlock(&handle->namedb->lock);
+}
+
+int
+nouveau_namedb_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 pclass,
+ struct nouveau_oclass *sclass, u64 engcls,
+ int length, void **pobject)
+{
+ struct nouveau_namedb *namedb;
+ int ret;
+
+ ret = nouveau_parent_create_(parent, engine, oclass, pclass |
+ NV_NAMEDB_CLASS, sclass, engcls,
+ length, pobject);
+ namedb = *pobject;
+ if (ret)
+ return ret;
+
+ rwlock_init(&namedb->lock);
+ INIT_LIST_HEAD(&namedb->list);
+ return 0;
+}
+
+int
+_nouveau_namedb_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_namedb *object;
+ int ret;
+
+ ret = nouveau_namedb_create(parent, engine, oclass, 0, NULL, 0, &object);
+ *pobject = nv_object(object);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c
new file mode 100644
index 00000000000..12453855590
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/object.c
@@ -0,0 +1,474 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/engine.h>
+
+#ifdef NOUVEAU_OBJECT_MAGIC
+static struct list_head _objlist = LIST_HEAD_INIT(_objlist);
+static DEFINE_SPINLOCK(_objlist_lock);
+#endif
+
+int
+nouveau_object_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 pclass,
+ int size, void **pobject)
+{
+ struct nouveau_object *object;
+
+ object = *pobject = kzalloc(size, GFP_KERNEL);
+ if (!object)
+ return -ENOMEM;
+
+ nouveau_object_ref(parent, &object->parent);
+ nouveau_object_ref(engine, &object->engine);
+ object->oclass = oclass;
+ object->oclass->handle |= pclass;
+ atomic_set(&object->refcount, 1);
+ atomic_set(&object->usecount, 0);
+
+#ifdef NOUVEAU_OBJECT_MAGIC
+ object->_magic = NOUVEAU_OBJECT_MAGIC;
+ spin_lock(&_objlist_lock);
+ list_add(&object->list, &_objlist);
+ spin_unlock(&_objlist_lock);
+#endif
+ return 0;
+}
+
+static int
+_nouveau_object_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_object *object;
+ int ret;
+
+ ret = nouveau_object_create(parent, engine, oclass, 0, &object);
+ *pobject = nv_object(object);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void
+nouveau_object_destroy(struct nouveau_object *object)
+{
+#ifdef NOUVEAU_OBJECT_MAGIC
+ spin_lock(&_objlist_lock);
+ list_del(&object->list);
+ spin_unlock(&_objlist_lock);
+#endif
+ nouveau_object_ref(NULL, &object->engine);
+ nouveau_object_ref(NULL, &object->parent);
+ kfree(object);
+}
+
+static void
+_nouveau_object_dtor(struct nouveau_object *object)
+{
+ nouveau_object_destroy(object);
+}
+
+int
+nouveau_object_init(struct nouveau_object *object)
+{
+ return 0;
+}
+
+static int
+_nouveau_object_init(struct nouveau_object *object)
+{
+ return nouveau_object_init(object);
+}
+
+int
+nouveau_object_fini(struct nouveau_object *object, bool suspend)
+{
+ return 0;
+}
+
+static int
+_nouveau_object_fini(struct nouveau_object *object, bool suspend)
+{
+ return nouveau_object_fini(object, suspend);
+}
+
+struct nouveau_ofuncs
+nouveau_object_ofuncs = {
+ .ctor = _nouveau_object_ctor,
+ .dtor = _nouveau_object_dtor,
+ .init = _nouveau_object_init,
+ .fini = _nouveau_object_fini,
+};
+
+int
+nouveau_object_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_ofuncs *ofuncs = oclass->ofuncs;
+ struct nouveau_object *object = NULL;
+ int ret;
+
+ ret = ofuncs->ctor(parent, engine, oclass, data, size, &object);
+ *pobject = object;
+ if (ret < 0) {
+ if (ret != -ENODEV) {
+ nv_error(parent, "failed to create 0x%08x, %d\n",
+ oclass->handle, ret);
+ }
+
+ if (object) {
+ ofuncs->dtor(object);
+ *pobject = NULL;
+ }
+
+ return ret;
+ }
+
+ if (ret == 0) {
+ nv_trace(object, "created\n");
+ atomic_set(&object->refcount, 1);
+ }
+
+ return 0;
+}
+
+static void
+nouveau_object_dtor(struct nouveau_object *object)
+{
+ nv_trace(object, "destroying\n");
+ nv_ofuncs(object)->dtor(object);
+}
+
+void
+nouveau_object_ref(struct nouveau_object *obj, struct nouveau_object **ref)
+{
+ if (obj) {
+ atomic_inc(&obj->refcount);
+ nv_trace(obj, "inc() == %d\n", atomic_read(&obj->refcount));
+ }
+
+ if (*ref) {
+ int dead = atomic_dec_and_test(&(*ref)->refcount);
+ nv_trace(*ref, "dec() == %d\n", atomic_read(&(*ref)->refcount));
+ if (dead)
+ nouveau_object_dtor(*ref);
+ }
+
+ *ref = obj;
+}
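+
+/*
+ * nouveau_object_ref() is ref and unref in one: it pins obj (if any),
+ * drops whatever *ref previously pointed at, and stores obj in *ref.
+ * E.g. (illustrative) nouveau_object_ref(obj, &ptr) takes a reference,
+ * and a later nouveau_object_ref(NULL, &ptr) releases it again.
+ */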
+
+int
+nouveau_object_new(struct nouveau_object *client, u32 _parent, u32 _handle,
+ u16 _oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_object *parent = NULL;
+ struct nouveau_object *engctx = NULL;
+ struct nouveau_object *object = NULL;
+ struct nouveau_object *engine;
+ struct nouveau_oclass *oclass;
+ struct nouveau_handle *handle;
+ int ret;
+
+ /* lookup parent object and ensure it *is* a parent */
+ parent = nouveau_handle_ref(client, _parent);
+ if (!parent) {
+ nv_error(client, "parent 0x%08x not found\n", _parent);
+ return -ENOENT;
+ }
+
+ if (!nv_iclass(parent, NV_PARENT_CLASS)) {
+ nv_error(parent, "cannot have children\n");
+ ret = -EINVAL;
+ goto fail_class;
+ }
+
+ /* check that parent supports the requested subclass */
+ ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass);
+ if (ret) {
+ nv_debug(parent, "illegal class 0x%04x\n", _oclass);
+ goto fail_class;
+ }
+
+ /* make sure engine init has been completed *before* any objects
+ * it controls are created - the constructors may depend on
+ * state calculated at init (i.e. default context construction)
+ */
+ if (engine) {
+ ret = nouveau_object_inc(engine);
+ if (ret)
+ goto fail_class;
+ }
+
+ /* if engine requires it, create a context object to insert
+ * between the parent and its children (eg. PGRAPH context)
+ */
+ if (engine && nv_engine(engine)->cclass) {
+ ret = nouveau_object_ctor(parent, engine,
+ nv_engine(engine)->cclass,
+ data, size, &engctx);
+ if (ret)
+ goto fail_engctx;
+ } else {
+ nouveau_object_ref(parent, &engctx);
+ }
+
+ /* finally, create new object and bind it to its handle */
+ ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
+ *pobject = object;
+ if (ret)
+ goto fail_ctor;
+
+ ret = nouveau_object_inc(object);
+ if (ret)
+ goto fail_init;
+
+ ret = nouveau_handle_create(parent, _parent, _handle, object, &handle);
+ if (ret)
+ goto fail_handle;
+
+ ret = nouveau_handle_init(handle);
+ if (ret)
+ nouveau_handle_destroy(handle);
+
+fail_handle:
+ nouveau_object_dec(object, false);
+fail_init:
+ nouveau_object_ref(NULL, &object);
+fail_ctor:
+ nouveau_object_ref(NULL, &engctx);
+fail_engctx:
+ if (engine)
+ nouveau_object_dec(engine, false);
+fail_class:
+ nouveau_object_ref(NULL, &parent);
+ return ret;
+}
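+
+/*
+ * Note the shared tail above: success and failure both fall through the
+ * fail_* labels, which only drop the temporary references and usecount
+ * taken during construction.  On success the object survives because
+ * the handle holds its own reference (see nouveau_namedb_insert()).
+ */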
+
+int
+nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
+{
+ struct nouveau_object *parent = NULL;
+ struct nouveau_object *namedb = NULL;
+ struct nouveau_handle *handle = NULL;
+
+ parent = nouveau_handle_ref(client, _parent);
+ if (!parent)
+ return -ENOENT;
+
+ namedb = nv_pclass(parent, NV_NAMEDB_CLASS);
+ if (namedb) {
+ handle = nouveau_namedb_get(nv_namedb(namedb), _handle);
+ if (handle) {
+ nouveau_namedb_put(handle);
+ nouveau_handle_fini(handle, false);
+ nouveau_handle_destroy(handle);
+ }
+ }
+
+ nouveau_object_ref(NULL, &parent);
+ return handle ? 0 : -EINVAL;
+}
+
+int
+nouveau_object_inc(struct nouveau_object *object)
+{
+ int ref = atomic_add_return(1, &object->usecount);
+ int ret;
+
+ nv_trace(object, "use(+1) == %d\n", atomic_read(&object->usecount));
+ if (ref != 1)
+ return 0;
+
+ nv_trace(object, "initialising...\n");
+ if (object->parent) {
+ ret = nouveau_object_inc(object->parent);
+ if (ret) {
+ nv_error(object, "parent failed, %d\n", ret);
+ goto fail_parent;
+ }
+ }
+
+ if (object->engine) {
+ mutex_lock(&nv_subdev(object->engine)->mutex);
+ ret = nouveau_object_inc(object->engine);
+ mutex_unlock(&nv_subdev(object->engine)->mutex);
+ if (ret) {
+ nv_error(object, "engine failed, %d\n", ret);
+ goto fail_engine;
+ }
+ }
+
+ ret = nv_ofuncs(object)->init(object);
+ atomic_set(&object->usecount, 1);
+ if (ret) {
+ nv_error(object, "init failed, %d\n", ret);
+ goto fail_self;
+ }
+
+ nv_trace(object, "initialised\n");
+ return 0;
+
+fail_self:
+ if (object->engine) {
+ mutex_lock(&nv_subdev(object->engine)->mutex);
+ nouveau_object_dec(object->engine, false);
+ mutex_unlock(&nv_subdev(object->engine)->mutex);
+ }
+fail_engine:
+ if (object->parent)
+ nouveau_object_dec(object->parent, false);
+fail_parent:
+ atomic_dec(&object->usecount);
+ return ret;
+}
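+
+/*
+ * usecount is a lazy-init counter layered on top of refcount: only the
+ * 0 -> 1 transition above initialises the object (parent chain first,
+ * then engine, then self), and only the matching 1 -> 0 transition in
+ * nouveau_object_dec() tears it back down.
+ */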
+
+static int
+nouveau_object_decf(struct nouveau_object *object)
+{
+ int ret;
+
+ nv_trace(object, "stopping...\n");
+
+ ret = nv_ofuncs(object)->fini(object, false);
+ atomic_set(&object->usecount, 0);
+ if (ret)
+ nv_warn(object, "failed fini, %d\n", ret);
+
+ if (object->engine) {
+ mutex_lock(&nv_subdev(object->engine)->mutex);
+ nouveau_object_dec(object->engine, false);
+ mutex_unlock(&nv_subdev(object->engine)->mutex);
+ }
+
+ if (object->parent)
+ nouveau_object_dec(object->parent, false);
+
+ nv_trace(object, "stopped\n");
+ return 0;
+}
+
+static int
+nouveau_object_decs(struct nouveau_object *object)
+{
+ int ret, rret;
+
+ nv_trace(object, "suspending...\n");
+
+ ret = nv_ofuncs(object)->fini(object, true);
+ atomic_set(&object->usecount, 0);
+ if (ret) {
+ nv_error(object, "failed suspend, %d\n", ret);
+ return ret;
+ }
+
+ if (object->engine) {
+ mutex_lock(&nv_subdev(object->engine)->mutex);
+ ret = nouveau_object_dec(object->engine, true);
+ mutex_unlock(&nv_subdev(object->engine)->mutex);
+ if (ret) {
+ nv_warn(object, "engine failed suspend, %d\n", ret);
+ goto fail_engine;
+ }
+ }
+
+ if (object->parent) {
+ ret = nouveau_object_dec(object->parent, true);
+ if (ret) {
+ nv_warn(object, "parent failed suspend, %d\n", ret);
+ goto fail_parent;
+ }
+ }
+
+ nv_trace(object, "suspended\n");
+ return 0;
+
+fail_parent:
+ if (object->engine) {
+ mutex_lock(&nv_subdev(object->engine)->mutex);
+ rret = nouveau_object_inc(object->engine);
+ mutex_unlock(&nv_subdev(object->engine)->mutex);
+ if (rret)
+ nv_fatal(object, "engine failed to reinit, %d\n", rret);
+ }
+
+fail_engine:
+ rret = nv_ofuncs(object)->init(object);
+ if (rret)
+ nv_fatal(object, "failed to reinit, %d\n", rret);
+
+ return ret;
+}
+
+int
+nouveau_object_dec(struct nouveau_object *object, bool suspend)
+{
+ int ref = atomic_add_return(-1, &object->usecount);
+ int ret;
+
+ nv_trace(object, "use(-1) == %d\n", atomic_read(&object->usecount));
+
+ if (ref == 0) {
+ if (suspend)
+ ret = nouveau_object_decs(object);
+ else
+ ret = nouveau_object_decf(object);
+
+ if (ret) {
+ atomic_inc(&object->usecount);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void
+nouveau_object_debug(void)
+{
+#ifdef NOUVEAU_OBJECT_MAGIC
+ struct nouveau_object *object;
+ if (!list_empty(&_objlist)) {
+ nv_fatal(NULL, "*******************************************\n");
+ nv_fatal(NULL, "* AIIIII! object(s) still exist!!!\n");
+ nv_fatal(NULL, "*******************************************\n");
+ list_for_each_entry(object, &_objlist, list) {
+ nv_fatal(object, "%p/%p/%d/%d\n",
+ object->parent, object->engine,
+ atomic_read(&object->refcount),
+ atomic_read(&object->usecount));
+ }
+ }
+#endif
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/option.c b/drivers/gpu/drm/nouveau/core/core/option.c
new file mode 100644
index 00000000000..9f6fcc5f66c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/option.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/option.h>
+#include <core/debug.h>
+
+const char *
+nouveau_stropt(const char *optstr, const char *opt, int *arglen)
+{
+ while (optstr && *optstr != '\0') {
+ int len = strcspn(optstr, ",=");
+ switch (optstr[len]) {
+ case '=':
+ if (!strncasecmpz(optstr, opt, len)) {
+ optstr += len + 1;
+ *arglen = strcspn(optstr, ",=");
+ return *arglen ? optstr : NULL;
+ }
+ optstr++;
+ break;
+ case ',':
+ optstr++;
+ break;
+ default:
+ break;
+ }
+ optstr += len;
+ }
+
+ return NULL;
+}
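+
+/*
+ * Example (illustrative): with optstr = "NvMSI=0,PFIFO=trace",
+ * nouveau_stropt(optstr, "PFIFO", &len) returns a pointer to "trace"
+ * and sets len = 5.  The argument is not NUL-terminated, which is why
+ * the callers below compare with strncasecmpz() and the returned len.
+ */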
+
+bool
+nouveau_boolopt(const char *optstr, const char *opt, bool value)
+{
+ int arglen;
+
+ optstr = nouveau_stropt(optstr, opt, &arglen);
+ if (optstr) {
+ if (!strncasecmpz(optstr, "0", arglen) ||
+ !strncasecmpz(optstr, "no", arglen) ||
+ !strncasecmpz(optstr, "off", arglen) ||
+ !strncasecmpz(optstr, "false", arglen))
+ value = false;
+ else
+ if (!strncasecmpz(optstr, "1", arglen) ||
+ !strncasecmpz(optstr, "yes", arglen) ||
+ !strncasecmpz(optstr, "on", arglen) ||
+ !strncasecmpz(optstr, "true", arglen))
+ value = true;
+ }
+
+ return value;
+}
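+
+/*
+ * E.g. (illustrative) nouveau_boolopt("NvMSI=0", "NvMSI", true) returns
+ * false; a missing option or an unrecognised argument leaves the
+ * caller's default value untouched.
+ */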
+
+int
+nouveau_dbgopt(const char *optstr, const char *sub)
+{
+ int mode = 1, level = CONFIG_NOUVEAU_DEBUG_DEFAULT;
+
+ while (optstr) {
+ int len = strcspn(optstr, ",=");
+ switch (optstr[len]) {
+ case '=':
+ if (strncasecmpz(optstr, sub, len))
+ mode = 0;
+ optstr++;
+ break;
+ default:
+ if (mode) {
+ if (!strncasecmpz(optstr, "fatal", len))
+ level = NV_DBG_FATAL;
+ else if (!strncasecmpz(optstr, "error", len))
+ level = NV_DBG_ERROR;
+ else if (!strncasecmpz(optstr, "warn", len))
+ level = NV_DBG_WARN;
+ else if (!strncasecmpz(optstr, "info", len))
+ level = NV_DBG_INFO_NORMAL;
+ else if (!strncasecmpz(optstr, "debug", len))
+ level = NV_DBG_DEBUG;
+ else if (!strncasecmpz(optstr, "trace", len))
+ level = NV_DBG_TRACE;
+ else if (!strncasecmpz(optstr, "paranoia", len))
+ level = NV_DBG_PARANOIA;
+ else if (!strncasecmpz(optstr, "spam", len))
+ level = NV_DBG_SPAM;
+ }
+
+ if (optstr[len] != '\0') {
+ optstr++;
+ mode = 1;
+ break;
+ }
+
+ return level;
+ }
+ optstr += len;
+ }
+
+ return level;
+}
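+
+/*
+ * Example (illustrative): with optstr = "warn,PFIFO=trace",
+ * nouveau_dbgopt(optstr, "PFIFO") yields NV_DBG_TRACE while any other
+ * subdev name yields NV_DBG_WARN: a bare word sets the level globally,
+ * and a "name=" prefix limits the level that follows to that subdev.
+ */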
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c
new file mode 100644
index 00000000000..dee5d1235e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/parent.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/client.h>
+
+int
+nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
+ struct nouveau_object **pengine,
+ struct nouveau_oclass **poclass)
+{
+ struct nouveau_sclass *sclass;
+ struct nouveau_engine *engine;
+ struct nouveau_oclass *oclass;
+ u64 mask;
+
+ sclass = nv_parent(parent)->sclass;
+ while (sclass) {
+ if ((sclass->oclass->handle & 0xffff) == handle) {
+ *pengine = parent->engine;
+ *poclass = sclass->oclass;
+ return 0;
+ }
+
+ sclass = sclass->sclass;
+ }
+
+ mask = nv_parent(parent)->engine;
+ while (mask) {
+ int i = __ffs64(mask);
+
+ if (nv_iclass(parent, NV_CLIENT_CLASS))
+ engine = nv_engine(nv_client(parent)->device);
+ else
+ engine = nouveau_engine(parent, i);
+
+ if (engine) {
+ oclass = engine->sclass;
+ while (oclass->ofuncs) {
+ if ((oclass->handle & 0xffff) == handle) {
+ *pengine = nv_object(engine);
+ *poclass = oclass;
+ return 0;
+ }
+ oclass++;
+ }
+ }
+
+ mask &= ~(1ULL << i);
+ }
+
+ return -EINVAL;
+}
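+
+/*
+ * Class lookup is two-stage: first the classes the parent itself
+ * exposes through its sclass list, then the sclass table of every
+ * engine enabled in the parent's engine bitmask, one set bit at a
+ * time via __ffs64().
+ */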
+
+int
+nouveau_parent_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 pclass,
+ struct nouveau_oclass *sclass, u64 engcls,
+ int size, void **pobject)
+{
+ struct nouveau_parent *object;
+ struct nouveau_sclass *nclass;
+ int ret;
+
+ ret = nouveau_object_create_(parent, engine, oclass, pclass |
+ NV_PARENT_CLASS, size, pobject);
+ object = *pobject;
+ if (ret)
+ return ret;
+
+ while (sclass && sclass->ofuncs) {
+ nclass = kzalloc(sizeof(*nclass), GFP_KERNEL);
+ if (!nclass)
+ return -ENOMEM;
+
+ nclass->sclass = object->sclass;
+ object->sclass = nclass;
+ nclass->engine = engine ? nv_engine(engine) : NULL;
+ nclass->oclass = sclass;
+ sclass++;
+ }
+
+ object->engine = engcls;
+ return 0;
+}
+
+void
+nouveau_parent_destroy(struct nouveau_parent *parent)
+{
+ struct nouveau_sclass *sclass;
+
+ while ((sclass = parent->sclass)) {
+ parent->sclass = sclass->sclass;
+ kfree(sclass);
+ }
+
+ nouveau_object_destroy(&parent->base);
+}
+
+void
+_nouveau_parent_dtor(struct nouveau_object *object)
+{
+ nouveau_parent_destroy(nv_parent(object));
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c
new file mode 100644
index 00000000000..03e0060b13d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/printk.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/client.h>
+#include <core/subdev.h>
+#include <core/printk.h>
+
+int nv_info_debug_level = NV_DBG_INFO_NORMAL;
+
+void
+nv_printk_(struct nouveau_object *object, int level, const char *fmt, ...)
+{
+ static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
+ const char *pfx;
+ char mfmt[256];
+ va_list args;
+
+ switch (level) {
+ case NV_DBG_FATAL:
+ pfx = KERN_CRIT;
+ break;
+ case NV_DBG_ERROR:
+ pfx = KERN_ERR;
+ break;
+ case NV_DBG_WARN:
+ pfx = KERN_WARNING;
+ break;
+ case NV_DBG_INFO_NORMAL:
+ pfx = KERN_INFO;
+ break;
+ case NV_DBG_DEBUG:
+ case NV_DBG_PARANOIA:
+ case NV_DBG_TRACE:
+ case NV_DBG_SPAM:
+ default:
+ pfx = KERN_DEBUG;
+ break;
+ }
+
+ if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
+ struct nouveau_object *device = object;
+ struct nouveau_object *subdev = object;
+ char obuf[64], *ofmt = "";
+
+ if (object->engine) {
+ snprintf(obuf, sizeof(obuf), "[0x%08x][%p]",
+ nv_hclass(object), object);
+ ofmt = obuf;
+ subdev = object->engine;
+ device = object->engine;
+ }
+
+ if (subdev->parent)
+ device = subdev->parent;
+
+ if (level > nv_subdev(subdev)->debug)
+ return;
+
+ snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s][%s]%s %s", pfx,
+ name[level], nv_subdev(subdev)->name,
+ nv_device(device)->name, ofmt, fmt);
+ } else
+ if (object && nv_iclass(object, NV_CLIENT_CLASS)) {
+ if (level > nv_client(object)->debug)
+ return;
+
+ snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s] %s", pfx,
+ name[level], nv_client(object)->name, fmt);
+ } else {
+ snprintf(mfmt, sizeof(mfmt), "%snouveau: %s", pfx, fmt);
+ }
+
+ va_start(args, fmt);
+ vprintk(mfmt, args);
+ va_end(args);
+}
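+
+/*
+ * The prefix built above encodes severity, subdev and device, e.g.
+ * (illustrative) "nouveau W[   PFIFO][0000:01:00.0] ..." for a warning
+ * from PFIFO; anything above the subdev's or client's debug level is
+ * dropped before it reaches vprintk().
+ */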
diff --git a/drivers/gpu/drm/nouveau/core/core/ramht.c b/drivers/gpu/drm/nouveau/core/core/ramht.c
new file mode 100644
index 00000000000..f3b9bddc387
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/ramht.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/object.h>
+#include <core/ramht.h>
+
+#include <subdev/bar.h>
+
+static u32
+nouveau_ramht_hash(struct nouveau_ramht *ramht, int chid, u32 handle)
+{
+ u32 hash = 0;
+
+ while (handle) {
+ hash ^= (handle & ((1 << ramht->bits) - 1));
+ handle >>= ramht->bits;
+ }
+
+ hash ^= chid << (ramht->bits - 4);
+ hash = hash << 3;
+ return hash;
+}
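+
+/*
+ * Worked example (illustrative), with ramht->bits == 4: handle 0xabcd
+ * folds to 0xd ^ 0xc ^ 0xb ^ 0xa == 0x0, chid 3 adds 3 << (4 - 4) == 3,
+ * and the final << 3 converts the bucket index into the byte offset of
+ * an 8-byte entry: 0x18.
+ */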
+
+int
+nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid,
+ u32 handle, u32 context)
+{
+ struct nouveau_bar *bar = nouveau_bar(ramht);
+ u32 co, ho;
+
+ co = ho = nouveau_ramht_hash(ramht, chid, handle);
+ do {
+ if (!nv_ro32(ramht, co + 4)) {
+ nv_wo32(ramht, co + 0, handle);
+ nv_wo32(ramht, co + 4, context);
+ if (bar)
+ bar->flush(bar);
+ return co;
+ }
+
+ co += 8;
+ if (co >= nv_gpuobj(ramht)->size)
+ co = 0;
+ } while (co != ho);
+
+ return -ENOMEM;
+}
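+
+/*
+ * On a collision the loop above probes forward in 8-byte steps,
+ * wrapping at the end of the table, and only gives up with -ENOMEM once
+ * it arrives back at the starting bucket.  The returned offset doubles
+ * as the cookie later passed to nouveau_ramht_remove().
+ */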
+
+void
+nouveau_ramht_remove(struct nouveau_ramht *ramht, int cookie)
+{
+ struct nouveau_bar *bar = nouveau_bar(ramht);
+ nv_wo32(ramht, cookie + 0, 0x00000000);
+ nv_wo32(ramht, cookie + 4, 0x00000000);
+ if (bar)
+ bar->flush(bar);
+}
+
+static struct nouveau_oclass
+nouveau_ramht_oclass = {
+ .handle = 0x0000abcd,
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = NULL,
+ .dtor = _nouveau_gpuobj_dtor,
+ .init = _nouveau_gpuobj_init,
+ .fini = _nouveau_gpuobj_fini,
+ .rd32 = _nouveau_gpuobj_rd32,
+ .wr32 = _nouveau_gpuobj_wr32,
+ },
+};
+
+int
+nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
+ u32 size, u32 align, struct nouveau_ramht **pramht)
+{
+ struct nouveau_ramht *ramht;
+ int ret;
+
+ ret = nouveau_gpuobj_create(parent, parent->engine ?
+ parent->engine : parent, /* <nv50 ramht */
+ &nouveau_ramht_oclass, 0, pargpu, size,
+ align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
+ *pramht = ramht;
+ if (ret)
+ return ret;
+
+ ramht->bits = order_base_2(nv_gpuobj(ramht)->size >> 3);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
new file mode 100644
index 00000000000..2ea5568b6cf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/subdev.h>
+#include <core/device.h>
+#include <core/option.h>
+
+void
+nouveau_subdev_reset(struct nouveau_object *subdev)
+{
+ nv_trace(subdev, "resetting...\n");
+ nv_ofuncs(subdev)->fini(subdev, false);
+ nv_debug(subdev, "reset\n");
+}
+
+int
+nouveau_subdev_init(struct nouveau_subdev *subdev)
+{
+ int ret = nouveau_object_init(&subdev->base);
+ if (ret)
+ return ret;
+
+ nouveau_subdev_reset(&subdev->base);
+ return 0;
+}
+
+int
+_nouveau_subdev_init(struct nouveau_object *object)
+{
+ return nouveau_subdev_init(nv_subdev(object));
+}
+
+int
+nouveau_subdev_fini(struct nouveau_subdev *subdev, bool suspend)
+{
+ if (subdev->unit) {
+ nv_mask(subdev, 0x000200, subdev->unit, 0x00000000);
+ nv_mask(subdev, 0x000200, subdev->unit, subdev->unit);
+ }
+
+ return nouveau_object_fini(&subdev->base, suspend);
+}
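+
+/*
+ * 0x000200 is the master engine enable register (PMC_ENABLE): clearing
+ * the subdev's unit bits and immediately setting them again pulses a
+ * reset of the engine on its way down.
+ */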
+
+int
+_nouveau_subdev_fini(struct nouveau_object *object, bool suspend)
+{
+ return nouveau_subdev_fini(nv_subdev(object), suspend);
+}
+
+void
+nouveau_subdev_destroy(struct nouveau_subdev *subdev)
+{
+ int subidx = nv_hclass(subdev) & 0xff;
+ nv_device(subdev)->subdev[subidx] = NULL;
+ nouveau_object_destroy(&subdev->base);
+}
+
+void
+_nouveau_subdev_dtor(struct nouveau_object *object)
+{
+ nouveau_subdev_destroy(nv_subdev(object));
+}
+
+int
+nouveau_subdev_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 pclass,
+ const char *subname, const char *sysname,
+ int size, void **pobject)
+{
+ struct nouveau_subdev *subdev;
+ int ret;
+
+ ret = nouveau_object_create_(parent, engine, oclass, pclass |
+ NV_SUBDEV_CLASS, size, pobject);
+ subdev = *pobject;
+ if (ret)
+ return ret;
+
+ __mutex_init(&subdev->mutex, subname, &oclass->lock_class_key);
+ subdev->name = subname;
+
+ if (parent) {
+ struct nouveau_device *device = nv_device(parent);
+ subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
+ subdev->mmio = nv_subdev(device)->mmio;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
new file mode 100644
index 00000000000..1e8e75c0684
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Ilia Mirkin
+ */
+
+#include <engine/xtensa.h>
+#include <engine/bsp.h>
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_bsp_sclass[] = {
+ { 0x74b0, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * BSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_bsp_cclass = {
+ .handle = NV_ENGCTX(BSP, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_xtensa_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
+ },
+};
+
+/*******************************************************************************
+ * BSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_xtensa *priv;
+ int ret;
+
+ ret = nouveau_xtensa_create(parent, engine, oclass, 0x103000, true,
+ "PBSP", "bsp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x04008000;
+ nv_engine(priv)->cclass = &nv84_bsp_cclass;
+ nv_engine(priv)->sclass = nv84_bsp_sclass;
+ priv->fifo_val = 0x1111;
+ priv->unkd28 = 0x90044;
+ return 0;
+}
+
+struct nouveau_oclass
+nv84_bsp_oclass = {
+ .handle = NV_ENGINE(BSP, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_bsp_ctor,
+ .dtor = _nouveau_xtensa_dtor,
+ .init = _nouveau_xtensa_init,
+ .fini = _nouveau_xtensa_fini,
+ .rd32 = _nouveau_xtensa_rd32,
+ .wr32 = _nouveau_xtensa_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c
new file mode 100644
index 00000000000..6b089e022fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
+ */
+
+#include <engine/falcon.h>
+#include <engine/bsp.h>
+
+struct nv98_bsp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv98_bsp_sclass[] = {
+ { 0x88b1, &nouveau_object_ofuncs },
+ { 0x85b1, &nouveau_object_ofuncs },
+ { 0x86b1, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv98_bsp_cclass = {
+ .handle = NV_ENGCTX(BSP, 0x98),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv98_bsp_init(struct nouveau_object *object)
+{
+ struct nv98_bsp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x084010, 0x0000ffd2);
+ nv_wr32(priv, 0x08401c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nv98_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv98_bsp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+ "PBSP", "bsp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x04008000;
+ nv_engine(priv)->cclass = &nv98_bsp_cclass;
+ nv_engine(priv)->sclass = nv98_bsp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nv98_bsp_oclass = {
+ .handle = NV_ENGINE(BSP, 0x98),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv98_bsp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nv98_bsp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
new file mode 100644
index 00000000000..ce860de43e6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <engine/falcon.h>
+#include <engine/bsp.h>
+
+struct nvc0_bsp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_sclass[] = {
+ { 0x90b1, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_cclass = {
+ .handle = NV_ENGCTX(BSP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_bsp_init(struct nouveau_object *object)
+{
+ struct nvc0_bsp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x084010, 0x0000fff2);
+ nv_wr32(priv, 0x08401c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_bsp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+ "PBSP", "bsp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00008000;
+ nv_subdev(priv)->intr = nouveau_falcon_intr;
+ nv_engine(priv)->cclass = &nvc0_bsp_cclass;
+ nv_engine(priv)->sclass = nvc0_bsp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_bsp_oclass = {
+ .handle = NV_ENGINE(BSP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_bsp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nvc0_bsp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
new file mode 100644
index 00000000000..ba6aeca0285
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/falcon.h>
+#include <engine/bsp.h>
+
+struct nve0_bsp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_sclass[] = {
+ { 0x95b1, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_cclass = {
+ .handle = NV_ENGCTX(BSP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_bsp_init(struct nouveau_object *object)
+{
+ struct nve0_bsp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x084010, 0x0000fff2);
+ nv_wr32(priv, 0x08401c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_bsp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+ "PBSP", "bsp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00008000;
+ nv_subdev(priv)->intr = nouveau_falcon_intr;
+ nv_engine(priv)->cclass = &nve0_bsp_cclass;
+ nv_engine(priv)->sclass = nve0_bsp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_bsp_oclass = {
+ .handle = NV_ENGINE(BSP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_bsp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nve0_bsp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc
new file mode 100644
index 00000000000..219850d5328
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc
@@ -0,0 +1,872 @@
+/* fuc microcode for copy engine on nva3- chipsets
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build for nva3:nvc0
+ * m4 -DNVA3 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nva3_copy.fuc.h
+ *
+ * To build for nvc0-
+ * m4 -DNVC0 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_copy.fuc.h
+ */
+
+ifdef(`NVA3',
+.section #nva3_pcopy_data
+,
+.section #nvc0_pcopy_data
+)
+
+ctx_object: .b32 0
+ifdef(`NVA3',
+ctx_dma:
+ctx_dma_query: .b32 0
+ctx_dma_src: .b32 0
+ctx_dma_dst: .b32 0
+,)
+.equ #ctx_dma_count 3
+ctx_query_address_high: .b32 0
+ctx_query_address_low: .b32 0
+ctx_query_counter: .b32 0
+ctx_src_address_high: .b32 0
+ctx_src_address_low: .b32 0
+ctx_src_pitch: .b32 0
+ctx_src_tile_mode: .b32 0
+ctx_src_xsize: .b32 0
+ctx_src_ysize: .b32 0
+ctx_src_zsize: .b32 0
+ctx_src_zoff: .b32 0
+ctx_src_xoff: .b32 0
+ctx_src_yoff: .b32 0
+ctx_src_cpp: .b32 0
+ctx_dst_address_high: .b32 0
+ctx_dst_address_low: .b32 0
+ctx_dst_pitch: .b32 0
+ctx_dst_tile_mode: .b32 0
+ctx_dst_xsize: .b32 0
+ctx_dst_ysize: .b32 0
+ctx_dst_zsize: .b32 0
+ctx_dst_zoff: .b32 0
+ctx_dst_xoff: .b32 0
+ctx_dst_yoff: .b32 0
+ctx_dst_cpp: .b32 0
+ctx_format: .b32 0
+ctx_swz_const0: .b32 0
+ctx_swz_const1: .b32 0
+ctx_xcnt: .b32 0
+ctx_ycnt: .b32 0
+.align 256
+
+dispatch_table:
+// mthd 0x0000, NAME
+.b16 0x000 1
+.b32 #ctx_object ~0xffffffff
+// mthd 0x0100, NOP
+.b16 0x040 1
+.b32 0x00010000 + #cmd_nop ~0xffffffff
+// mthd 0x0140, PM_TRIGGER
+.b16 0x050 1
+.b32 0x00010000 + #cmd_pm_trigger ~0xffffffff
+ifdef(`NVA3', `
+// mthd 0x0180-0x018c, DMA_
+.b16 0x060 #ctx_dma_count
+dispatch_dma:
+.b32 0x00010000 + #cmd_dma ~0xffffffff
+.b32 0x00010000 + #cmd_dma ~0xffffffff
+.b32 0x00010000 + #cmd_dma ~0xffffffff
+',)
+// mthd 0x0200-0x0218, SRC_TILE
+.b16 0x80 7
+.b32 #ctx_src_tile_mode ~0x00000fff
+.b32 #ctx_src_xsize ~0x0007ffff
+.b32 #ctx_src_ysize ~0x00001fff
+.b32 #ctx_src_zsize ~0x000007ff
+.b32 #ctx_src_zoff ~0x00000fff
+.b32 #ctx_src_xoff ~0x0007ffff
+.b32 #ctx_src_yoff ~0x00001fff
+// mthd 0x0220-0x0238, DST_TILE
+.b16 0x88 7
+.b32 #ctx_dst_tile_mode ~0x00000fff
+.b32 #ctx_dst_xsize ~0x0007ffff
+.b32 #ctx_dst_ysize ~0x00001fff
+.b32 #ctx_dst_zsize ~0x000007ff
+.b32 #ctx_dst_zoff ~0x00000fff
+.b32 #ctx_dst_xoff ~0x0007ffff
+.b32 #ctx_dst_yoff ~0x00001fff
+// mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH
+.b16 0xc0 2
+.b32 0x00010000 + #cmd_exec ~0xffffffff
+.b32 0x00010000 + #cmd_wrcache_flush ~0xffffffff
+// mthd 0x030c-0x0340, various stuff
+.b16 0xc3 14
+.b32 #ctx_src_address_high ~0x000000ff
+.b32 #ctx_src_address_low ~0xffffffff
+.b32 #ctx_dst_address_high ~0x000000ff
+.b32 #ctx_dst_address_low ~0xffffffff
+.b32 #ctx_src_pitch ~0x0007ffff
+.b32 #ctx_dst_pitch ~0x0007ffff
+.b32 #ctx_xcnt ~0x0000ffff
+.b32 #ctx_ycnt ~0x00001fff
+.b32 #ctx_format ~0x0333ffff
+.b32 #ctx_swz_const0 ~0xffffffff
+.b32 #ctx_swz_const1 ~0xffffffff
+.b32 #ctx_query_address_high ~0x000000ff
+.b32 #ctx_query_address_low ~0xffffffff
+.b32 #ctx_query_counter ~0xffffffff
+.b16 0x800 0
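+
+// Table format: each header above is { .b16 first_method>>2, count },
+// followed by count pairs of { .b32 handler/state word, .b32 mask }.
+// Bit 16 of the first word selects "call handler" over "store to ctx",
+// and (data & mask) must be zero or the method faults INVALID_BITFIELD.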
+
+ifdef(`NVA3',
+.section #nva3_pcopy_code
+,
+.section #nvc0_pcopy_code
+)
+
+main:
+ clear b32 $r0
+ mov $sp $r0
+
+ // setup i0 handler and route fifo and ctxswitch to it
+ mov $r1 #ih
+ mov $iv0 $r1
+ mov $r1 0x400
+ movw $r2 0xfff3
+ sethi $r2 0
+ iowr I[$r1 + 0x300] $r2
+
+ // enable interrupts
+ or $r2 0xc
+ iowr I[$r1] $r2
+ bset $flags ie0
+
+ // enable fifo access and context switching
+ mov $r1 0x1200
+ mov $r2 3
+ iowr I[$r1] $r2
+
+ // sleep forever, waking for interrupts
+ bset $flags $p0
+ spin:
+ sleep $p0
+ bra #spin
+
+// i0 handler
+ih:
+ iord $r1 I[$r0 + 0x200]
+
+ and $r2 $r1 0x00000008
+ bra e #ih_no_chsw
+ call #chsw
+ ih_no_chsw:
+ and $r2 $r1 0x00000004
+ bra e #ih_no_cmd
+ call #dispatch
+
+ ih_no_cmd:
+ and $r1 $r1 0x0000000c
+ iowr I[$r0 + 0x100] $r1
+ iret
+
+// $p1 direction (0 = unload, 1 = load)
+// $r3 channel
+swctx:
+ mov $r4 0x7700
+ mov $xtargets $r4
+ifdef(`NVA3', `
+ // target 7 hardcoded to ctx dma object
+ mov $xdbase $r0
+', ` // NVC0
+ // read SCRATCH3 to decide if we are PCOPY0 or PCOPY1
+ mov $r4 0x2100
+ iord $r4 I[$r4 + 0]
+ and $r4 1
+ shl b32 $r4 4
+ add b32 $r4 0x30
+
+ // channel is in vram
+ mov $r15 0x61c
+ shl b32 $r15 6
+ mov $r5 0x114
+ iowrs I[$r15] $r5
+
+ // read 16-byte PCOPYn info, containing context pointer, from channel
+ shl b32 $r5 $r3 4
+ add b32 $r5 2
+ mov $xdbase $r5
+ mov $r5 $sp
+ // get a chunk of stack space, aligned to 256 byte boundary
+ sub b32 $r5 0x100
+ mov $r6 0xff
+ not b32 $r6
+ and $r5 $r6
+ sethi $r5 0x00020000
+ xdld $r4 $r5
+ xdwait
+ sethi $r5 0
+
+ // set context pointer, from within channel VM
+ mov $r14 0
+ iowrs I[$r15] $r14
+ ld b32 $r4 D[$r5 + 0]
+ shr b32 $r4 8
+ ld b32 $r6 D[$r5 + 4]
+ shl b32 $r6 24
+ or $r4 $r6
+ mov $xdbase $r4
+')
+ // 256-byte context, at start of data segment
+ mov b32 $r4 $r0
+ sethi $r4 0x60000
+
+ // swap!
+ bra $p1 #swctx_load
+ xdst $r0 $r4
+ bra #swctx_done
+ swctx_load:
+ xdld $r0 $r4
+ swctx_done:
+ xdwait
+ ret
+
+chsw:
+ // read current channel
+ mov $r2 0x1400
+ iord $r3 I[$r2]
+
+ // if it's active, unload it and return
+ xbit $r15 $r3 0x1e
+ bra e #chsw_no_unload
+ bclr $flags $p1
+ call #swctx
+ bclr $r3 0x1e
+ iowr I[$r2] $r3
+ mov $r4 1
+ iowr I[$r2 + 0x200] $r4
+ ret
+
+ // read next channel
+ chsw_no_unload:
+ iord $r3 I[$r2 + 0x100]
+
+ // is there a channel waiting to be loaded?
+ xbit $r13 $r3 0x1e
+ bra e #chsw_finish_load
+ bset $flags $p1
+ call #swctx
+ifdef(`NVA3',
+ // load dma objects back into TARGET regs
+ mov $r5 #ctx_dma
+ mov $r6 #ctx_dma_count
+ chsw_load_ctx_dma:
+ ld b32 $r7 D[$r5 + $r6 * 4]
+ add b32 $r8 $r6 0x180
+ shl b32 $r8 8
+ iowr I[$r8] $r7
+ sub b32 $r6 1
+ bra nc #chsw_load_ctx_dma
+,)
+
+ chsw_finish_load:
+ mov $r3 2
+ iowr I[$r2 + 0x200] $r3
+ ret
+
+dispatch:
+ // read incoming fifo command
+ mov $r3 0x1900
+ iord $r2 I[$r3 + 0x100]
+ iord $r3 I[$r3 + 0x000]
+ and $r4 $r2 0x7ff
+ // $r2 will be used to store exception data
+ shl b32 $r2 0x10
+
+ // lookup method in the dispatch table, ILLEGAL_MTHD if not found
+ mov $r5 #dispatch_table
+ clear b32 $r6
+ clear b32 $r7
+ dispatch_loop:
+ ld b16 $r6 D[$r5 + 0]
+ ld b16 $r7 D[$r5 + 2]
+ add b32 $r5 4
+ cmpu b32 $r4 $r6
+ bra c #dispatch_illegal_mthd
+ add b32 $r7 $r6
+ cmpu b32 $r4 $r7
+ bra c #dispatch_valid_mthd
+ sub b32 $r7 $r6
+ shl b32 $r7 3
+ add b32 $r5 $r7
+ bra #dispatch_loop
+
+ // ensure no bits set in reserved fields, INVALID_BITFIELD
+ dispatch_valid_mthd:
+ sub b32 $r4 $r6
+ shl b32 $r4 3
+ add b32 $r4 $r5
+ ld b32 $r5 D[$r4 + 4]
+ and $r5 $r3
+ cmpu b32 $r5 0
+ bra ne #dispatch_invalid_bitfield
+
+ // depending on dispatch flags: execute method, or save data as state
+ ld b16 $r5 D[$r4 + 0]
+ ld b16 $r6 D[$r4 + 2]
+ cmpu b32 $r6 0
+ bra ne #dispatch_cmd
+ st b32 D[$r5] $r3
+ bra #dispatch_done
+ dispatch_cmd:
+ bclr $flags $p1
+ call $r5
+ bra $p1 #dispatch_error
+ bra #dispatch_done
+
+ dispatch_invalid_bitfield:
+ or $r2 2
+ dispatch_illegal_mthd:
+ or $r2 1
+
+ // store exception data in SCRATCH0/SCRATCH1, signal hostirq
+ dispatch_error:
+ mov $r4 0x1000
+ iowr I[$r4 + 0x000] $r2
+ iowr I[$r4 + 0x100] $r3
+ mov $r2 0x40
+ iowr I[$r0] $r2
+ hostirq_wait:
+ iord $r2 I[$r0 + 0x200]
+ and $r2 0x40
+ cmpu b32 $r2 0
+ bra ne #hostirq_wait
+
+ dispatch_done:
+ mov $r2 0x1d00
+ mov $r3 1
+ iowr I[$r2] $r3
+ ret
+
+// No-operation
+//
+// Inputs:
+// $r1: irqh state
+// $r2: hostirq state
+// $r3: data
+// $r4: dispatch table entry
+// Outputs:
+// $r1: irqh state
+// $p1: set on error
+// $r2: hostirq state
+// $r3: data
+cmd_nop:
+ ret
+
+// PM_TRIGGER
+//
+// Inputs:
+// $r1: irqh state
+// $r2: hostirq state
+// $r3: data
+// $r4: dispatch table entry
+// Outputs:
+// $r1: irqh state
+// $p1: set on error
+// $r2: hostirq state
+// $r3: data
+cmd_pm_trigger:
+ mov $r2 0x2200
+ clear b32 $r3
+ sethi $r3 0x20000
+ iowr I[$r2] $r3
+ ret
+
+ifdef(`NVA3',
+// SET_DMA_* method handler
+//
+// Inputs:
+// $r1: irqh state
+// $r2: hostirq state
+// $r3: data
+// $r4: dispatch table entry
+// Outputs:
+// $r1: irqh state
+// $p1: set on error
+// $r2: hostirq state
+// $r3: data
+cmd_dma:
+ sub b32 $r4 #dispatch_dma
+ shr b32 $r4 1
+ bset $r3 0x1e
+ st b32 D[$r4 + #ctx_dma] $r3
+ add b32 $r4 0x600
+ shl b32 $r4 6
+ iowr I[$r4] $r3
+ ret
+,)
+
+// Calculates the hw swizzle mask and adjusts the surface's xcnt to match
+//
+cmd_exec_set_format:
+ // zero out a chunk of the stack to store the swizzle into
+ add $sp -0x10
+ st b32 D[$sp + 0x00] $r0
+ st b32 D[$sp + 0x04] $r0
+ st b32 D[$sp + 0x08] $r0
+ st b32 D[$sp + 0x0c] $r0
+
+ // extract cpp, src_ncomp and dst_ncomp from FORMAT
+ ld b32 $r4 D[$r0 + #ctx_format]
+ extr $r5 $r4 16:17
+ add b32 $r5 1
+ extr $r6 $r4 20:21
+ add b32 $r6 1
+ extr $r7 $r4 24:25
+ add b32 $r7 1
+
+ // convert FORMAT swizzle mask to hw swizzle mask
+ bclr $flags $p2
+ clear b32 $r8
+ clear b32 $r9
+ ncomp_loop:
+ and $r10 $r4 0xf
+ shr b32 $r4 4
+ clear b32 $r11
+ bpc_loop:
+ cmpu b8 $r10 4
+ bra nc #cmp_c0
+ mulu $r12 $r10 $r5
+ add b32 $r12 $r11
+ bset $flags $p2
+ bra #bpc_next
+ cmp_c0:
+ bra ne #cmp_c1
+ mov $r12 0x10
+ add b32 $r12 $r11
+ bra #bpc_next
+ cmp_c1:
+ cmpu b8 $r10 6
+ bra nc #cmp_zero
+ mov $r12 0x14
+ add b32 $r12 $r11
+ bra #bpc_next
+ cmp_zero:
+ mov $r12 0x80
+ bpc_next:
+ st b8 D[$sp + $r8] $r12
+ add b32 $r8 1
+ add b32 $r11 1
+ cmpu b32 $r11 $r5
+ bra c #bpc_loop
+ add b32 $r9 1
+ cmpu b32 $r9 $r7
+ bra c #ncomp_loop
+
+ // SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang)
+ mulu $r6 $r5
+ st b32 D[$r0 + #ctx_src_cpp] $r6
+ ld b32 $r8 D[$r0 + #ctx_xcnt]
+ mulu $r6 $r8
+ bra $p2 #dst_xcnt
+ clear b32 $r6
+
+ dst_xcnt:
+ mulu $r7 $r5
+ st b32 D[$r0 + #ctx_dst_cpp] $r7
+ mulu $r7 $r8
+
+ mov $r5 0x810
+ shl b32 $r5 6
+ iowr I[$r5 + 0x000] $r6
+ iowr I[$r5 + 0x100] $r7
+ add b32 $r5 0x800
+ ld b32 $r6 D[$r0 + #ctx_dst_cpp]
+ sub b32 $r6 1
+ shl b32 $r6 8
+ ld b32 $r7 D[$r0 + #ctx_src_cpp]
+ sub b32 $r7 1
+ or $r6 $r7
+ iowr I[$r5 + 0x000] $r6
+ add b32 $r5 0x100
+ ld b32 $r6 D[$sp + 0x00]
+ iowr I[$r5 + 0x000] $r6
+ ld b32 $r6 D[$sp + 0x04]
+ iowr I[$r5 + 0x100] $r6
+ ld b32 $r6 D[$sp + 0x08]
+ iowr I[$r5 + 0x200] $r6
+ ld b32 $r6 D[$sp + 0x0c]
+ iowr I[$r5 + 0x300] $r6
+ add b32 $r5 0x400
+ ld b32 $r6 D[$r0 + #ctx_swz_const0]
+ iowr I[$r5 + 0x000] $r6
+ ld b32 $r6 D[$r0 + #ctx_swz_const1]
+ iowr I[$r5 + 0x100] $r6
+ add $sp 0x10
+ ret
+
+// Setup to handle a tiled surface
+//
+// Calculates a number of parameters the hardware requires in order
+// to correctly handle tiling.
+//
+// Offset calculation is performed as follows (Tp/Th/Td from TILE_MODE):
+// nTx = round_up(w * cpp, 1 << Tp) >> Tp
+// nTy = round_up(h, 1 << Th) >> Th
+// Txo = (x * cpp) & ((1 << Tp) - 1)
+// Tx = (x * cpp) >> Tp
+// Tyo = y & ((1 << Th) - 1)
+// Ty = y >> Th
+// Tzo = z & ((1 << Td) - 1)
+// Tz = z >> Td
+//
+// off = (Tzo << Tp << Th) + (Tyo << Tp) + Txo
+// off += ((Tz * nTy * nTx) + (Ty * nTx) + Tx) << Td << Th << Tp;
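+//
+// Worked example (illustrative): Tp=6, Th=2, Td=0, cpp=1, w=256, h=8
+// gives nTx=4, nTy=2; texel (x=100, y=5, z=0) then has Txo=36, Tx=1,
+// Tyo=1, Ty=1, so off = (1<<6) + 36 + ((4 + 1) << 8) = 100 + 1280 = 1380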
+//
+// Inputs:
+// $r4: hw command (0x104800)
+// $r5: ctx offset adjustment for src/dst selection
+// $p2: set if dst surface
+//
+cmd_exec_set_surface_tiled:
+ // translate TILE_MODE into Tp, Th, Td shift values
+ ld b32 $r7 D[$r5 + #ctx_src_tile_mode]
+ extr $r9 $r7 8:11
+ extr $r8 $r7 4:7
+ifdef(`NVA3',
+ add b32 $r8 2
+,
+ add b32 $r8 3
+)
+ extr $r7 $r7 0:3
+ cmp b32 $r7 0xe
+ bra ne #xtile64
+ mov $r7 4
+ bra #xtileok
+ xtile64:
+ xbit $r7 $flags $p2
+ add b32 $r7 17
+ bset $r4 $r7
+ mov $r7 6
+ xtileok:
+
+ // Op = (x * cpp) & ((1 << Tp) - 1)
+ // Tx = (x * cpp) >> Tp
+ ld b32 $r10 D[$r5 + #ctx_src_xoff]
+ ld b32 $r11 D[$r5 + #ctx_src_cpp]
+ mulu $r10 $r11
+ mov $r11 1
+ shl b32 $r11 $r7
+ sub b32 $r11 1
+ and $r12 $r10 $r11
+ shr b32 $r10 $r7
+
+ // Tyo = y & ((1 << Th) - 1)
+ // Ty = y >> Th
+ ld b32 $r13 D[$r5 + #ctx_src_yoff]
+ mov $r14 1
+ shl b32 $r14 $r8
+ sub b32 $r14 1
+ and $r11 $r13 $r14
+ shr b32 $r13 $r8
+
+ // YTILE = ((1 << Th) << 12) | ((1 << Th) - Tyo)
+ add b32 $r14 1
+ shl b32 $r15 $r14 12
+ sub b32 $r14 $r11
+ or $r15 $r14
+ xbit $r6 $flags $p2
+ add b32 $r6 0x208
+ shl b32 $r6 8
+ iowr I[$r6 + 0x000] $r15
+
+ // Op += Tyo << Tp
+ shl b32 $r11 $r7
+ add b32 $r12 $r11
+
+ // nTx = ((w * cpp) + ((1 << Tp) - 1) >> Tp)
+ ld b32 $r15 D[$r5 + #ctx_src_xsize]
+ ld b32 $r11 D[$r5 + #ctx_src_cpp]
+ mulu $r15 $r11
+ mov $r11 1
+ shl b32 $r11 $r7
+ sub b32 $r11 1
+ add b32 $r15 $r11
+ shr b32 $r15 $r7
+ push $r15
+
+ // nTy = (h + ((1 << Th) - 1)) >> Th
+ ld b32 $r15 D[$r5 + #ctx_src_ysize]
+ mov $r11 1
+ shl b32 $r11 $r8
+ sub b32 $r11 1
+ add b32 $r15 $r11
+ shr b32 $r15 $r8
+ push $r15
+
+ // Tys = Tp + Th
+ // CFG_YZ_TILE_SIZE = ((1 << Th) >> 2) << Td
+ add b32 $r7 $r8
+ sub b32 $r8 2
+ mov $r11 1
+ shl b32 $r11 $r8
+ shl b32 $r11 $r9
+
+ // Tzo = z & ((1 << Td) - 1)
+ // Tz = z >> Td
+ // Op += Tzo << Tys
+ // Ts = Tys + Td
+ ld b32 $r8 D[$r5 + #ctx_src_zoff]
+ mov $r14 1
+ shl b32 $r14 $r9
+ sub b32 $r14 1
+ and $r15 $r8 $r14
+ shl b32 $r15 $r7
+ add b32 $r12 $r15
+ add b32 $r7 $r9
+ shr b32 $r8 $r9
+
+ // Ot = ((Tz * nTy * nTx) + (Ty * nTx) + Tx) << Ts
+ pop $r15
+ pop $r9
+ mulu $r13 $r9
+ add b32 $r10 $r13
+ mulu $r8 $r9
+ mulu $r8 $r15
+ add b32 $r10 $r8
+ shl b32 $r10 $r7
+
+ // PITCH = (nTx - 1) << Ts
+ sub b32 $r9 1
+ shl b32 $r9 $r7
+ iowr I[$r6 + 0x200] $r9
+
+ // SRC_ADDRESS_LOW = (Ot + Op) & 0xffffffff
+ // CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16
+ ld b32 $r7 D[$r5 + #ctx_src_address_low]
+ ld b32 $r8 D[$r5 + #ctx_src_address_high]
+ add b32 $r10 $r12
+ add b32 $r7 $r10
+ adc b32 $r8 0
+ shl b32 $r8 16
+ or $r8 $r11
+ sub b32 $r6 0x600
+ iowr I[$r6 + 0x000] $r7
+ add b32 $r6 0x400
+ iowr I[$r6 + 0x000] $r8
+ ret
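A minimal C sketch of the offset formula in the comment block above (the
helper is hypothetical; Tp/Th/Td are the shift values decoded from TILE_MODE
at the start of this routine, DIV_ROUND_UP is the usual kernel macro):

	#include <linux/kernel.h>

	static u64 tiled_offset(u32 x, u32 y, u32 z, u32 w, u32 h, u32 cpp,
				u32 Tp, u32 Th, u32 Td)
	{
		u32 nTx = DIV_ROUND_UP(w * cpp, 1 << Tp);
		u32 nTy = DIV_ROUND_UP(h, 1 << Th);
		u32 Txo = (x * cpp) & ((1 << Tp) - 1);
		u32 Tx  = (x * cpp) >> Tp;
		u32 Tyo = y & ((1 << Th) - 1);
		u32 Ty  = y >> Th;
		u32 Tzo = z & ((1 << Td) - 1);
		u32 Tz  = z >> Td;
		u64 off;

		/* offset within the tile ... */
		off  = ((u64)Tzo << (Tp + Th)) + (Tyo << Tp) + Txo;
		/* ... plus the offset of the tile itself */
		off += (u64)((Tz * nTy + Ty) * nTx + Tx) << (Td + Th + Tp);
		return off;
	}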
+
+// Setup to handle a linear surface
+//
+// Nothing to see here... sets ADDRESS and PITCH, pretty non-exciting.
+//
+cmd_exec_set_surface_linear:
+ xbit $r6 $flags $p2
+ add b32 $r6 0x202
+ shl b32 $r6 8
+ ld b32 $r7 D[$r5 + #ctx_src_address_low]
+ iowr I[$r6 + 0x000] $r7
+ add b32 $r6 0x400
+ ld b32 $r7 D[$r5 + #ctx_src_address_high]
+ shl b32 $r7 16
+ iowr I[$r6 + 0x000] $r7
+ add b32 $r6 0x400
+ ld b32 $r7 D[$r5 + #ctx_src_pitch]
+ iowr I[$r6 + 0x000] $r7
+ ret
+
+// wait for regs to be available for use
+cmd_exec_wait:
+ push $r0
+ push $r1
+ mov $r0 0x800
+ shl b32 $r0 6
+ loop:
+ iord $r1 I[$r0]
+ and $r1 1
+ bra ne #loop
+ pop $r1
+ pop $r0
+ ret
+
+cmd_exec_query:
+ // if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI }
+ xbit $r4 $r3 13
+ bra ne #query_counter
+ call #cmd_exec_wait
+ mov $r4 0x80c
+ shl b32 $r4 6
+ ld b32 $r5 D[$r0 + #ctx_query_address_low]
+ add b32 $r5 4
+ iowr I[$r4 + 0x000] $r5
+ iowr I[$r4 + 0x100] $r0
+ mov $r5 0xc
+ iowr I[$r4 + 0x200] $r5
+ add b32 $r4 0x400
+ ld b32 $r5 D[$r0 + #ctx_query_address_high]
+ shl b32 $r5 16
+ iowr I[$r4 + 0x000] $r5
+ add b32 $r4 0x500
+ mov $r5 0x00000b00
+ sethi $r5 0x00010000
+ iowr I[$r4 + 0x000] $r5
+ mov $r5 0x00004040
+ shl b32 $r5 1
+ sethi $r5 0x80800000
+ iowr I[$r4 + 0x100] $r5
+ mov $r5 0x00001110
+ sethi $r5 0x13120000
+ iowr I[$r4 + 0x200] $r5
+ mov $r5 0x00001514
+ sethi $r5 0x17160000
+ iowr I[$r4 + 0x300] $r5
+ mov $r5 0x00002601
+ sethi $r5 0x00010000
+ mov $r4 0x800
+ shl b32 $r4 6
+ iowr I[$r4 + 0x000] $r5
+
+ // write COUNTER
+ query_counter:
+ call #cmd_exec_wait
+ mov $r4 0x80c
+ shl b32 $r4 6
+ ld b32 $r5 D[$r0 + #ctx_query_address_low]
+ iowr I[$r4 + 0x000] $r5
+ iowr I[$r4 + 0x100] $r0
+ mov $r5 0x4
+ iowr I[$r4 + 0x200] $r5
+ add b32 $r4 0x400
+ ld b32 $r5 D[$r0 + #ctx_query_address_high]
+ shl b32 $r5 16
+ iowr I[$r4 + 0x000] $r5
+ add b32 $r4 0x500
+ mov $r5 0x00000300
+ iowr I[$r4 + 0x000] $r5
+ mov $r5 0x00001110
+ sethi $r5 0x13120000
+ iowr I[$r4 + 0x100] $r5
+ ld b32 $r5 D[$r0 + #ctx_query_counter]
+ add b32 $r4 0x500
+ iowr I[$r4 + 0x000] $r5
+ mov $r5 0x00002601
+ sethi $r5 0x00010000
+ mov $r4 0x800
+ shl b32 $r4 6
+ iowr I[$r4 + 0x000] $r5
+ ret
+
+// Execute a copy operation
+//
+// Inputs:
+// $r1: irqh state
+// $r2: hostirq state
+// $r3: data
+//		0x00002000 QUERY_SHORT
+//		0x00001000 QUERY
+//		0x00000100 DST_LINEAR
+//		0x00000010 SRC_LINEAR
+//		0x00000001 FORMAT
+// $r4: dispatch table entry
+// Outputs:
+// $r1: irqh state
+// $p1: set on error
+// $r2: hostirq state
+// $r3: data
+cmd_exec:
+ call #cmd_exec_wait
+
+ // if format requested, call function to calculate it, otherwise
+ // fill in cpp/xcnt for both surfaces as if (cpp == 1)
+ xbit $r15 $r3 0
+ bra e #cmd_exec_no_format
+ call #cmd_exec_set_format
+ mov $r4 0x200
+ bra #cmd_exec_init_src_surface
+ cmd_exec_no_format:
+ mov $r6 0x810
+ shl b32 $r6 6
+ mov $r7 1
+ st b32 D[$r0 + #ctx_src_cpp] $r7
+ st b32 D[$r0 + #ctx_dst_cpp] $r7
+ ld b32 $r7 D[$r0 + #ctx_xcnt]
+ iowr I[$r6 + 0x000] $r7
+ iowr I[$r6 + 0x100] $r7
+ clear b32 $r4
+
+ cmd_exec_init_src_surface:
+ bclr $flags $p2
+ clear b32 $r5
+ xbit $r15 $r3 4
+ bra e #src_tiled
+ call #cmd_exec_set_surface_linear
+ bra #cmd_exec_init_dst_surface
+ src_tiled:
+ call #cmd_exec_set_surface_tiled
+ bset $r4 7
+
+ cmd_exec_init_dst_surface:
+ bset $flags $p2
+ mov $r5 #ctx_dst_address_high - #ctx_src_address_high
+ xbit $r15 $r3 8
+ bra e #dst_tiled
+ call #cmd_exec_set_surface_linear
+ bra #cmd_exec_kick
+ dst_tiled:
+ call #cmd_exec_set_surface_tiled
+ bset $r4 8
+
+ cmd_exec_kick:
+ mov $r5 0x800
+ shl b32 $r5 6
+ ld b32 $r6 D[$r0 + #ctx_ycnt]
+ iowr I[$r5 + 0x100] $r6
+ mov $r6 0x0041
+ // SRC_TARGET = 1, DST_TARGET = 2
+ sethi $r6 0x44000000
+ or $r4 $r6
+ iowr I[$r5] $r4
+
+ // if requested, queue up a QUERY write after the copy has completed
+ xbit $r15 $r3 12
+ bra e #cmd_exec_done
+ call #cmd_exec_query
+
+ cmd_exec_done:
+ ret
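The $r3 bits listed above, spelled out as C masks for reference (the names
are illustrative; the values follow the table and the xbit tests in the
routine):

	#define COPY_DATA_FORMAT       0x00000001 /* run cmd_exec_set_format */
	#define COPY_DATA_SRC_LINEAR   0x00000010 /* else src is tiled */
	#define COPY_DATA_DST_LINEAR   0x00000100 /* else dst is tiled */
	#define COPY_DATA_QUERY        0x00001000 /* write query when done */
	#define COPY_DATA_QUERY_SHORT  0x00002000 /* counter only, no time */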
+
+// Flush write cache
+//
+// Inputs:
+// $r1: irqh state
+// $r2: hostirq state
+// $r3: data
+// $r4: dispatch table entry
+// Outputs:
+// $r1: irqh state
+// $p1: set on error
+// $r2: hostirq state
+// $r3: data
+cmd_wrcache_flush:
+ mov $r2 0x2200
+ clear b32 $r3
+ sethi $r3 0x10000
+ iowr I[$r2] $r3
+ ret
+
+.align 0x100
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h
new file mode 100644
index 00000000000..241b2720120
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h
@@ -0,0 +1,620 @@
+uint32_t nva3_pcopy_data[] = {
+/* 0x0000: ctx_object */
+ 0x00000000,
+/* 0x0004: ctx_dma */
+/* 0x0004: ctx_dma_query */
+ 0x00000000,
+/* 0x0008: ctx_dma_src */
+ 0x00000000,
+/* 0x000c: ctx_dma_dst */
+ 0x00000000,
+/* 0x0010: ctx_query_address_high */
+ 0x00000000,
+/* 0x0014: ctx_query_address_low */
+ 0x00000000,
+/* 0x0018: ctx_query_counter */
+ 0x00000000,
+/* 0x001c: ctx_src_address_high */
+ 0x00000000,
+/* 0x0020: ctx_src_address_low */
+ 0x00000000,
+/* 0x0024: ctx_src_pitch */
+ 0x00000000,
+/* 0x0028: ctx_src_tile_mode */
+ 0x00000000,
+/* 0x002c: ctx_src_xsize */
+ 0x00000000,
+/* 0x0030: ctx_src_ysize */
+ 0x00000000,
+/* 0x0034: ctx_src_zsize */
+ 0x00000000,
+/* 0x0038: ctx_src_zoff */
+ 0x00000000,
+/* 0x003c: ctx_src_xoff */
+ 0x00000000,
+/* 0x0040: ctx_src_yoff */
+ 0x00000000,
+/* 0x0044: ctx_src_cpp */
+ 0x00000000,
+/* 0x0048: ctx_dst_address_high */
+ 0x00000000,
+/* 0x004c: ctx_dst_address_low */
+ 0x00000000,
+/* 0x0050: ctx_dst_pitch */
+ 0x00000000,
+/* 0x0054: ctx_dst_tile_mode */
+ 0x00000000,
+/* 0x0058: ctx_dst_xsize */
+ 0x00000000,
+/* 0x005c: ctx_dst_ysize */
+ 0x00000000,
+/* 0x0060: ctx_dst_zsize */
+ 0x00000000,
+/* 0x0064: ctx_dst_zoff */
+ 0x00000000,
+/* 0x0068: ctx_dst_xoff */
+ 0x00000000,
+/* 0x006c: ctx_dst_yoff */
+ 0x00000000,
+/* 0x0070: ctx_dst_cpp */
+ 0x00000000,
+/* 0x0074: ctx_format */
+ 0x00000000,
+/* 0x0078: ctx_swz_const0 */
+ 0x00000000,
+/* 0x007c: ctx_swz_const1 */
+ 0x00000000,
+/* 0x0080: ctx_xcnt */
+ 0x00000000,
+/* 0x0084: ctx_ycnt */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0100: dispatch_table */
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+ 0x00010040,
+ 0x00010160,
+ 0x00000000,
+ 0x00010050,
+ 0x00010162,
+ 0x00000000,
+ 0x00030060,
+/* 0x0128: dispatch_dma */
+ 0x00010170,
+ 0x00000000,
+ 0x00010170,
+ 0x00000000,
+ 0x00010170,
+ 0x00000000,
+ 0x00070080,
+ 0x00000028,
+ 0xfffff000,
+ 0x0000002c,
+ 0xfff80000,
+ 0x00000030,
+ 0xffffe000,
+ 0x00000034,
+ 0xfffff800,
+ 0x00000038,
+ 0xfffff000,
+ 0x0000003c,
+ 0xfff80000,
+ 0x00000040,
+ 0xffffe000,
+ 0x00070088,
+ 0x00000054,
+ 0xfffff000,
+ 0x00000058,
+ 0xfff80000,
+ 0x0000005c,
+ 0xffffe000,
+ 0x00000060,
+ 0xfffff800,
+ 0x00000064,
+ 0xfffff000,
+ 0x00000068,
+ 0xfff80000,
+ 0x0000006c,
+ 0xffffe000,
+ 0x000200c0,
+ 0x00010492,
+ 0x00000000,
+ 0x0001051b,
+ 0x00000000,
+ 0x000e00c3,
+ 0x0000001c,
+ 0xffffff00,
+ 0x00000020,
+ 0x00000000,
+ 0x00000048,
+ 0xffffff00,
+ 0x0000004c,
+ 0x00000000,
+ 0x00000024,
+ 0xfff80000,
+ 0x00000050,
+ 0xfff80000,
+ 0x00000080,
+ 0xffff0000,
+ 0x00000084,
+ 0xffffe000,
+ 0x00000074,
+ 0xfccc0000,
+ 0x00000078,
+ 0x00000000,
+ 0x0000007c,
+ 0x00000000,
+ 0x00000010,
+ 0xffffff00,
+ 0x00000014,
+ 0x00000000,
+ 0x00000018,
+ 0x00000000,
+ 0x00000800,
+};
+
+uint32_t nva3_pcopy_code[] = {
+/* 0x0000: main */
+ 0x04fe04bd,
+ 0x3517f000,
+ 0xf10010fe,
+ 0xf1040017,
+ 0xf0fff327,
+ 0x12d00023,
+ 0x0c25f0c0,
+ 0xf40012d0,
+ 0x17f11031,
+ 0x27f01200,
+ 0x0012d003,
+/* 0x002f: spin */
+ 0xf40031f4,
+ 0x0ef40028,
+/* 0x0035: ih */
+ 0x8001cffd,
+ 0xf40812c4,
+ 0x21f4060b,
+/* 0x0041: ih_no_chsw */
+ 0x0412c472,
+ 0xf4060bf4,
+/* 0x004a: ih_no_cmd */
+ 0x11c4c321,
+ 0x4001d00c,
+/* 0x0052: swctx */
+ 0x47f101f8,
+ 0x4bfe7700,
+ 0x0007fe00,
+ 0xf00204b9,
+ 0x01f40643,
+ 0x0604fa09,
+/* 0x006b: swctx_load */
+ 0xfa060ef4,
+/* 0x006e: swctx_done */
+ 0x03f80504,
+/* 0x0072: chsw */
+ 0x27f100f8,
+ 0x23cf1400,
+ 0x1e3fc800,
+ 0xf4170bf4,
+ 0x21f40132,
+ 0x1e3af052,
+ 0xf00023d0,
+ 0x24d00147,
+/* 0x0093: chsw_no_unload */
+ 0xcf00f880,
+ 0x3dc84023,
+ 0x220bf41e,
+ 0xf40131f4,
+ 0x57f05221,
+ 0x0367f004,
+/* 0x00a8: chsw_load_ctx_dma */
+ 0xa07856bc,
+ 0xb6018068,
+ 0x87d00884,
+ 0x0162b600,
+/* 0x00bb: chsw_finish_load */
+ 0xf0f018f4,
+ 0x23d00237,
+/* 0x00c3: dispatch */
+ 0xf100f880,
+ 0xcf190037,
+ 0x33cf4032,
+ 0xff24e400,
+ 0x1024b607,
+ 0x010057f1,
+ 0x74bd64bd,
+/* 0x00dc: dispatch_loop */
+ 0x58005658,
+ 0x50b60157,
+ 0x0446b804,
+ 0xbb4d08f4,
+ 0x47b80076,
+ 0x0f08f404,
+ 0xb60276bb,
+ 0x57bb0374,
+ 0xdf0ef400,
+/* 0x0100: dispatch_valid_mthd */
+ 0xb60246bb,
+ 0x45bb0344,
+ 0x01459800,
+ 0xb00453fd,
+ 0x1bf40054,
+ 0x00455820,
+ 0xb0014658,
+ 0x1bf40064,
+ 0x00538009,
+/* 0x0127: dispatch_cmd */
+ 0xf4300ef4,
+ 0x55f90132,
+ 0xf40c01f4,
+/* 0x0132: dispatch_invalid_bitfield */
+ 0x25f0250e,
+/* 0x0135: dispatch_illegal_mthd */
+ 0x0125f002,
+/* 0x0138: dispatch_error */
+ 0x100047f1,
+ 0xd00042d0,
+ 0x27f04043,
+ 0x0002d040,
+/* 0x0148: hostirq_wait */
+ 0xf08002cf,
+ 0x24b04024,
+ 0xf71bf400,
+/* 0x0154: dispatch_done */
+ 0x1d0027f1,
+ 0xd00137f0,
+ 0x00f80023,
+/* 0x0160: cmd_nop */
+/* 0x0162: cmd_pm_trigger */
+ 0x27f100f8,
+ 0x34bd2200,
+ 0xd00233f0,
+ 0x00f80023,
+/* 0x0170: cmd_dma */
+ 0x012842b7,
+ 0xf00145b6,
+ 0x43801e39,
+ 0x0040b701,
+ 0x0644b606,
+ 0xf80043d0,
+/* 0x0189: cmd_exec_set_format */
+ 0xf030f400,
+ 0xb00001b0,
+ 0x01b00101,
+ 0x0301b002,
+ 0xc71d0498,
+ 0x50b63045,
+ 0x3446c701,
+ 0xc70160b6,
+ 0x70b63847,
+ 0x0232f401,
+ 0x94bd84bd,
+/* 0x01b4: ncomp_loop */
+ 0xb60f4ac4,
+ 0xb4bd0445,
+/* 0x01bc: bpc_loop */
+ 0xf404a430,
+ 0xa5ff0f18,
+ 0x00cbbbc0,
+ 0xf40231f4,
+/* 0x01ce: cmp_c0 */
+ 0x1bf4220e,
+ 0x10c7f00c,
+ 0xf400cbbb,
+/* 0x01da: cmp_c1 */
+ 0xa430160e,
+ 0x0c18f406,
+ 0xbb14c7f0,
+ 0x0ef400cb,
+/* 0x01e9: cmp_zero */
+ 0x80c7f107,
+/* 0x01ed: bpc_next */
+ 0x01c83800,
+ 0xb60180b6,
+ 0xb5b801b0,
+ 0xc308f404,
+ 0xb80190b6,
+ 0x08f40497,
+ 0x0065fdb2,
+ 0x98110680,
+ 0x68fd2008,
+ 0x0502f400,
+/* 0x0216: dst_xcnt */
+ 0x75fd64bd,
+ 0x1c078000,
+ 0xf10078fd,
+ 0xb6081057,
+ 0x56d00654,
+ 0x4057d000,
+ 0x080050b7,
+ 0xb61c0698,
+ 0x64b60162,
+ 0x11079808,
+ 0xfd0172b6,
+ 0x56d00567,
+ 0x0050b700,
+ 0x0060b401,
+ 0xb40056d0,
+ 0x56d00160,
+ 0x0260b440,
+ 0xb48056d0,
+ 0x56d00360,
+ 0x0050b7c0,
+ 0x1e069804,
+ 0x980056d0,
+ 0x56d01f06,
+ 0x1030f440,
+/* 0x0276: cmd_exec_set_surface_tiled */
+ 0x579800f8,
+ 0x6879c70a,
+ 0xb66478c7,
+ 0x77c70280,
+ 0x0e76b060,
+ 0xf0091bf4,
+ 0x0ef40477,
+/* 0x0291: xtile64 */
+ 0x027cf00f,
+ 0xfd1170b6,
+ 0x77f00947,
+/* 0x029d: xtileok */
+ 0x0f5a9806,
+ 0xfd115b98,
+ 0xb7f000ab,
+ 0x04b7bb01,
+ 0xff01b2b6,
+ 0xa7bbc4ab,
+ 0x105d9805,
+ 0xbb01e7f0,
+ 0xe2b604e8,
+ 0xb4deff01,
+ 0xb605d8bb,
+ 0xef9401e0,
+ 0x02ebbb0c,
+ 0xf005fefd,
+ 0x60b7026c,
+ 0x64b60208,
+ 0x006fd008,
+ 0xbb04b7bb,
+ 0x5f9800cb,
+ 0x115b980b,
+ 0xf000fbfd,
+ 0xb7bb01b7,
+ 0x01b2b604,
+ 0xbb00fbbb,
+ 0xf0f905f7,
+ 0xf00c5f98,
+ 0xb8bb01b7,
+ 0x01b2b604,
+ 0xbb00fbbb,
+ 0xf0f905f8,
+ 0xb60078bb,
+ 0xb7f00282,
+ 0x04b8bb01,
+ 0x9804b9bb,
+ 0xe7f00e58,
+ 0x04e9bb01,
+ 0xff01e2b6,
+ 0xf7bbf48e,
+ 0x00cfbb04,
+ 0xbb0079bb,
+ 0xf0fc0589,
+ 0xd9fd90fc,
+ 0x00adbb00,
+ 0xfd0089fd,
+ 0xa8bb008f,
+ 0x04a7bb00,
+ 0xbb0192b6,
+ 0x69d00497,
+ 0x08579880,
+ 0xbb075898,
+ 0x7abb00ac,
+ 0x0081b600,
+ 0xfd1084b6,
+ 0x62b7058b,
+ 0x67d00600,
+ 0x0060b700,
+ 0x0068d004,
+/* 0x0382: cmd_exec_set_surface_linear */
+ 0x6cf000f8,
+ 0x0260b702,
+ 0x0864b602,
+ 0xd0085798,
+ 0x60b70067,
+ 0x57980400,
+ 0x1074b607,
+ 0xb70067d0,
+ 0x98040060,
+ 0x67d00957,
+/* 0x03ab: cmd_exec_wait */
+ 0xf900f800,
+ 0xf110f900,
+ 0xb6080007,
+/* 0x03b6: loop */
+ 0x01cf0604,
+ 0x0114f000,
+ 0xfcfa1bf4,
+ 0xf800fc10,
+/* 0x03c5: cmd_exec_query */
+ 0x0d34c800,
+ 0xf5701bf4,
+ 0xf103ab21,
+ 0xb6080c47,
+ 0x05980644,
+ 0x0450b605,
+ 0xd00045d0,
+ 0x57f04040,
+ 0x8045d00c,
+ 0x040040b7,
+ 0xb6040598,
+ 0x45d01054,
+ 0x0040b700,
+ 0x0057f105,
+ 0x0153f00b,
+ 0xf10045d0,
+ 0xb6404057,
+ 0x53f10154,
+ 0x45d08080,
+ 0x1057f140,
+ 0x1253f111,
+ 0x8045d013,
+ 0x151457f1,
+ 0x171653f1,
+ 0xf1c045d0,
+ 0xf0260157,
+ 0x47f10153,
+ 0x44b60800,
+ 0x0045d006,
+/* 0x0438: query_counter */
+ 0x03ab21f5,
+ 0x080c47f1,
+ 0x980644b6,
+ 0x45d00505,
+ 0x4040d000,
+ 0xd00457f0,
+ 0x40b78045,
+ 0x05980400,
+ 0x1054b604,
+ 0xb70045d0,
+ 0xf1050040,
+ 0xd0030057,
+ 0x57f10045,
+ 0x53f11110,
+ 0x45d01312,
+ 0x06059840,
+ 0x050040b7,
+ 0xf10045d0,
+ 0xf0260157,
+ 0x47f10153,
+ 0x44b60800,
+ 0x0045d006,
+/* 0x0492: cmd_exec */
+ 0x21f500f8,
+ 0x3fc803ab,
+ 0x0e0bf400,
+ 0x018921f5,
+ 0x020047f1,
+/* 0x04a7: cmd_exec_no_format */
+ 0xf11e0ef4,
+ 0xb6081067,
+ 0x77f00664,
+ 0x11078001,
+ 0x981c0780,
+ 0x67d02007,
+ 0x4067d000,
+/* 0x04c2: cmd_exec_init_src_surface */
+ 0x32f444bd,
+ 0xc854bd02,
+ 0x0bf4043f,
+ 0x8221f50a,
+ 0x0a0ef403,
+/* 0x04d4: src_tiled */
+ 0x027621f5,
+/* 0x04db: cmd_exec_init_dst_surface */
+ 0xf40749f0,
+ 0x57f00231,
+ 0x083fc82c,
+ 0xf50a0bf4,
+ 0xf4038221,
+/* 0x04ee: dst_tiled */
+ 0x21f50a0e,
+ 0x49f00276,
+/* 0x04f5: cmd_exec_kick */
+ 0x0057f108,
+ 0x0654b608,
+ 0xd0210698,
+ 0x67f04056,
+ 0x0063f141,
+ 0x0546fd44,
+ 0xc80054d0,
+ 0x0bf40c3f,
+ 0xc521f507,
+/* 0x0519: cmd_exec_done */
+/* 0x051b: cmd_wrcache_flush */
+ 0xf100f803,
+ 0xbd220027,
+ 0x0133f034,
+ 0xf80023d0,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h
new file mode 100644
index 00000000000..98cc4216a37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h
@@ -0,0 +1,606 @@
+uint32_t nvc0_pcopy_data[] = {
+/* 0x0000: ctx_object */
+ 0x00000000,
+/* 0x0004: ctx_query_address_high */
+ 0x00000000,
+/* 0x0008: ctx_query_address_low */
+ 0x00000000,
+/* 0x000c: ctx_query_counter */
+ 0x00000000,
+/* 0x0010: ctx_src_address_high */
+ 0x00000000,
+/* 0x0014: ctx_src_address_low */
+ 0x00000000,
+/* 0x0018: ctx_src_pitch */
+ 0x00000000,
+/* 0x001c: ctx_src_tile_mode */
+ 0x00000000,
+/* 0x0020: ctx_src_xsize */
+ 0x00000000,
+/* 0x0024: ctx_src_ysize */
+ 0x00000000,
+/* 0x0028: ctx_src_zsize */
+ 0x00000000,
+/* 0x002c: ctx_src_zoff */
+ 0x00000000,
+/* 0x0030: ctx_src_xoff */
+ 0x00000000,
+/* 0x0034: ctx_src_yoff */
+ 0x00000000,
+/* 0x0038: ctx_src_cpp */
+ 0x00000000,
+/* 0x003c: ctx_dst_address_high */
+ 0x00000000,
+/* 0x0040: ctx_dst_address_low */
+ 0x00000000,
+/* 0x0044: ctx_dst_pitch */
+ 0x00000000,
+/* 0x0048: ctx_dst_tile_mode */
+ 0x00000000,
+/* 0x004c: ctx_dst_xsize */
+ 0x00000000,
+/* 0x0050: ctx_dst_ysize */
+ 0x00000000,
+/* 0x0054: ctx_dst_zsize */
+ 0x00000000,
+/* 0x0058: ctx_dst_zoff */
+ 0x00000000,
+/* 0x005c: ctx_dst_xoff */
+ 0x00000000,
+/* 0x0060: ctx_dst_yoff */
+ 0x00000000,
+/* 0x0064: ctx_dst_cpp */
+ 0x00000000,
+/* 0x0068: ctx_format */
+ 0x00000000,
+/* 0x006c: ctx_swz_const0 */
+ 0x00000000,
+/* 0x0070: ctx_swz_const1 */
+ 0x00000000,
+/* 0x0074: ctx_xcnt */
+ 0x00000000,
+/* 0x0078: ctx_ycnt */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0100: dispatch_table */
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+ 0x00010040,
+ 0x0001019f,
+ 0x00000000,
+ 0x00010050,
+ 0x000101a1,
+ 0x00000000,
+ 0x00070080,
+ 0x0000001c,
+ 0xfffff000,
+ 0x00000020,
+ 0xfff80000,
+ 0x00000024,
+ 0xffffe000,
+ 0x00000028,
+ 0xfffff800,
+ 0x0000002c,
+ 0xfffff000,
+ 0x00000030,
+ 0xfff80000,
+ 0x00000034,
+ 0xffffe000,
+ 0x00070088,
+ 0x00000048,
+ 0xfffff000,
+ 0x0000004c,
+ 0xfff80000,
+ 0x00000050,
+ 0xffffe000,
+ 0x00000054,
+ 0xfffff800,
+ 0x00000058,
+ 0xfffff000,
+ 0x0000005c,
+ 0xfff80000,
+ 0x00000060,
+ 0xffffe000,
+ 0x000200c0,
+ 0x000104b8,
+ 0x00000000,
+ 0x00010541,
+ 0x00000000,
+ 0x000e00c3,
+ 0x00000010,
+ 0xffffff00,
+ 0x00000014,
+ 0x00000000,
+ 0x0000003c,
+ 0xffffff00,
+ 0x00000040,
+ 0x00000000,
+ 0x00000018,
+ 0xfff80000,
+ 0x00000044,
+ 0xfff80000,
+ 0x00000074,
+ 0xffff0000,
+ 0x00000078,
+ 0xffffe000,
+ 0x00000068,
+ 0xfccc0000,
+ 0x0000006c,
+ 0x00000000,
+ 0x00000070,
+ 0x00000000,
+ 0x00000004,
+ 0xffffff00,
+ 0x00000008,
+ 0x00000000,
+ 0x0000000c,
+ 0x00000000,
+ 0x00000800,
+};
+
+uint32_t nvc0_pcopy_code[] = {
+/* 0x0000: main */
+ 0x04fe04bd,
+ 0x3517f000,
+ 0xf10010fe,
+ 0xf1040017,
+ 0xf0fff327,
+ 0x12d00023,
+ 0x0c25f0c0,
+ 0xf40012d0,
+ 0x17f11031,
+ 0x27f01200,
+ 0x0012d003,
+/* 0x002f: spin */
+ 0xf40031f4,
+ 0x0ef40028,
+/* 0x0035: ih */
+ 0x8001cffd,
+ 0xf40812c4,
+ 0x21f4060b,
+/* 0x0041: ih_no_chsw */
+ 0x0412c4ca,
+ 0xf5070bf4,
+/* 0x004b: ih_no_cmd */
+ 0xc4010221,
+ 0x01d00c11,
+/* 0x0053: swctx */
+ 0xf101f840,
+ 0xfe770047,
+ 0x47f1004b,
+ 0x44cf2100,
+ 0x0144f000,
+ 0xb60444b6,
+ 0xf7f13040,
+ 0xf4b6061c,
+ 0x1457f106,
+ 0x00f5d101,
+ 0xb6043594,
+ 0x57fe0250,
+ 0x0145fe00,
+ 0x010052b7,
+ 0x00ff67f1,
+ 0x56fd60bd,
+ 0x0253f004,
+ 0xf80545fa,
+ 0x0053f003,
+ 0xd100e7f0,
+ 0x549800fe,
+ 0x0845b600,
+ 0xb6015698,
+ 0x46fd1864,
+ 0x0047fe05,
+ 0xf00204b9,
+ 0x01f40643,
+ 0x0604fa09,
+/* 0x00c3: swctx_load */
+ 0xfa060ef4,
+/* 0x00c6: swctx_done */
+ 0x03f80504,
+/* 0x00ca: chsw */
+ 0x27f100f8,
+ 0x23cf1400,
+ 0x1e3fc800,
+ 0xf4170bf4,
+ 0x21f40132,
+ 0x1e3af053,
+ 0xf00023d0,
+ 0x24d00147,
+/* 0x00eb: chsw_no_unload */
+ 0xcf00f880,
+ 0x3dc84023,
+ 0x090bf41e,
+ 0xf40131f4,
+/* 0x00fa: chsw_finish_load */
+ 0x37f05321,
+ 0x8023d002,
+/* 0x0102: dispatch */
+ 0x37f100f8,
+ 0x32cf1900,
+ 0x0033cf40,
+ 0x07ff24e4,
+ 0xf11024b6,
+ 0xbd010057,
+/* 0x011b: dispatch_loop */
+ 0x5874bd64,
+ 0x57580056,
+ 0x0450b601,
+ 0xf40446b8,
+ 0x76bb4d08,
+ 0x0447b800,
+ 0xbb0f08f4,
+ 0x74b60276,
+ 0x0057bb03,
+/* 0x013f: dispatch_valid_mthd */
+ 0xbbdf0ef4,
+ 0x44b60246,
+ 0x0045bb03,
+ 0xfd014598,
+ 0x54b00453,
+ 0x201bf400,
+ 0x58004558,
+ 0x64b00146,
+ 0x091bf400,
+ 0xf4005380,
+/* 0x0166: dispatch_cmd */
+ 0x32f4300e,
+ 0xf455f901,
+ 0x0ef40c01,
+/* 0x0171: dispatch_invalid_bitfield */
+ 0x0225f025,
+/* 0x0174: dispatch_illegal_mthd */
+/* 0x0177: dispatch_error */
+ 0xf10125f0,
+ 0xd0100047,
+ 0x43d00042,
+ 0x4027f040,
+/* 0x0187: hostirq_wait */
+ 0xcf0002d0,
+ 0x24f08002,
+ 0x0024b040,
+/* 0x0193: dispatch_done */
+ 0xf1f71bf4,
+ 0xf01d0027,
+ 0x23d00137,
+/* 0x019f: cmd_nop */
+ 0xf800f800,
+/* 0x01a1: cmd_pm_trigger */
+ 0x0027f100,
+ 0xf034bd22,
+ 0x23d00233,
+/* 0x01af: cmd_exec_set_format */
+ 0xf400f800,
+ 0x01b0f030,
+ 0x0101b000,
+ 0xb00201b0,
+ 0x04980301,
+ 0x3045c71a,
+ 0xc70150b6,
+ 0x60b63446,
+ 0x3847c701,
+ 0xf40170b6,
+ 0x84bd0232,
+/* 0x01da: ncomp_loop */
+ 0x4ac494bd,
+ 0x0445b60f,
+/* 0x01e2: bpc_loop */
+ 0xa430b4bd,
+ 0x0f18f404,
+ 0xbbc0a5ff,
+ 0x31f400cb,
+ 0x220ef402,
+/* 0x01f4: cmp_c0 */
+ 0xf00c1bf4,
+ 0xcbbb10c7,
+ 0x160ef400,
+/* 0x0200: cmp_c1 */
+ 0xf406a430,
+ 0xc7f00c18,
+ 0x00cbbb14,
+/* 0x020f: cmp_zero */
+ 0xf1070ef4,
+/* 0x0213: bpc_next */
+ 0x380080c7,
+ 0x80b601c8,
+ 0x01b0b601,
+ 0xf404b5b8,
+ 0x90b6c308,
+ 0x0497b801,
+ 0xfdb208f4,
+ 0x06800065,
+ 0x1d08980e,
+ 0xf40068fd,
+ 0x64bd0502,
+/* 0x023c: dst_xcnt */
+ 0x800075fd,
+ 0x78fd1907,
+ 0x1057f100,
+ 0x0654b608,
+ 0xd00056d0,
+ 0x50b74057,
+ 0x06980800,
+ 0x0162b619,
+ 0x980864b6,
+ 0x72b60e07,
+ 0x0567fd01,
+ 0xb70056d0,
+ 0xb4010050,
+ 0x56d00060,
+ 0x0160b400,
+ 0xb44056d0,
+ 0x56d00260,
+ 0x0360b480,
+ 0xb7c056d0,
+ 0x98040050,
+ 0x56d01b06,
+ 0x1c069800,
+ 0xf44056d0,
+ 0x00f81030,
+/* 0x029c: cmd_exec_set_surface_tiled */
+ 0xc7075798,
+ 0x78c76879,
+ 0x0380b664,
+ 0xb06077c7,
+ 0x1bf40e76,
+ 0x0477f009,
+/* 0x02b7: xtile64 */
+ 0xf00f0ef4,
+ 0x70b6027c,
+ 0x0947fd11,
+/* 0x02c3: xtileok */
+ 0x980677f0,
+ 0x5b980c5a,
+ 0x00abfd0e,
+ 0xbb01b7f0,
+ 0xb2b604b7,
+ 0xc4abff01,
+ 0x9805a7bb,
+ 0xe7f00d5d,
+ 0x04e8bb01,
+ 0xff01e2b6,
+ 0xd8bbb4de,
+ 0x01e0b605,
+ 0xbb0cef94,
+ 0xfefd02eb,
+ 0x026cf005,
+ 0x020860b7,
+ 0xd00864b6,
+ 0xb7bb006f,
+ 0x00cbbb04,
+ 0x98085f98,
+ 0xfbfd0e5b,
+ 0x01b7f000,
+ 0xb604b7bb,
+ 0xfbbb01b2,
+ 0x05f7bb00,
+ 0x5f98f0f9,
+ 0x01b7f009,
+ 0xb604b8bb,
+ 0xfbbb01b2,
+ 0x05f8bb00,
+ 0x78bbf0f9,
+ 0x0282b600,
+ 0xbb01b7f0,
+ 0xb9bb04b8,
+ 0x0b589804,
+ 0xbb01e7f0,
+ 0xe2b604e9,
+ 0xf48eff01,
+ 0xbb04f7bb,
+ 0x79bb00cf,
+ 0x0589bb00,
+ 0x90fcf0fc,
+ 0xbb00d9fd,
+ 0x89fd00ad,
+ 0x008ffd00,
+ 0xbb00a8bb,
+ 0x92b604a7,
+ 0x0497bb01,
+ 0x988069d0,
+ 0x58980557,
+ 0x00acbb04,
+ 0xb6007abb,
+ 0x84b60081,
+ 0x058bfd10,
+ 0x060062b7,
+ 0xb70067d0,
+ 0xd0040060,
+ 0x00f80068,
+/* 0x03a8: cmd_exec_set_surface_linear */
+ 0xb7026cf0,
+ 0xb6020260,
+ 0x57980864,
+ 0x0067d005,
+ 0x040060b7,
+ 0xb6045798,
+ 0x67d01074,
+ 0x0060b700,
+ 0x06579804,
+ 0xf80067d0,
+/* 0x03d1: cmd_exec_wait */
+ 0xf900f900,
+ 0x0007f110,
+ 0x0604b608,
+/* 0x03dc: loop */
+ 0xf00001cf,
+ 0x1bf40114,
+ 0xfc10fcfa,
+/* 0x03eb: cmd_exec_query */
+ 0xc800f800,
+ 0x1bf40d34,
+ 0xd121f570,
+ 0x0c47f103,
+ 0x0644b608,
+ 0xb6020598,
+ 0x45d00450,
+ 0x4040d000,
+ 0xd00c57f0,
+ 0x40b78045,
+ 0x05980400,
+ 0x1054b601,
+ 0xb70045d0,
+ 0xf1050040,
+ 0xf00b0057,
+ 0x45d00153,
+ 0x4057f100,
+ 0x0154b640,
+ 0x808053f1,
+ 0xf14045d0,
+ 0xf1111057,
+ 0xd0131253,
+ 0x57f18045,
+ 0x53f11514,
+ 0x45d01716,
+ 0x0157f1c0,
+ 0x0153f026,
+ 0x080047f1,
+ 0xd00644b6,
+/* 0x045e: query_counter */
+ 0x21f50045,
+ 0x47f103d1,
+ 0x44b6080c,
+ 0x02059806,
+ 0xd00045d0,
+ 0x57f04040,
+ 0x8045d004,
+ 0x040040b7,
+ 0xb6010598,
+ 0x45d01054,
+ 0x0040b700,
+ 0x0057f105,
+ 0x0045d003,
+ 0x111057f1,
+ 0x131253f1,
+ 0x984045d0,
+ 0x40b70305,
+ 0x45d00500,
+ 0x0157f100,
+ 0x0153f026,
+ 0x080047f1,
+ 0xd00644b6,
+ 0x00f80045,
+/* 0x04b8: cmd_exec */
+ 0x03d121f5,
+ 0xf4003fc8,
+ 0x21f50e0b,
+ 0x47f101af,
+ 0x0ef40200,
+/* 0x04cd: cmd_exec_no_format */
+ 0x1067f11e,
+ 0x0664b608,
+ 0x800177f0,
+ 0x07800e07,
+ 0x1d079819,
+ 0xd00067d0,
+ 0x44bd4067,
+/* 0x04e8: cmd_exec_init_src_surface */
+ 0xbd0232f4,
+ 0x043fc854,
+ 0xf50a0bf4,
+ 0xf403a821,
+/* 0x04fa: src_tiled */
+ 0x21f50a0e,
+ 0x49f0029c,
+/* 0x0501: cmd_exec_init_dst_surface */
+ 0x0231f407,
+ 0xc82c57f0,
+ 0x0bf4083f,
+ 0xa821f50a,
+ 0x0a0ef403,
+/* 0x0514: dst_tiled */
+ 0x029c21f5,
+/* 0x051b: cmd_exec_kick */
+ 0xf10849f0,
+ 0xb6080057,
+ 0x06980654,
+ 0x4056d01e,
+ 0xf14167f0,
+ 0xfd440063,
+ 0x54d00546,
+ 0x0c3fc800,
+ 0xf5070bf4,
+/* 0x053f: cmd_exec_done */
+ 0xf803eb21,
+/* 0x0541: cmd_wrcache_flush */
+ 0x0027f100,
+ 0xf034bd22,
+ 0x23d00133,
+ 0x0000f800,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
new file mode 100644
index 00000000000..f31527733e0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/falcon.h>
+#include <engine/fifo.h>
+#include <engine/copy.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+
+#include <core/client.h>
+#include <core/class.h>
+#include <core/enum.h>
+
+#include "fuc/nva3.fuc.h"
+
+struct nva3_copy_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nva3_copy_sclass[] = {
+ { 0x85b5, &nouveau_object_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * PCOPY context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nva3_copy_cclass = {
+ .handle = NV_ENGCTX(COPY0, 0xa3),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PCOPY engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_enum nva3_copy_isr_error_name[] = {
+ { 0x0001, "ILLEGAL_MTHD" },
+ { 0x0002, "INVALID_ENUM" },
+ { 0x0003, "INVALID_BITFIELD" },
+ {}
+};
+
+void
+nva3_copy_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+ struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_falcon *falcon = (void *)subdev;
+ struct nouveau_object *engctx;
+ u32 dispatch = nv_ro32(falcon, 0x01c);
+ u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+ u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
+ u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
+ u32 addr = nv_ro32(falcon, 0x040) >> 16;
+ u32 mthd = (addr & 0x07ff) << 2;
+ u32 subc = (addr & 0x3800) >> 11;
+ u32 data = nv_ro32(falcon, 0x044);
+ int chid;
+
+ engctx = nouveau_engctx_get(engine, inst);
+ chid = pfifo->chid(pfifo, engctx);
+
+ if (stat & 0x00000040) {
+ nv_error(falcon, "DISPATCH_ERROR [");
+ nouveau_enum_print(nva3_copy_isr_error_name, ssta);
+ pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, inst << 12, nouveau_client_name(engctx), subc,
+ mthd, data);
+ nv_wo32(falcon, 0x004, 0x00000040);
+ stat &= ~0x00000040;
+ }
+
+ if (stat) {
+ nv_error(falcon, "unhandled intr 0x%08x\n", stat);
+ nv_wo32(falcon, 0x004, stat);
+ }
+
+ nouveau_engctx_put(engctx);
+}
+
+static int
+nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ bool enable = (nv_device(parent)->chipset != 0xaf);
+ struct nva3_copy_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, enable,
+ "PCE0", "copy0", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00802000;
+ nv_subdev(priv)->intr = nva3_copy_intr;
+ nv_engine(priv)->cclass = &nva3_copy_cclass;
+ nv_engine(priv)->sclass = nva3_copy_sclass;
+ nv_falcon(priv)->code.data = nva3_pcopy_code;
+ nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code);
+ nv_falcon(priv)->data.data = nva3_pcopy_data;
+ nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data);
+ return 0;
+}
+
+struct nouveau_oclass
+nva3_copy_oclass = {
+ .handle = NV_ENGINE(COPY0, 0xa3),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva3_copy_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = _nouveau_falcon_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
new file mode 100644
index 00000000000..ac3291f781f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/falcon.h>
+#include <engine/fifo.h>
+#include <engine/copy.h>
+
+#include <core/class.h>
+#include <core/enum.h>
+
+#include "fuc/nvc0.fuc.h"
+
+struct nvc0_copy_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_copy0_sclass[] = {
+ { 0x90b5, &nouveau_object_ofuncs },
+ {},
+};
+
+static struct nouveau_oclass
+nvc0_copy1_sclass[] = {
+ { 0x90b8, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PCOPY context
+ ******************************************************************************/
+
+static struct nouveau_ofuncs
+nvc0_copy_context_ofuncs = {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+};
+
+static struct nouveau_oclass
+nvc0_copy0_cclass = {
+ .handle = NV_ENGCTX(COPY0, 0xc0),
+ .ofuncs = &nvc0_copy_context_ofuncs,
+};
+
+static struct nouveau_oclass
+nvc0_copy1_cclass = {
+ .handle = NV_ENGCTX(COPY1, 0xc0),
+ .ofuncs = &nvc0_copy_context_ofuncs,
+};
+
+/*******************************************************************************
+ * PCOPY engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_copy_init(struct nouveau_object *object)
+{
+ struct nvc0_copy_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0);
+ return 0;
+}
+
+static int
+nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_copy_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
+ "PCE0", "copy0", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000040;
+ nv_subdev(priv)->intr = nva3_copy_intr;
+ nv_engine(priv)->cclass = &nvc0_copy0_cclass;
+ nv_engine(priv)->sclass = nvc0_copy0_sclass;
+ nv_falcon(priv)->code.data = nvc0_pcopy_code;
+ nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+ nv_falcon(priv)->data.data = nvc0_pcopy_data;
+ nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
+ return 0;
+}
+
+static int
+nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_copy_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
+ "PCE1", "copy1", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000080;
+ nv_subdev(priv)->intr = nva3_copy_intr;
+ nv_engine(priv)->cclass = &nvc0_copy1_cclass;
+ nv_engine(priv)->sclass = nvc0_copy1_sclass;
+ nv_falcon(priv)->code.data = nvc0_pcopy_code;
+ nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+ nv_falcon(priv)->data.data = nvc0_pcopy_data;
+ nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_copy0_oclass = {
+ .handle = NV_ENGINE(COPY0, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_copy0_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nvc0_copy_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
+
+struct nouveau_oclass
+nvc0_copy1_oclass = {
+ .handle = NV_ENGINE(COPY1, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_copy1_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nvc0_copy_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
new file mode 100644
index 00000000000..748a61eb3c6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/copy.h>
+
+struct nve0_copy_priv {
+ struct nouveau_engine base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_copy_sclass[] = {
+ { 0xa0b5, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PCOPY context
+ ******************************************************************************/
+
+static struct nouveau_ofuncs
+nve0_copy_context_ofuncs = {
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
+};
+
+static struct nouveau_oclass
+nve0_copy_cclass = {
+ .handle = NV_ENGCTX(COPY0, 0xc0),
+ .ofuncs = &nve0_copy_context_ofuncs,
+};
+
+/*******************************************************************************
+ * PCOPY engine/subdev functions
+ ******************************************************************************/
+
+static void
+nve0_copy_intr(struct nouveau_subdev *subdev)
+{
+ const int ce = nv_subidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
+ struct nve0_copy_priv *priv = (void *)subdev;
+ u32 stat = nv_rd32(priv, 0x104908 + (ce * 0x1000));
+
+ if (stat) {
+ nv_warn(priv, "unhandled intr 0x%08x\n", stat);
+ nv_wr32(priv, 0x104908 + (ce * 0x1000), stat);
+ }
+}
+
+static int
+nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_copy_priv *priv;
+ int ret;
+
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PCE0", "copy0", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000040;
+ nv_subdev(priv)->intr = nve0_copy_intr;
+ nv_engine(priv)->cclass = &nve0_copy_cclass;
+ nv_engine(priv)->sclass = nve0_copy_sclass;
+ return 0;
+}
+
+static int
+nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_copy_priv *priv;
+ int ret;
+
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PCE1", "copy1", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000080;
+ nv_subdev(priv)->intr = nve0_copy_intr;
+ nv_engine(priv)->cclass = &nve0_copy_cclass;
+ nv_engine(priv)->sclass = nve0_copy_sclass;
+ return 0;
+}
+
+static int
+nve0_copy2_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_copy_priv *priv;
+ int ret;
+
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PCE2", "copy2", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00200000;
+ nv_subdev(priv)->intr = nve0_copy_intr;
+ nv_engine(priv)->cclass = &nve0_copy_cclass;
+ nv_engine(priv)->sclass = nve0_copy_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_copy0_oclass = {
+ .handle = NV_ENGINE(COPY0, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_copy0_ctor,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
+ },
+};
+
+struct nouveau_oclass
+nve0_copy1_oclass = {
+ .handle = NV_ENGINE(COPY1, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_copy1_ctor,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
+ },
+};
+
+struct nouveau_oclass
+nve0_copy2_oclass = {
+ .handle = NV_ENGINE(COPY2, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_copy2_ctor,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc
new file mode 100644
index 00000000000..629da02dc35
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc
@@ -0,0 +1,698 @@
+/*
+ * fuc microcode for nv98 pcrypt engine
+ * Copyright (C) 2010 Marcin Kościelnicki
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+.section #nv98_pcrypt_data
+
+ctx_dma:
+ctx_dma_query: .b32 0
+ctx_dma_src: .b32 0
+ctx_dma_dst: .b32 0
+.equ #dma_count 3
+ctx_query_address_high: .b32 0
+ctx_query_address_low: .b32 0
+ctx_query_counter: .b32 0
+ctx_cond_address_high: .b32 0
+ctx_cond_address_low: .b32 0
+ctx_cond_off: .b32 0
+ctx_src_address_high: .b32 0
+ctx_src_address_low: .b32 0
+ctx_dst_address_high: .b32 0
+ctx_dst_address_low: .b32 0
+ctx_mode: .b32 0
+.align 16
+ctx_key: .skip 16
+ctx_iv: .skip 16
+
+.align 0x80
+swap:
+.skip 32
+
+.align 8
+common_cmd_dtable:
+.b32 #ctx_query_address_high + 0x20000 ~0xff
+.b32 #ctx_query_address_low + 0x20000 ~0xfffffff0
+.b32 #ctx_query_counter + 0x20000 ~0xffffffff
+.b32 #cmd_query_get + 0x00000 ~1
+.b32 #ctx_cond_address_high + 0x20000 ~0xff
+.b32 #ctx_cond_address_low + 0x20000 ~0xfffffff0
+.b32 #cmd_cond_mode + 0x00000 ~7
+.b32 #cmd_wrcache_flush + 0x00000 ~0
+.equ #common_cmd_max 0x88
+
+
+.align 8
+engine_cmd_dtable:
+.b32 #ctx_key + 0x0 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0x4 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0x8 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0xc + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x0 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x4 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x8 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0xc + 0x20000 ~0xffffffff
+.b32 #ctx_src_address_high + 0x20000 ~0xff
+.b32 #ctx_src_address_low + 0x20000 ~0xfffffff0
+.b32 #ctx_dst_address_high + 0x20000 ~0xff
+.b32 #ctx_dst_address_low + 0x20000 ~0xfffffff0
+.b32 #crypt_cmd_mode + 0x00000 ~0xf
+.b32 #crypt_cmd_length + 0x10000 ~0x0ffffff0
+.equ #engine_cmd_max 0xce
+
+.align 4
+crypt_dtable:
+.b16 #crypt_copy_prep #crypt_do_inout
+.b16 #crypt_store_prep #crypt_do_out
+.b16 #crypt_ecb_e_prep #crypt_do_inout
+.b16 #crypt_ecb_d_prep #crypt_do_inout
+.b16 #crypt_cbc_e_prep #crypt_do_inout
+.b16 #crypt_cbc_d_prep #crypt_do_inout
+.b16 #crypt_pcbc_e_prep #crypt_do_inout
+.b16 #crypt_pcbc_d_prep #crypt_do_inout
+.b16 #crypt_cfb_e_prep #crypt_do_inout
+.b16 #crypt_cfb_d_prep #crypt_do_inout
+.b16 #crypt_ofb_prep #crypt_do_inout
+.b16 #crypt_ctr_prep #crypt_do_inout
+.b16 #crypt_cbc_mac_prep #crypt_do_in
+.b16 #crypt_cmac_finish_complete_prep #crypt_do_in
+.b16 #crypt_cmac_finish_partial_prep #crypt_do_in
+
+.align 0x100
+
+.section #nv98_pcrypt_code
+
+ // $r0 is always set to 0 in our code - this allows some space savings.
+ clear b32 $r0
+
+ // set up the interrupt handler
+ mov $r1 #ih
+ mov $iv0 $r1
+
+ // init stack pointer
+ mov $sp $r0
+
+ // set interrupt dispatch - route timer, fifo, ctxswitch to i0, others to host
+ movw $r1 0xfff0
+ sethi $r1 0
+ mov $r2 0x400
+ iowr I[$r2 + 0x300] $r1
+
+ // enable the interrupts
+ or $r1 0xc
+ iowr I[$r2] $r1
+
+ // enable fifo access and context switching
+ mov $r1 3
+ mov $r2 0x1200
+ iowr I[$r2] $r1
+
+ // enable i0 delivery
+ bset $flags ie0
+
+	// sleep forever, waking only for interrupts.
+ bset $flags $p0
+ spin:
+ sleep $p0
+ bra #spin
+
+// i0 handler
+ih:
+ // see which interrupts we got
+ iord $r1 I[$r0 + 0x200]
+
+ and $r2 $r1 0x8
+ cmpu b32 $r2 0
+ bra e #noctx
+
+ // context switch... prepare the regs for xfer
+ mov $r2 0x7700
+ mov $xtargets $r2
+ mov $xdbase $r0
+ // 128-byte context.
+ mov $r2 0
+ sethi $r2 0x50000
+
+ // read current channel
+ mov $r3 0x1400
+ iord $r4 I[$r3]
+ // if bit 30 set, it's active, so we have to unload it first.
+ shl b32 $r5 $r4 1
+ cmps b32 $r5 0
+ bra nc #ctxload
+
+ // unload the current channel - save the context
+ xdst $r0 $r2
+ xdwait
+ // and clear bit 30, then write back
+ bclr $r4 0x1e
+ iowr I[$r3] $r4
+ // tell PFIFO we unloaded
+ mov $r4 1
+ iowr I[$r3 + 0x200] $r4
+
+ bra #noctx
+
+ ctxload:
+ // no channel loaded - perhaps we're requested to load one
+ iord $r4 I[$r3 + 0x100]
+ shl b32 $r15 $r4 1
+ cmps b32 $r15 0
+ // if bit 30 of next channel not set, probably PFIFO is just
+ // killing a context. do a faux load, without the active bit.
+ bra nc #dummyload
+
+ // ok, do a real context load.
+ xdld $r0 $r2
+ xdwait
+ mov $r5 #ctx_dma
+ mov $r6 #dma_count - 1
+ ctxload_dma_loop:
+ ld b32 $r7 D[$r5 + $r6 * 4]
+ add b32 $r8 $r6 0x180
+ shl b32 $r8 8
+ iowr I[$r8] $r7
+ sub b32 $r6 1
+ bra nc #ctxload_dma_loop
+
+ dummyload:
+ // tell PFIFO we're done
+ mov $r5 2
+ iowr I[$r3 + 0x200] $r5
+
+ noctx:
+ and $r2 $r1 0x4
+ cmpu b32 $r2 0
+ bra e #nocmd
+
+ // incoming fifo command.
+ mov $r3 0x1900
+ iord $r2 I[$r3 + 0x100]
+ iord $r3 I[$r3]
+ // extract the method
+ and $r4 $r2 0x7ff
+ // shift the addr to proper position if we need to interrupt later
+ shl b32 $r2 0x10
+
+ // mthd 0 and 0x100 [NAME, NOP]: ignore
+ and $r5 $r4 0x7bf
+ cmpu b32 $r5 0
+ bra e #cmddone
+
+ mov $r5 #engine_cmd_dtable - 0xc0 * 8
+ mov $r6 #engine_cmd_max
+ cmpu b32 $r4 0xc0
+ bra nc #dtable_cmd
+ mov $r5 #common_cmd_dtable - 0x80 * 8
+ mov $r6 #common_cmd_max
+ cmpu b32 $r4 0x80
+ bra nc #dtable_cmd
+ cmpu b32 $r4 0x60
+ bra nc #dma_cmd
+ cmpu b32 $r4 0x50
+ bra ne #illegal_mthd
+
+ // mthd 0x140: PM_TRIGGER
+ mov $r2 0x2200
+ clear b32 $r3
+ sethi $r3 0x20000
+ iowr I[$r2] $r3
+ bra #cmddone
+
+ dma_cmd:
+ // mthd 0x180...: DMA_*
+ cmpu b32 $r4 0x60+#dma_count
+ bra nc #illegal_mthd
+ shl b32 $r5 $r4 2
+ add b32 $r5 ((#ctx_dma - 0x60 * 4) & 0xffff)
+ bset $r3 0x1e
+ st b32 D[$r5] $r3
+ add b32 $r4 0x180 - 0x60
+ shl b32 $r4 8
+ iowr I[$r4] $r3
+ bra #cmddone
+
+ dtable_cmd:
+ cmpu b32 $r4 $r6
+ bra nc #illegal_mthd
+ shl b32 $r4 3
+ add b32 $r4 $r5
+ ld b32 $r5 D[$r4 + 4]
+ and $r5 $r3
+ cmpu b32 $r5 0
+ bra ne #invalid_bitfield
+ ld b16 $r5 D[$r4]
+ ld b16 $r6 D[$r4 + 2]
+ cmpu b32 $r6 2
+ bra e #cmd_setctx
+ ld b32 $r7 D[$r0 + #ctx_cond_off]
+ and $r6 $r7
+ cmpu b32 $r6 1
+ bra e #cmddone
+ call $r5
+ bra $p1 #dispatch_error
+ bra #cmddone
+
+ cmd_setctx:
+ st b32 D[$r5] $r3
+ bra #cmddone
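For reference, the 8-byte dtable entry this handler decodes, as a
hypothetical C struct (field roles inferred from the loads above and from the
+0x10000/+0x20000 and ~mask terms in the tables; the falcon is
little-endian):

	#include <linux/types.h>

	struct dtable_entry {
		u16 target;   /* ctx offset (type 2) or handler address */
		u16 type;     /* 0: call, 1: call unless cond off, 2: store */
		u32 bad_bits; /* param bits that must be 0: INVALID_BITFIELD */
	};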
+
+
+ invalid_bitfield:
+ or $r2 1
+ dispatch_error:
+ illegal_mthd:
+ mov $r4 0x1000
+ iowr I[$r4] $r2
+ iowr I[$r4 + 0x100] $r3
+ mov $r4 0x40
+ iowr I[$r0] $r4
+
+ im_loop:
+ iord $r4 I[$r0 + 0x200]
+ and $r4 0x40
+ cmpu b32 $r4 0
+ bra ne #im_loop
+
+ cmddone:
+ // remove the command from FIFO
+ mov $r3 0x1d00
+ mov $r4 1
+ iowr I[$r3] $r4
+
+ nocmd:
+ // ack the processed interrupts
+ and $r1 $r1 0xc
+ iowr I[$r0 + 0x100] $r1
+iret
+
+cmd_query_get:
+ // if bit 0 of param set, trigger interrupt afterwards.
+ setp $p1 $r3
+ or $r2 3
+
+ // read PTIMER, beware of races...
+ mov $r4 0xb00
+ ptimer_retry:
+ iord $r6 I[$r4 + 0x100]
+ iord $r5 I[$r4]
+ iord $r7 I[$r4 + 0x100]
+ cmpu b32 $r6 $r7
+ bra ne #ptimer_retry
+
+ // prepare the query structure
+ ld b32 $r4 D[$r0 + #ctx_query_counter]
+ st b32 D[$r0 + #swap + 0x0] $r4
+ st b32 D[$r0 + #swap + 0x4] $r0
+ st b32 D[$r0 + #swap + 0x8] $r5
+ st b32 D[$r0 + #swap + 0xc] $r6
+
+ // will use target 0, DMA_QUERY.
+ mov $xtargets $r0
+
+ ld b32 $r4 D[$r0 + #ctx_query_address_high]
+ shl b32 $r4 0x18
+ mov $xdbase $r4
+
+ ld b32 $r4 D[$r0 + #ctx_query_address_low]
+ mov $r5 #swap
+ sethi $r5 0x20000
+ xdst $r4 $r5
+ xdwait
+
+ ret
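The 16-byte record staged in #swap above, as a hypothetical C struct
matching the four stores:

	#include <linux/types.h>

	struct crypt_query_record {
		u32 counter;  /* ctx_query_counter */
		u32 zero;     /* always written as 0 */
		u32 time_lo;  /* PTIMER low word */
		u32 time_hi;  /* high word, read twice to dodge the race */
	};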
+
+cmd_cond_mode:
+ // if >= 5, INVALID_ENUM
+ bset $flags $p1
+ or $r2 2
+ cmpu b32 $r3 5
+ bra nc #return
+
+ // otherwise, no error.
+ bclr $flags $p1
+
+ // if < 2, no QUERY object is involved
+ cmpu b32 $r3 2
+ bra nc #cmd_cond_mode_queryful
+
+ xor $r3 1
+ st b32 D[$r0 + #ctx_cond_off] $r3
+ return:
+ ret
+
+ cmd_cond_mode_queryful:
+ // ok, will need to pull a QUERY object, prepare offsets
+ ld b32 $r4 D[$r0 + #ctx_cond_address_high]
+ ld b32 $r5 D[$r0 + #ctx_cond_address_low]
+ and $r6 $r5 0xff
+ shr b32 $r5 8
+ shl b32 $r4 0x18
+ or $r4 $r5
+ mov $xdbase $r4
+ mov $xtargets $r0
+
+ // pull the first one
+ mov $r5 #swap
+ sethi $r5 0x20000
+ xdld $r6 $r5
+
+ // if == 2, only a single QUERY is involved...
+ cmpu b32 $r3 2
+ bra ne #cmd_cond_mode_double
+
+ xdwait
+ ld b32 $r4 D[$r0 + #swap + 4]
+ cmpu b32 $r4 0
+ xbit $r4 $flags z
+ st b32 D[$r0 + #ctx_cond_off] $r4
+ ret
+
+ // ok, we'll need to pull second one too
+ cmd_cond_mode_double:
+ add b32 $r6 0x10
+ add b32 $r5 0x10
+ xdld $r6 $r5
+ xdwait
+
+ // compare COUNTERs
+ ld b32 $r5 D[$r0 + #swap + 0x00]
+ ld b32 $r6 D[$r0 + #swap + 0x10]
+ cmpu b32 $r5 $r6
+ xbit $r4 $flags z
+
+	// compare the RES fields
+ ld b32 $r5 D[$r0 + #swap + 0x04]
+ ld b32 $r6 D[$r0 + #swap + 0x14]
+ cmpu b32 $r5 $r6
+ xbit $r5 $flags z
+ and $r4 $r5
+
+ // and negate or not, depending on mode
+ cmpu b32 $r3 3
+ xbit $r5 $flags z
+ xor $r4 $r5
+ st b32 D[$r0 + #ctx_cond_off] $r4
+ ret
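A hedged summary of the five COND_MODE values accepted above (enum names are
illustrative; the semantics follow the code paths):

	enum crypt_cond_mode {
		COND_NEVER        = 0, /* cond off: conditional mthds skipped */
		COND_ALWAYS       = 1, /* cond on: conditional mthds run */
		COND_RES_NON_ZERO = 2, /* run if the query's RES word != 0 */
		COND_EQUAL        = 3, /* run if both queries match */
		COND_NOT_EQUAL    = 4, /* run if the two queries differ */
	};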
+
+cmd_wrcache_flush:
+ bclr $flags $p1
+ mov $r2 0x2200
+ clear b32 $r3
+ sethi $r3 0x10000
+ iowr I[$r2] $r3
+ ret
+
+crypt_cmd_mode:
+ // if >= 0xf, INVALID_ENUM
+ bset $flags $p1
+ or $r2 2
+ cmpu b32 $r3 0xf
+ bra nc #crypt_cmd_mode_return
+
+ bclr $flags $p1
+ st b32 D[$r0 + #ctx_mode] $r3
+
+ crypt_cmd_mode_return:
+ ret
+
+crypt_cmd_length:
+ // nop if length == 0
+ cmpu b32 $r3 0
+ bra e #crypt_cmd_mode_return
+
+ // init key, IV
+ cxset 3
+ mov $r4 #ctx_key
+ sethi $r4 0x70000
+ xdst $r0 $r4
+ mov $r4 #ctx_iv
+ sethi $r4 0x60000
+ xdst $r0 $r4
+ xdwait
+ ckeyreg $c7
+
+ // prepare the targets
+ mov $r4 0x2100
+ mov $xtargets $r4
+
+ // prepare src address
+ ld b32 $r4 D[$r0 + #ctx_src_address_high]
+ ld b32 $r5 D[$r0 + #ctx_src_address_low]
+ shr b32 $r8 $r5 8
+ shl b32 $r4 0x18
+ or $r4 $r8
+ and $r5 $r5 0xff
+
+ // prepare dst address
+ ld b32 $r6 D[$r0 + #ctx_dst_address_high]
+ ld b32 $r7 D[$r0 + #ctx_dst_address_low]
+ shr b32 $r8 $r7 8
+ shl b32 $r6 0x18
+ or $r6 $r8
+ and $r7 $r7 0xff
+
+ // find the proper prep & do functions
+ ld b32 $r8 D[$r0 + #ctx_mode]
+ shl b32 $r8 2
+
+ // run prep
+ ld b16 $r9 D[$r8 + #crypt_dtable]
+ call $r9
+
+ // do it
+ ld b16 $r9 D[$r8 + #crypt_dtable + 2]
+ call $r9
+ cxset 1
+ xdwait
+ cxset 0x61
+ xdwait
+ xdwait
+
+ // update src address
+ shr b32 $r8 $r4 0x18
+ shl b32 $r9 $r4 8
+ add b32 $r9 $r5
+ adc b32 $r8 0
+ st b32 D[$r0 + #ctx_src_address_high] $r8
+ st b32 D[$r0 + #ctx_src_address_low] $r9
+
+ // update dst address
+ shr b32 $r8 $r6 0x18
+ shl b32 $r9 $r6 8
+ add b32 $r9 $r7
+ adc b32 $r8 0
+ st b32 D[$r0 + #ctx_dst_address_high] $r8
+ st b32 D[$r0 + #ctx_dst_address_low] $r9
+
+ // pull updated IV
+ cxset 2
+ mov $r4 #ctx_iv
+ sethi $r4 0x60000
+ xdld $r0 $r4
+ xdwait
+
+ ret
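The two updates above recombine a 40-bit byte address that crypt_cmd_length
split for the DMA engine; a hypothetical C equivalent:

	#include <linux/types.h>

	/* base_shr8 is the address >> 8 ($r4/$r6); low_plus_len is the low
	 * byte plus the bytes processed ($r5/$r7). */
	static void update_address(u32 base_shr8, u32 low_plus_len,
				   u32 *addr_high, u32 *addr_low)
	{
		*addr_high = base_shr8 >> 24;                 /* bits 39:32 */
		*addr_low  = (base_shr8 << 8) + low_plus_len; /* bits 31:0 */
		if (*addr_low < (base_shr8 << 8))             /* adc b32 */
			(*addr_high)++;
	}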
+
+
+crypt_copy_prep:
+ cs0begin 2
+ cxsin $c0
+ cxsout $c0
+ ret
+
+crypt_store_prep:
+ cs0begin 1
+ cxsout $c6
+ ret
+
+crypt_ecb_e_prep:
+ cs0begin 3
+ cxsin $c0
+ cenc $c0 $c0
+ cxsout $c0
+ ret
+
+crypt_ecb_d_prep:
+ ckexp $c7 $c7
+ cs0begin 3
+ cxsin $c0
+ cdec $c0 $c0
+ cxsout $c0
+ ret
+
+crypt_cbc_e_prep:
+ cs0begin 4
+ cxsin $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ cxsout $c6
+ ret
+
+crypt_cbc_d_prep:
+ ckexp $c7 $c7
+ cs0begin 5
+ cmov $c2 $c6
+ cxsin $c6
+ cdec $c0 $c6
+ cxor $c0 $c2
+ cxsout $c0
+ ret
+
+crypt_pcbc_e_prep:
+ cs0begin 5
+ cxsin $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ cxsout $c6
+ cxor $c6 $c0
+ ret
+
+crypt_pcbc_d_prep:
+ ckexp $c7 $c7
+ cs0begin 5
+ cxsin $c0
+ cdec $c1 $c0
+ cxor $c6 $c1
+ cxsout $c6
+ cxor $c6 $c0
+ ret
+
+crypt_cfb_e_prep:
+ cs0begin 4
+ cenc $c6 $c6
+ cxsin $c0
+ cxor $c6 $c0
+ cxsout $c6
+ ret
+
+crypt_cfb_d_prep:
+ cs0begin 4
+ cenc $c0 $c6
+ cxsin $c6
+ cxor $c0 $c6
+ cxsout $c0
+ ret
+
+crypt_ofb_prep:
+ cs0begin 4
+ cenc $c6 $c6
+ cxsin $c0
+ cxor $c0 $c6
+ cxsout $c0
+ ret
+
+crypt_ctr_prep:
+ cs0begin 5
+ cenc $c1 $c6
+ cadd $c6 1
+ cxsin $c0
+ cxor $c0 $c1
+ cxsout $c0
+ ret
+
+crypt_cbc_mac_prep:
+ cs0begin 3
+ cxsin $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ ret
+
+crypt_cmac_finish_complete_prep:
+ cs0begin 7
+ cxsin $c0
+ cxor $c6 $c0
+ cxor $c0 $c0
+ cenc $c0 $c0
+ cprecmac $c0 $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ ret
+
+crypt_cmac_finish_partial_prep:
+ cs0begin 8
+ cxsin $c0
+ cxor $c6 $c0
+ cxor $c0 $c0
+ cenc $c0 $c0
+ cprecmac $c0 $c0
+ cprecmac $c0 $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ ret
+
+// TODO
+crypt_do_in:
+ add b32 $r3 $r5
+ mov $xdbase $r4
+ mov $r9 #swap
+ sethi $r9 0x20000
+ crypt_do_in_loop:
+ xdld $r5 $r9
+ xdwait
+ cxset 0x22
+ xdst $r0 $r9
+ cs0exec 1
+ xdwait
+ add b32 $r5 0x10
+ cmpu b32 $r5 $r3
+ bra ne #crypt_do_in_loop
+ cxset 1
+ xdwait
+ ret
+
+crypt_do_out:
+ add b32 $r3 $r7
+ mov $xdbase $r6
+ mov $r9 #swap
+ sethi $r9 0x20000
+ crypt_do_out_loop:
+ cs0exec 1
+ cxset 0x61
+ xdld $r7 $r9
+ xdst $r7 $r9
+ cxset 1
+ xdwait
+ add b32 $r7 0x10
+ cmpu b32 $r7 $r3
+ bra ne #crypt_do_out_loop
+ ret
+
+crypt_do_inout:
+ add b32 $r3 $r5
+ mov $r9 #swap
+ sethi $r9 0x20000
+ crypt_do_inout_loop:
+ mov $xdbase $r4
+ xdld $r5 $r9
+ xdwait
+ cxset 0x21
+ xdst $r0 $r9
+ cs0exec 1
+ cxset 0x61
+ mov $xdbase $r6
+ xdld $r7 $r9
+ xdst $r7 $r9
+ cxset 1
+ xdwait
+ add b32 $r5 0x10
+ add b32 $r7 0x10
+ cmpu b32 $r5 $r3
+ bra ne #crypt_do_inout_loop
+ ret
+
+.align 0x100
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h
new file mode 100644
index 00000000000..38676c74e6e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h
@@ -0,0 +1,584 @@
+uint32_t nv98_pcrypt_data[] = {
+/* 0x0000: ctx_dma */
+/* 0x0000: ctx_dma_query */
+ 0x00000000,
+/* 0x0004: ctx_dma_src */
+ 0x00000000,
+/* 0x0008: ctx_dma_dst */
+ 0x00000000,
+/* 0x000c: ctx_query_address_high */
+ 0x00000000,
+/* 0x0010: ctx_query_address_low */
+ 0x00000000,
+/* 0x0014: ctx_query_counter */
+ 0x00000000,
+/* 0x0018: ctx_cond_address_high */
+ 0x00000000,
+/* 0x001c: ctx_cond_address_low */
+ 0x00000000,
+/* 0x0020: ctx_cond_off */
+ 0x00000000,
+/* 0x0024: ctx_src_address_high */
+ 0x00000000,
+/* 0x0028: ctx_src_address_low */
+ 0x00000000,
+/* 0x002c: ctx_dst_address_high */
+ 0x00000000,
+/* 0x0030: ctx_dst_address_low */
+ 0x00000000,
+/* 0x0034: ctx_mode */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0040: ctx_key */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0050: ctx_iv */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0080: swap */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x00a0: common_cmd_dtable */
+ 0x0002000c,
+ 0xffffff00,
+ 0x00020010,
+ 0x0000000f,
+ 0x00020014,
+ 0x00000000,
+ 0x00000192,
+ 0xfffffffe,
+ 0x00020018,
+ 0xffffff00,
+ 0x0002001c,
+ 0x0000000f,
+ 0x000001d7,
+ 0xfffffff8,
+ 0x00000260,
+ 0xffffffff,
+/* 0x00e0: engine_cmd_dtable */
+ 0x00020040,
+ 0x00000000,
+ 0x00020044,
+ 0x00000000,
+ 0x00020048,
+ 0x00000000,
+ 0x0002004c,
+ 0x00000000,
+ 0x00020050,
+ 0x00000000,
+ 0x00020054,
+ 0x00000000,
+ 0x00020058,
+ 0x00000000,
+ 0x0002005c,
+ 0x00000000,
+ 0x00020024,
+ 0xffffff00,
+ 0x00020028,
+ 0x0000000f,
+ 0x0002002c,
+ 0xffffff00,
+ 0x00020030,
+ 0x0000000f,
+ 0x00000271,
+ 0xfffffff0,
+ 0x00010285,
+ 0xf000000f,
+/* 0x0150: crypt_dtable */
+ 0x04db0321,
+ 0x04b1032f,
+ 0x04db0339,
+ 0x04db034b,
+ 0x04db0361,
+ 0x04db0377,
+ 0x04db0395,
+ 0x04db03af,
+ 0x04db03cd,
+ 0x04db03e3,
+ 0x04db03f9,
+ 0x04db040f,
+ 0x04830429,
+ 0x0483043b,
+ 0x0483045d,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+uint32_t nv98_pcrypt_code[] = {
+ 0x17f004bd,
+ 0x0010fe35,
+ 0xf10004fe,
+ 0xf0fff017,
+ 0x27f10013,
+ 0x21d00400,
+ 0x0c15f0c0,
+ 0xf00021d0,
+ 0x27f10317,
+ 0x21d01200,
+ 0x1031f400,
+/* 0x002f: spin */
+ 0xf40031f4,
+ 0x0ef40028,
+/* 0x0035: ih */
+ 0x8001cffd,
+ 0xb00812c4,
+ 0x0bf40024,
+ 0x0027f167,
+ 0x002bfe77,
+ 0xf00007fe,
+ 0x23f00027,
+ 0x0037f105,
+ 0x0034cf14,
+ 0xb0014594,
+ 0x18f40055,
+ 0x0602fa17,
+ 0x4af003f8,
+ 0x0034d01e,
+ 0xd00147f0,
+ 0x0ef48034,
+/* 0x0075: ctxload */
+ 0x4034cf33,
+ 0xb0014f94,
+ 0x18f400f5,
+ 0x0502fa21,
+ 0x57f003f8,
+ 0x0267f000,
+/* 0x008c: ctxload_dma_loop */
+ 0xa07856bc,
+ 0xb6018068,
+ 0x87d00884,
+ 0x0162b600,
+/* 0x009f: dummyload */
+ 0xf0f018f4,
+ 0x35d00257,
+/* 0x00a5: noctx */
+ 0x0412c480,
+ 0xf50024b0,
+ 0xf100df0b,
+ 0xcf190037,
+ 0x33cf4032,
+ 0xff24e400,
+ 0x1024b607,
+ 0x07bf45e4,
+ 0xf50054b0,
+ 0xf100b90b,
+ 0xf1fae057,
+ 0xb000ce67,
+ 0x18f4c044,
+ 0xa057f14d,
+ 0x8867f1fc,
+ 0x8044b000,
+ 0xb03f18f4,
+ 0x18f46044,
+ 0x5044b019,
+ 0xf1741bf4,
+ 0xbd220027,
+ 0x0233f034,
+ 0xf50023d0,
+/* 0x0103: dma_cmd */
+ 0xb000810e,
+ 0x18f46344,
+ 0x0245945e,
+ 0xfe8050b7,
+ 0x801e39f0,
+ 0x40b70053,
+ 0x44b60120,
+ 0x0043d008,
+/* 0x0123: dtable_cmd */
+ 0xb8600ef4,
+ 0x18f40446,
+ 0x0344b63e,
+ 0x980045bb,
+ 0x53fd0145,
+ 0x0054b004,
+ 0x58291bf4,
+ 0x46580045,
+ 0x0264b001,
+ 0x98170bf4,
+ 0x67fd0807,
+ 0x0164b004,
+ 0xf9300bf4,
+ 0x0f01f455,
+/* 0x015b: cmd_setctx */
+ 0x80280ef4,
+ 0x0ef40053,
+/* 0x0161: invalid_bitfield */
+ 0x0125f022,
+/* 0x0164: dispatch_error */
+/* 0x0164: illegal_mthd */
+ 0x100047f1,
+ 0xd00042d0,
+ 0x47f04043,
+ 0x0004d040,
+/* 0x0174: im_loop */
+ 0xf08004cf,
+ 0x44b04044,
+ 0xf71bf400,
+/* 0x0180: cmddone */
+ 0x1d0037f1,
+ 0xd00147f0,
+/* 0x018a: nocmd */
+ 0x11c40034,
+ 0x4001d00c,
+/* 0x0192: cmd_query_get */
+ 0x38f201f8,
+ 0x0325f001,
+ 0x0b0047f1,
+/* 0x019c: ptimer_retry */
+ 0xcf4046cf,
+ 0x47cf0045,
+ 0x0467b840,
+ 0x98f41bf4,
+ 0x04800504,
+ 0x21008020,
+ 0x80220580,
+ 0x0bfe2306,
+ 0x03049800,
+ 0xfe1844b6,
+ 0x04980047,
+ 0x8057f104,
+ 0x0253f000,
+ 0xf80645fa,
+/* 0x01d7: cmd_cond_mode */
+ 0xf400f803,
+ 0x25f00131,
+ 0x0534b002,
+ 0xf41218f4,
+ 0x34b00132,
+ 0x0b18f402,
+ 0x800136f0,
+/* 0x01f2: return */
+ 0x00f80803,
+/* 0x01f4: cmd_cond_mode_queryful */
+ 0x98060498,
+ 0x56c40705,
+ 0x0855b6ff,
+ 0xfd1844b6,
+ 0x47fe0545,
+ 0x000bfe00,
+ 0x008057f1,
+ 0xfa0253f0,
+ 0x34b00565,
+ 0x131bf402,
+ 0x049803f8,
+ 0x0044b021,
+ 0x800b4cf0,
+ 0x00f80804,
+/* 0x022c: cmd_cond_mode_double */
+ 0xb61060b6,
+ 0x65fa1050,
+ 0x9803f805,
+ 0x06982005,
+ 0x0456b824,
+ 0x980b4cf0,
+ 0x06982105,
+ 0x0456b825,
+ 0xfd0b5cf0,
+ 0x34b00445,
+ 0x0b5cf003,
+ 0x800645fd,
+ 0x00f80804,
+/* 0x0260: cmd_wrcache_flush */
+ 0xf10132f4,
+ 0xbd220027,
+ 0x0133f034,
+ 0xf80023d0,
+/* 0x0271: crypt_cmd_mode */
+ 0x0131f400,
+ 0xb00225f0,
+ 0x18f40f34,
+ 0x0132f409,
+/* 0x0283: crypt_cmd_mode_return */
+ 0xf80d0380,
+/* 0x0285: crypt_cmd_length */
+ 0x0034b000,
+ 0xf4fb0bf4,
+ 0x47f0033c,
+ 0x0743f040,
+ 0xf00604fa,
+ 0x43f05047,
+ 0x0604fa06,
+ 0x3cf503f8,
+ 0x47f1c407,
+ 0x4bfe2100,
+ 0x09049800,
+ 0x950a0598,
+ 0x44b60858,
+ 0x0548fd18,
+ 0x98ff55c4,
+ 0x07980b06,
+ 0x0878950c,
+ 0xfd1864b6,
+ 0x77c40568,
+ 0x0d0898ff,
+ 0x580284b6,
+ 0x95f9a889,
+ 0xf9a98958,
+ 0x013cf495,
+ 0x3cf403f8,
+ 0xf803f861,
+ 0x18489503,
+ 0xbb084994,
+ 0x81b60095,
+ 0x09088000,
+ 0x950a0980,
+ 0x69941868,
+ 0x0097bb08,
+ 0x800081b6,
+ 0x09800b08,
+ 0x023cf40c,
+ 0xf05047f0,
+ 0x04fa0643,
+ 0xf803f805,
+/* 0x0321: crypt_copy_prep */
+ 0x203cf500,
+ 0x003cf594,
+ 0x003cf588,
+/* 0x032f: crypt_store_prep */
+ 0xf500f88c,
+ 0xf594103c,
+ 0xf88c063c,
+/* 0x0339: crypt_ecb_e_prep */
+ 0x303cf500,
+ 0x003cf594,
+ 0x003cf588,
+ 0x003cf5d0,
+/* 0x034b: crypt_ecb_d_prep */
+ 0xf500f88c,
+ 0xf5c8773c,
+ 0xf594303c,
+ 0xf588003c,
+ 0xf5d4003c,
+ 0xf88c003c,
+/* 0x0361: crypt_cbc_e_prep */
+ 0x403cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x663cf5ac,
+ 0x063cf5d0,
+/* 0x0377: crypt_cbc_d_prep */
+ 0xf500f88c,
+ 0xf5c8773c,
+ 0xf594503c,
+ 0xf584623c,
+ 0xf588063c,
+ 0xf5d4603c,
+ 0xf5ac203c,
+ 0xf88c003c,
+/* 0x0395: crypt_pcbc_e_prep */
+ 0x503cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x663cf5ac,
+ 0x063cf5d0,
+ 0x063cf58c,
+/* 0x03af: crypt_pcbc_d_prep */
+ 0xf500f8ac,
+ 0xf5c8773c,
+ 0xf594503c,
+ 0xf588003c,
+ 0xf5d4013c,
+ 0xf5ac163c,
+ 0xf58c063c,
+ 0xf8ac063c,
+/* 0x03cd: crypt_cfb_e_prep */
+ 0x403cf500,
+ 0x663cf594,
+ 0x003cf5d0,
+ 0x063cf588,
+ 0x063cf5ac,
+/* 0x03e3: crypt_cfb_d_prep */
+ 0xf500f88c,
+ 0xf594403c,
+ 0xf5d0603c,
+ 0xf588063c,
+ 0xf5ac603c,
+ 0xf88c003c,
+/* 0x03f9: crypt_ofb_prep */
+ 0x403cf500,
+ 0x663cf594,
+ 0x003cf5d0,
+ 0x603cf588,
+ 0x003cf5ac,
+/* 0x040f: crypt_ctr_prep */
+ 0xf500f88c,
+ 0xf594503c,
+ 0xf5d0613c,
+ 0xf5b0163c,
+ 0xf588003c,
+ 0xf5ac103c,
+ 0xf88c003c,
+/* 0x0429: crypt_cbc_mac_prep */
+ 0x303cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x663cf5ac,
+/* 0x043b: crypt_cmac_finish_complete_prep */
+ 0xf500f8d0,
+ 0xf594703c,
+ 0xf588003c,
+ 0xf5ac063c,
+ 0xf5ac003c,
+ 0xf5d0003c,
+ 0xf5bc003c,
+ 0xf5ac063c,
+ 0xf8d0663c,
+/* 0x045d: crypt_cmac_finish_partial_prep */
+ 0x803cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x003cf5ac,
+ 0x003cf5ac,
+ 0x003cf5d0,
+ 0x003cf5bc,
+ 0x063cf5bc,
+ 0x663cf5ac,
+/* 0x0483: crypt_do_in */
+ 0xbb00f8d0,
+ 0x47fe0035,
+ 0x8097f100,
+ 0x0293f000,
+/* 0x0490: crypt_do_in_loop */
+ 0xf80559fa,
+ 0x223cf403,
+ 0xf50609fa,
+ 0xf898103c,
+ 0x1050b603,
+ 0xf40453b8,
+ 0x3cf4e91b,
+ 0xf803f801,
+/* 0x04b1: crypt_do_out */
+ 0x0037bb00,
+ 0xf10067fe,
+ 0xf0008097,
+/* 0x04be: crypt_do_out_loop */
+ 0x3cf50293,
+ 0x3cf49810,
+ 0x0579fa61,
+ 0xf40679fa,
+ 0x03f8013c,
+ 0xb81070b6,
+ 0x1bf40473,
+/* 0x04db: crypt_do_inout */
+ 0xbb00f8e8,
+ 0x97f10035,
+ 0x93f00080,
+/* 0x04e5: crypt_do_inout_loop */
+ 0x0047fe02,
+ 0xf80559fa,
+ 0x213cf403,
+ 0xf50609fa,
+ 0xf498103c,
+ 0x67fe613c,
+ 0x0579fa00,
+ 0xf40679fa,
+ 0x03f8013c,
+ 0xb61050b6,
+ 0x53b81070,
+ 0xd41bf404,
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
new file mode 100644
index 00000000000..2551dafbec7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/gpuobj.h>
+
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+#include <engine/crypt.h>
+
+struct nv84_crypt_priv {
+ struct nouveau_engine base;
+};
+
+/*******************************************************************************
+ * Crypt object classes
+ ******************************************************************************/
+
+static int
+nv84_crypt_object_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_gpuobj *obj;
+ int ret;
+
+ ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+ 16, 16, 0, &obj);
+ *pobject = nv_object(obj);
+ if (ret)
+ return ret;
+
+ nv_wo32(obj, 0x00, nv_mclass(obj));
+ nv_wo32(obj, 0x04, 0x00000000);
+ nv_wo32(obj, 0x08, 0x00000000);
+ nv_wo32(obj, 0x0c, 0x00000000);
+ return 0;
+}
+
+static struct nouveau_ofuncs
+nv84_crypt_ofuncs = {
+ .ctor = nv84_crypt_object_ctor,
+ .dtor = _nouveau_gpuobj_dtor,
+ .init = _nouveau_gpuobj_init,
+ .fini = _nouveau_gpuobj_fini,
+ .rd32 = _nouveau_gpuobj_rd32,
+ .wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_oclass
+nv84_crypt_sclass[] = {
+ { 0x74c1, &nv84_crypt_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * PCRYPT context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_crypt_cclass = {
+ .handle = NV_ENGCTX(CRYPT, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PCRYPT engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_bitfield nv84_crypt_intr_mask[] = {
+ { 0x00000001, "INVALID_STATE" },
+ { 0x00000002, "ILLEGAL_MTHD" },
+ { 0x00000004, "ILLEGAL_CLASS" },
+ { 0x00000080, "QUERY" },
+ { 0x00000100, "FAULT" },
+ {}
+};
+
+static void
+nv84_crypt_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+ struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_object *engctx;
+ struct nv84_crypt_priv *priv = (void *)subdev;
+ u32 stat = nv_rd32(priv, 0x102130);
+ u32 mthd = nv_rd32(priv, 0x102190);
+ u32 data = nv_rd32(priv, 0x102194);
+ u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff;
+ int chid;
+
+ engctx = nouveau_engctx_get(engine, inst);
+ chid = pfifo->chid(pfifo, engctx);
+
+ if (stat) {
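+		/*
+		 * The empty format emits just the subdev log prefix; the
+		 * decoded status bits and channel info follow via pr_cont().
+		 */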
+ nv_error(priv, "%s", "");
+ nouveau_bitfield_print(nv84_crypt_intr_mask, stat);
+ pr_cont(" ch %d [0x%010llx %s] mthd 0x%04x data 0x%08x\n",
+ chid, (u64)inst << 12, nouveau_client_name(engctx),
+ mthd, data);
+ }
+
+ nv_wr32(priv, 0x102130, stat);
+ nv_wr32(priv, 0x10200c, 0x10);
+
+ nouveau_engctx_put(engctx);
+}
+
+static int
+nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv84_crypt_priv *priv;
+ int ret;
+
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PCRYPT", "crypt", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00004000;
+ nv_subdev(priv)->intr = nv84_crypt_intr;
+ nv_engine(priv)->cclass = &nv84_crypt_cclass;
+ nv_engine(priv)->sclass = nv84_crypt_sclass;
+ return 0;
+}
+
+static int
+nv84_crypt_init(struct nouveau_object *object)
+{
+ struct nv84_crypt_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_engine_init(&priv->base);
+ if (ret)
+ return ret;
+
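+	/*
+	 * Ack any stale interrupt status (0x102130, also written back in the
+	 * intr handler) and program what is presumably the interrupt mask.
+	 */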
+ nv_wr32(priv, 0x102130, 0xffffffff);
+ nv_wr32(priv, 0x102140, 0xffffffbf);
+ nv_wr32(priv, 0x10200c, 0x00000010);
+ return 0;
+}
+
+struct nouveau_oclass
+nv84_crypt_oclass = {
+ .handle = NV_ENGINE(CRYPT, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_crypt_ctor,
+ .dtor = _nouveau_engine_dtor,
+ .init = nv84_crypt_init,
+ .fini = _nouveau_engine_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
new file mode 100644
index 00000000000..c7082377ec7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/falcon.h>
+#include <engine/fifo.h>
+#include <engine/crypt.h>
+
+#include "fuc/nv98.fuc.h"
+
+struct nv98_crypt_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * Crypt object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv98_crypt_sclass[] = {
+ { 0x88b4, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PCRYPT context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv98_crypt_cclass = {
+ .handle = NV_ENGCTX(CRYPT, 0x98),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PCRYPT engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_enum nv98_crypt_isr_error_name[] = {
+ { 0x0000, "ILLEGAL_MTHD" },
+ { 0x0001, "INVALID_BITFIELD" },
+ { 0x0002, "INVALID_ENUM" },
+ { 0x0003, "QUERY" },
+ {}
+};
+
+static void
+nv98_crypt_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+ struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_object *engctx;
+ struct nv98_crypt_priv *priv = (void *)subdev;
+ u32 disp = nv_rd32(priv, 0x08701c);
+ u32 stat = nv_rd32(priv, 0x087008) & disp & ~(disp >> 16);
+ u32 inst = nv_rd32(priv, 0x087050) & 0x3fffffff;
+ u32 ssta = nv_rd32(priv, 0x087040) & 0x0000ffff;
+ u32 addr = nv_rd32(priv, 0x087040) >> 16;
+ u32 mthd = (addr & 0x07ff) << 2;
+ u32 subc = (addr & 0x3800) >> 11;
+ u32 data = nv_rd32(priv, 0x087044);
+ int chid;
+
+ engctx = nouveau_engctx_get(engine, inst);
+ chid = pfifo->chid(pfifo, engctx);
+
+ if (stat & 0x00000040) {
+ nv_error(priv, "DISPATCH_ERROR [");
+ nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
+ pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, (u64)inst << 12, nouveau_client_name(engctx),
+ subc, mthd, data);
+ nv_wr32(priv, 0x087004, 0x00000040);
+ stat &= ~0x00000040;
+ }
+
+ if (stat) {
+ nv_error(priv, "unhandled intr 0x%08x\n", stat);
+ nv_wr32(priv, 0x087004, stat);
+ }
+
+ nouveau_engctx_put(engctx);
+}
+
+static int
+nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv98_crypt_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x087000, true,
+ "PCRYPT", "crypt", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00004000;
+ nv_subdev(priv)->intr = nv98_crypt_intr;
+ nv_engine(priv)->cclass = &nv98_crypt_cclass;
+ nv_engine(priv)->sclass = nv98_crypt_sclass;
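+	/* hand the built-in fuc image to the falcon core for upload at init */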
+ nv_falcon(priv)->code.data = nv98_pcrypt_code;
+ nv_falcon(priv)->code.size = sizeof(nv98_pcrypt_code);
+ nv_falcon(priv)->data.data = nv98_pcrypt_data;
+ nv_falcon(priv)->data.size = sizeof(nv98_pcrypt_data);
+ return 0;
+}
+
+struct nouveau_oclass
+nv98_crypt_oclass = {
+ .handle = NV_ENGINE(CRYPT, 0x98),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv98_crypt_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = _nouveau_falcon_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
new file mode 100644
index 00000000000..18c8c7245b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -0,0 +1,566 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/device.h>
+#include <core/client.h>
+#include <core/option.h>
+
+#include <core/class.h>
+
+#include "priv.h"
+
+static DEFINE_MUTEX(nv_devices_mutex);
+static LIST_HEAD(nv_devices);
+
+struct nouveau_device *
+nouveau_device_find(u64 name)
+{
+	struct nouveau_device *device, *match = NULL;
+
+	mutex_lock(&nv_devices_mutex);
+ list_for_each_entry(device, &nv_devices, head) {
+ if (device->handle == name) {
+ match = device;
+ break;
+ }
+ }
+ mutex_unlock(&nv_devices_mutex);
+ return match;
+}
+
+/******************************************************************************
+ * nouveau_devobj (0x0080): class implementation
+ *****************************************************************************/
+struct nouveau_devobj {
+ struct nouveau_parent base;
+ struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
+};
+
+static const u64 disable_map[] = {
+ [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS,
+ [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_BUS] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_IBUS] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_PWR] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_ENGINE_PERFMON] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
+ [NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO,
+ [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH,
+ [NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG,
+ [NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME,
+ [NVDEV_ENGINE_VP] = NV_DEVICE_DISABLE_VP,
+ [NVDEV_ENGINE_CRYPT] = NV_DEVICE_DISABLE_CRYPT,
+ [NVDEV_ENGINE_BSP] = NV_DEVICE_DISABLE_BSP,
+ [NVDEV_ENGINE_PPP] = NV_DEVICE_DISABLE_PPP,
+ [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0,
+ [NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1,
+ [NVDEV_ENGINE_VIC] = NV_DEVICE_DISABLE_VIC,
+ [NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC,
+ [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP,
+ [NVDEV_SUBDEV_NR] = 0,
+};
+
+static int
+nouveau_devobj_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_client *client = nv_client(parent);
+ struct nouveau_device *device;
+ struct nouveau_devobj *devobj;
+ struct nv_device_class *args = data;
+ u32 boot0, strap;
+ u64 disable, mmio_base, mmio_size;
+ void __iomem *map;
+ int ret, i, c;
+
+ if (size < sizeof(struct nv_device_class))
+ return -EINVAL;
+
+ /* find the device subdev that matches what the client requested */
+ device = nv_device(client->device);
+ if (args->device != ~0) {
+ device = nouveau_device_find(args->device);
+ if (!device)
+ return -ENODEV;
+ }
+
+ ret = nouveau_parent_create(parent, nv_object(device), oclass, 0,
+ nouveau_control_oclass,
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_FIFO) |
+ (1ULL << NVDEV_ENGINE_DISP) |
+ (1ULL << NVDEV_ENGINE_PERFMON), &devobj);
+ *pobject = nv_object(devobj);
+ if (ret)
+ return ret;
+
+ mmio_base = nv_device_resource_start(device, 0);
+ mmio_size = nv_device_resource_len(device, 0);
+
+ /* translate api disable mask into internal mapping */
+ disable = args->debug0;
+ for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
+ if (args->disable & disable_map[i])
+ disable |= (1ULL << i);
+ }
+
+ /* identify the chipset, and determine classes of subdev/engines */
+ if (!(args->disable & NV_DEVICE_DISABLE_IDENTIFY) &&
+ !device->card_type) {
+ map = ioremap(mmio_base, 0x102000);
+ if (map == NULL)
+ return -ENOMEM;
+
+ /* switch mmio to cpu's native endianness */
+#ifndef __BIG_ENDIAN
+ if (ioread32_native(map + 0x000004) != 0x00000000)
+#else
+ if (ioread32_native(map + 0x000004) == 0x00000000)
+#endif
+ iowrite32_native(0x01000001, map + 0x000004);
+
+ /* read boot0 and strapping information */
+ boot0 = ioread32_native(map + 0x000000);
+ strap = ioread32_native(map + 0x101000);
+ iounmap(map);
+
+ /* determine chipset and derive architecture from it */
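+		/* e.g. a hypothetical boot0 of 0x0e7000a1 decodes to chipset
+		 * 0xe7, which the switch below maps to card_type NV_E0 */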
+ if ((boot0 & 0x1f000000) > 0) {
+ device->chipset = (boot0 & 0x1ff00000) >> 20;
+ switch (device->chipset & 0x1f0) {
+ case 0x010: {
+ if (0x461 & (1 << (device->chipset & 0xf)))
+ device->card_type = NV_10;
+ else
+ device->card_type = NV_11;
+ break;
+ }
+ case 0x020: device->card_type = NV_20; break;
+ case 0x030: device->card_type = NV_30; break;
+ case 0x040:
+ case 0x060: device->card_type = NV_40; break;
+ case 0x050:
+ case 0x080:
+ case 0x090:
+ case 0x0a0: device->card_type = NV_50; break;
+ case 0x0c0: device->card_type = NV_C0; break;
+ case 0x0d0: device->card_type = NV_D0; break;
+ case 0x0e0:
+ case 0x0f0:
+ case 0x100: device->card_type = NV_E0; break;
+ case 0x110: device->card_type = GM100; break;
+ default:
+ break;
+ }
+ } else
+ if ((boot0 & 0xff00fff0) == 0x20004000) {
+ if (boot0 & 0x00f00000)
+ device->chipset = 0x05;
+ else
+ device->chipset = 0x04;
+ device->card_type = NV_04;
+ }
+
+ switch (device->card_type) {
+ case NV_04: ret = nv04_identify(device); break;
+ case NV_10:
+ case NV_11: ret = nv10_identify(device); break;
+ case NV_20: ret = nv20_identify(device); break;
+ case NV_30: ret = nv30_identify(device); break;
+ case NV_40: ret = nv40_identify(device); break;
+ case NV_50: ret = nv50_identify(device); break;
+ case NV_C0:
+ case NV_D0: ret = nvc0_identify(device); break;
+ case NV_E0: ret = nve0_identify(device); break;
+ case GM100: ret = gm100_identify(device); break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ nv_error(device, "unknown chipset, 0x%08x\n", boot0);
+ return ret;
+ }
+
+ nv_info(device, "BOOT0 : 0x%08x\n", boot0);
+ nv_info(device, "Chipset: %s (NV%02X)\n",
+ device->cname, device->chipset);
+ nv_info(device, "Family : NV%02X\n", device->card_type);
+
+ /* determine frequency of timing crystal */
+ if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
+ (device->chipset >= 0x20 && device->chipset < 0x25))
+ strap &= 0x00000040;
+ else
+ strap &= 0x00400040;
+
+ switch (strap) {
+ case 0x00000000: device->crystal = 13500; break;
+ case 0x00000040: device->crystal = 14318; break;
+ case 0x00400000: device->crystal = 27000; break;
+ case 0x00400040: device->crystal = 25000; break;
+ }
+
+ nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
+ }
+
+ if (!(args->disable & NV_DEVICE_DISABLE_MMIO) &&
+ !nv_subdev(device)->mmio) {
+ nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size);
+ if (!nv_subdev(device)->mmio) {
+ nv_error(device, "unable to map device registers\n");
+ return -ENOMEM;
+ }
+ }
+
+ /* ensure requested subsystems are available for use */
+ for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
+ if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
+ continue;
+
+ if (device->subdev[i]) {
+ nouveau_object_ref(device->subdev[i],
+ &devobj->subdev[i]);
+ continue;
+ }
+
+ ret = nouveau_object_ctor(nv_object(device), NULL,
+ oclass, NULL, i,
+ &devobj->subdev[i]);
+ if (ret == -ENODEV)
+ continue;
+ if (ret)
+ return ret;
+
+ device->subdev[i] = devobj->subdev[i];
+
+ /* note: can't init *any* subdevs until devinit has been run
+ * due to not knowing exactly what the vbios init tables will
+ * mess with. devinit also can't be run until all of its
+ * dependencies have been created.
+ *
+ * this code delays init of any subdev until all of devinit's
+ * dependencies have been created, and then initialises each
+ * subdev in turn as they're created.
+ */
+ while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
+ struct nouveau_object *subdev = devobj->subdev[c++];
+ if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
+ ret = nouveau_object_inc(subdev);
+ if (ret)
+ return ret;
+ atomic_dec(&nv_object(device)->usecount);
+ } else
+ if (subdev) {
+ nouveau_subdev_reset(subdev);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void
+nouveau_devobj_dtor(struct nouveau_object *object)
+{
+ struct nouveau_devobj *devobj = (void *)object;
+ int i;
+
+ for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
+ nouveau_object_ref(NULL, &devobj->subdev[i]);
+
+ nouveau_parent_destroy(&devobj->base);
+}
+
+static u8
+nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
+{
+ return nv_rd08(object->engine, addr);
+}
+
+static u16
+nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
+{
+ return nv_rd16(object->engine, addr);
+}
+
+static u32
+nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
+{
+ return nv_rd32(object->engine, addr);
+}
+
+static void
+nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
+{
+ nv_wr08(object->engine, addr, data);
+}
+
+static void
+nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
+{
+ nv_wr16(object->engine, addr, data);
+}
+
+static void
+nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ nv_wr32(object->engine, addr, data);
+}
+
+static struct nouveau_ofuncs
+nouveau_devobj_ofuncs = {
+ .ctor = nouveau_devobj_ctor,
+ .dtor = nouveau_devobj_dtor,
+ .init = _nouveau_parent_init,
+ .fini = _nouveau_parent_fini,
+ .rd08 = nouveau_devobj_rd08,
+ .rd16 = nouveau_devobj_rd16,
+ .rd32 = nouveau_devobj_rd32,
+ .wr08 = nouveau_devobj_wr08,
+ .wr16 = nouveau_devobj_wr16,
+ .wr32 = nouveau_devobj_wr32,
+};
+
+/******************************************************************************
+ * nouveau_device: engine functions
+ *****************************************************************************/
+static struct nouveau_oclass
+nouveau_device_sclass[] = {
+ { 0x0080, &nouveau_devobj_ofuncs },
+ {}
+};
+
+static int
+nouveau_device_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_device *device = (void *)object;
+ struct nouveau_object *subdev;
+ int ret, i;
+
+ for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
+ if ((subdev = device->subdev[i])) {
+ if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
+ ret = nouveau_object_dec(subdev, suspend);
+ if (ret && suspend)
+ goto fail;
+ }
+ }
+ }
+
+ ret = 0;
+fail:
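+	/* on suspend failure, bring already-suspended subdevs back up */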
+ for (; ret && i < NVDEV_SUBDEV_NR; i++) {
+ if ((subdev = device->subdev[i])) {
+ if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
+ ret = nouveau_object_inc(subdev);
+ if (ret) {
+ /* XXX */
+ }
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int
+nouveau_device_init(struct nouveau_object *object)
+{
+ struct nouveau_device *device = (void *)object;
+ struct nouveau_object *subdev;
+ int ret, i;
+
+ for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
+ if ((subdev = device->subdev[i])) {
+ if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
+ ret = nouveau_object_inc(subdev);
+ if (ret)
+ goto fail;
+ } else {
+ nouveau_subdev_reset(subdev);
+ }
+ }
+ }
+
+ ret = 0;
+fail:
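+	/* unwind: tear down the subdevs initialised before the failure */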
+ for (--i; ret && i >= 0; i--) {
+ if ((subdev = device->subdev[i])) {
+ if (!nv_iclass(subdev, NV_ENGINE_CLASS))
+ nouveau_object_dec(subdev, false);
+ }
+ }
+
+ return ret;
+}
+
+static void
+nouveau_device_dtor(struct nouveau_object *object)
+{
+ struct nouveau_device *device = (void *)object;
+
+ mutex_lock(&nv_devices_mutex);
+ list_del(&device->head);
+ mutex_unlock(&nv_devices_mutex);
+
+ if (nv_subdev(device)->mmio)
+ iounmap(nv_subdev(device)->mmio);
+
+ nouveau_engine_destroy(&device->base);
+}
+
+resource_size_t
+nv_device_resource_start(struct nouveau_device *device, unsigned int bar)
+{
+ if (nv_device_is_pci(device)) {
+ return pci_resource_start(device->pdev, bar);
+ } else {
+ struct resource *res;
+ res = platform_get_resource(device->platformdev,
+ IORESOURCE_MEM, bar);
+ if (!res)
+ return 0;
+ return res->start;
+ }
+}
+
+resource_size_t
+nv_device_resource_len(struct nouveau_device *device, unsigned int bar)
+{
+ if (nv_device_is_pci(device)) {
+ return pci_resource_len(device->pdev, bar);
+ } else {
+ struct resource *res;
+ res = platform_get_resource(device->platformdev,
+ IORESOURCE_MEM, bar);
+ if (!res)
+ return 0;
+ return resource_size(res);
+ }
+}
+
+dma_addr_t
+nv_device_map_page(struct nouveau_device *device, struct page *page)
+{
+ dma_addr_t ret;
+
+ if (nv_device_is_pci(device)) {
+ ret = pci_map_page(device->pdev, page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(device->pdev, ret))
+ ret = 0;
+ } else {
+ ret = page_to_phys(page);
+ }
+
+ return ret;
+}
+
+void
+nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr)
+{
+ if (nv_device_is_pci(device))
+ pci_unmap_page(device->pdev, addr, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+}
+
+int
+nv_device_get_irq(struct nouveau_device *device, bool stall)
+{
+ if (nv_device_is_pci(device)) {
+ return device->pdev->irq;
+ } else {
+ return platform_get_irq_byname(device->platformdev,
+ stall ? "stall" : "nonstall");
+ }
+}
+
+static struct nouveau_oclass
+nouveau_device_oclass = {
+ .handle = NV_ENGINE(DEVICE, 0x00),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .dtor = nouveau_device_dtor,
+ .init = nouveau_device_init,
+ .fini = nouveau_device_fini,
+ },
+};
+
+int
+nouveau_device_create_(void *dev, enum nv_bus_type type, u64 name,
+ const char *sname, const char *cfg, const char *dbg,
+ int length, void **pobject)
+{
+ struct nouveau_device *device;
+ int ret = -EEXIST;
+
+ mutex_lock(&nv_devices_mutex);
+ list_for_each_entry(device, &nv_devices, head) {
+ if (device->handle == name)
+ goto done;
+ }
+
+ ret = nouveau_engine_create_(NULL, NULL, &nouveau_device_oclass, true,
+ "DEVICE", "device", length, pobject);
+ device = *pobject;
+ if (ret)
+ goto done;
+
+ switch (type) {
+ case NOUVEAU_BUS_PCI:
+ device->pdev = dev;
+ break;
+ case NOUVEAU_BUS_PLATFORM:
+ device->platformdev = dev;
+ break;
+ }
+ device->handle = name;
+ device->cfgopt = cfg;
+ device->dbgopt = dbg;
+ device->name = sname;
+
+ nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
+ nv_engine(device)->sclass = nouveau_device_sclass;
+ list_add(&device->head, &nv_devices);
+done:
+ mutex_unlock(&nv_devices_mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
new file mode 100644
index 00000000000..4b69bf56ed0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <core/object.h>
+#include <core/class.h>
+
+#include <subdev/clock.h>
+
+#include "priv.h"
+
+static int
+nouveau_control_mthd_pstate_info(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nouveau_clock *clk = nouveau_clock(object);
+ struct nv_control_pstate_info *args = data;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ if (clk) {
+ args->count = clk->state_nr;
+ args->ustate = clk->ustate;
+ args->pstate = clk->pstate;
+ } else {
+ args->count = 0;
+ args->ustate = NV_CONTROL_PSTATE_INFO_USTATE_DISABLE;
+ args->pstate = NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int
+nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nouveau_clock *clk = nouveau_clock(object);
+ struct nv_control_pstate_attr *args = data;
+ struct nouveau_clocks *domain;
+ struct nouveau_pstate *pstate;
+ struct nouveau_cstate *cstate;
+ int i = 0, j = -1;
+ u32 lo, hi;
+
+ if ((size < sizeof(*args)) || !clk ||
+ (args->state >= 0 && args->state >= clk->state_nr))
+ return -EINVAL;
+ domain = clk->domains;
+
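+	/* walk to the args->index'th domain that has a user-visible name */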
+ while (domain->name != nv_clk_src_max) {
+ if (domain->mname && ++j == args->index)
+ break;
+ domain++;
+ }
+
+ if (domain->name == nv_clk_src_max)
+ return -EINVAL;
+
+ if (args->state != NV_CONTROL_PSTATE_ATTR_STATE_CURRENT) {
+ list_for_each_entry(pstate, &clk->states, head) {
+ if (i++ == args->state)
+ break;
+ }
+
+ lo = pstate->base.domain[domain->name];
+ hi = lo;
+ list_for_each_entry(cstate, &pstate->list, head) {
+ lo = min(lo, cstate->domain[domain->name]);
+ hi = max(hi, cstate->domain[domain->name]);
+ }
+
+ args->state = pstate->pstate;
+ } else {
+ lo = max(clk->read(clk, domain->name), 0);
+ hi = lo;
+ }
+
+ snprintf(args->name, sizeof(args->name), "%s", domain->mname);
+ snprintf(args->unit, sizeof(args->unit), "MHz");
+ args->min = lo / domain->mdiv;
+ args->max = hi / domain->mdiv;
+
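+	/* hand the caller the next index to query; it stays 0 when none remain */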
+ args->index = 0;
+ while ((++domain)->name != nv_clk_src_max) {
+ if (domain->mname) {
+ args->index = ++j;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nouveau_control_mthd_pstate_user(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nouveau_clock *clk = nouveau_clock(object);
+ struct nv_control_pstate_user *args = data;
+
+ if (size < sizeof(*args) || !clk)
+ return -EINVAL;
+
+ return nouveau_clock_ustate(clk, args->state);
+}
+
+struct nouveau_oclass
+nouveau_control_oclass[] = {
+ { .handle = NV_CONTROL_CLASS,
+ .ofuncs = &nouveau_object_ofuncs,
+ .omthds = (struct nouveau_omthds[]) {
+ { NV_CONTROL_PSTATE_INFO,
+ NV_CONTROL_PSTATE_INFO, nouveau_control_mthd_pstate_info },
+ { NV_CONTROL_PSTATE_ATTR,
+ NV_CONTROL_PSTATE_ATTR, nouveau_control_mthd_pstate_attr },
+ { NV_CONTROL_PSTATE_USER,
+ NV_CONTROL_PSTATE_USER, nouveau_control_mthd_pstate_user },
+ {},
+ },
+ },
+ {}
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
new file mode 100644
index 00000000000..a520029e25d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/mxm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/ltcg.h>
+#include <subdev/ibus.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+#include <engine/copy.h>
+#include <engine/bsp.h>
+#include <engine/vp.h>
+#include <engine/ppp.h>
+#include <engine/perfmon.h>
+
+int
+gm100_identify(struct nouveau_device *device)
+{
+ switch (device->chipset) {
+ case 0x117:
+ device->cname = "GM107";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+#if 0
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+#endif
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gm107_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gm107_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+#if 0
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+#endif
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gm107_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = gm107_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+#if 0
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+#endif
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+#if 0
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+#endif
+ break;
+ default:
+ nv_fatal(device, "unknown Maxwell chipset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
new file mode 100644
index 00000000000..40b29d0214c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+
+int
+nv04_identify(struct nouveau_device *device)
+{
+ switch (device->chipset) {
+ case 0x04:
+ device->cname = "NV04";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv04_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ case 0x05:
+ device->cname = "NV05";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv05_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ default:
+ nv_fatal(device, "unknown RIVA chipset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
new file mode 100644
index 00000000000..5f7c25ff523
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+
+int
+nv10_identify(struct nouveau_device *device)
+{
+ switch (device->chipset) {
+ case 0x10:
+ device->cname = "NV10";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ case 0x15:
+ device->cname = "NV15";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ case 0x16:
+ device->cname = "NV16";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ case 0x1a:
+ device->cname = "nForce";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ case 0x11:
+ device->cname = "NV11";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ case 0x17:
+ device->cname = "NV17";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ case 0x1f:
+ device->cname = "nForce2";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ case 0x18:
+ device->cname = "NV18";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ default:
+ nv_fatal(device, "unknown Celsius chipset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
new file mode 100644
index 00000000000..75fed11bba0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+
+int
+nv20_identify(struct nouveau_device *device)
+{
+ switch (device->chipset) {
+ case 0x20:
+ device->cname = "NV20";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ case 0x25:
+ device->cname = "NV25";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ case 0x28:
+ device->cname = "NV28";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ case 0x2a:
+ device->cname = "NV2A";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ default:
+ nv_fatal(device, "unknown Kelvin chipset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
new file mode 100644
index 00000000000..36919d7db7c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/mpeg.h>
+#include <engine/disp.h>
+
+int
+nv30_identify(struct nouveau_device *device)
+{
+ switch (device->chipset) {
+ case 0x30:
+ device->cname = "NV30";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ case 0x35:
+ device->cname = "NV35";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ case 0x31:
+ device->cname = "NV31";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ case 0x36:
+ device->cname = "NV36";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ case 0x34:
+ device->cname = "NV34";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ break;
+ default:
+ nv_fatal(device, "unknown Rankine chipset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
new file mode 100644
index 00000000000..1130a62be2c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -0,0 +1,426 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+#include <subdev/volt.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/mpeg.h>
+#include <engine/disp.h>
+#include <engine/perfmon.h>
+
+int
+nv40_identify(struct nouveau_device *device)
+{
+ switch (device->chipset) {
+ case 0x40:
+ device->cname = "NV40";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ break;
+ case 0x41:
+ device->cname = "NV41";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ break;
+ case 0x42:
+ device->cname = "NV42";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ break;
+ case 0x43:
+ device->cname = "NV43";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ break;
+ case 0x45:
+ device->cname = "NV45";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ break;
+ case 0x47:
+ device->cname = "G70";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv47_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ break;
+ case 0x49:
+ device->cname = "G71";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ break;
+ case 0x4b:
+ device->cname = "G73";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ break;
+ case 0x44:
+ device->cname = "NV44";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ break;
+ case 0x46:
+ device->cname = "G72";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ break;
+ case 0x4a:
+ device->cname = "NV44A";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ break;
+ case 0x4c:
+ device->cname = "C61";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ break;
+ case 0x4e:
+ device->cname = "C51";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv4e_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ break;
+ case 0x63:
+ device->cname = "C73";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ break;
+ case 0x67:
+ device->cname = "C67";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ break;
+ case 0x68:
+ device->cname = "C68";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
+ break;
+ default:
+ nv_fatal(device, "unknown Curie chipset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
new file mode 100644
index 00000000000..ef0b0bde1a9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -0,0 +1,460 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/mxm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/mpeg.h>
+#include <engine/vp.h>
+#include <engine/crypt.h>
+#include <engine/bsp.h>
+#include <engine/ppp.h>
+#include <engine/copy.h>
+#include <engine/disp.h>
+#include <engine/perfmon.h>
+
+int
+nv50_identify(struct nouveau_device *device)
+{
+ switch (device->chipset) {
+ case 0x50:
+ device->cname = "G80";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv50_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv50_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv50_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv50_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv50_perfmon_oclass;
+ break;
+ case 0x84:
+ device->cname = "G84";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv84_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
+ break;
+ case 0x86:
+ device->cname = "G86";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv84_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
+ break;
+ case 0x92:
+ device->cname = "G92";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv84_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
+ break;
+ case 0x94:
+ device->cname = "G94";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
+ break;
+ case 0x96:
+ device->cname = "G96";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
+ break;
+ case 0x98:
+ device->cname = "G98";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
+ break;
+ case 0xa0:
+ device->cname = "G200";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
+ break;
+ case 0xaa:
+ device->cname = "MCP77/MCP78";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
+ break;
+ case 0xac:
+ device->cname = "MCP79/MCP7A";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nv94_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
+ break;
+ case 0xa3:
+ device->cname = "GT215";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
+ break;
+ case 0xa5:
+ device->cname = "GT216";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
+ break;
+ case 0xa8:
+ device->cname = "GT218";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
+ break;
+ case 0xaf:
+ device->cname = "MCP89";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvaf_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvaf_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
+ break;
+ default:
+ nv_fatal(device, "unknown Tesla chipset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
new file mode 100644
index 00000000000..8d55ed633b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/mxm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/ltcg.h>
+#include <subdev/ibus.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/vp.h>
+#include <engine/bsp.h>
+#include <engine/ppp.h>
+#include <engine/copy.h>
+#include <engine/disp.h>
+#include <engine/perfmon.h>
+
+int
+nvc0_identify(struct nouveau_device *device)
+{
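+ /* each case below fills device->oclass[] with the subdev/engine
+  * classes for one Fermi variant; slots left unset stay NULL and
+  * those units are simply not instantiated on that chipset */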
+ switch (device->chipset) {
+ case 0xc0:
+ device->cname = "GF100";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvc0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
+ break;
+ case 0xc4:
+ device->cname = "GF104";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
+ break;
+ case 0xc3:
+ device->cname = "GF106";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
+ break;
+ case 0xce:
+ device->cname = "GF114";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
+ break;
+ case 0xcf:
+ device->cname = "GF116";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
+ break;
+ case 0xc1:
+ device->cname = "GF108";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvc1_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
+ break;
+ case 0xc8:
+ device->cname = "GF110";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvc8_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
+ break;
+ case 0xd9:
+ device->cname = "GF119";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nvd0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvd9_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nvd0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
+ break;
+ case 0xd7:
+ device->cname = "GF117";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nvd0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = gf117_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvd7_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nvd0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
+ break;
+ default:
+ nv_fatal(device, "unknown Fermi chipset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
new file mode 100644
index 00000000000..2d1e97d4264
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/mxm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/ltcg.h>
+#include <subdev/ibus.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+#include <engine/copy.h>
+#include <engine/bsp.h>
+#include <engine/vp.h>
+#include <engine/ppp.h>
+#include <engine/perfmon.h>
+
+int
+nve0_identify(struct nouveau_device *device)
+{
+ switch (device->chipset) {
+ case 0xe4:
+ device->cname = "GK104";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nve0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
+ break;
+ case 0xe7:
+ device->cname = "GK107";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nve0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
+ break;
+ case 0xe6:
+ device->cname = "GK106";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nve0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
+ break;
+ case 0xea:
+ device->cname = "GK20A";
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gk20a_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gk20a_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gk20a_graph_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
+ break;
+ case 0xf0:
+ device->cname = "GK110";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
+ break;
+ case 0xf1:
+ device->cname = "GK110B";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
+ break;
+ case 0x108:
+ device->cname = "GK208";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ break;
+ default:
+ nv_fatal(device, "unknown Kepler chipset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/priv.h b/drivers/gpu/drm/nouveau/core/engine/device/priv.h
new file mode 100644
index 00000000000..035fd5b9cfc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/priv.h
@@ -0,0 +1,8 @@
+#ifndef __NVKM_DEVICE_PRIV_H__
+#define __NVKM_DEVICE_PRIV_H__
+
+#include <engine/device.h>
+
+extern struct nouveau_oclass nouveau_control_oclass[];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
new file mode 100644
index 00000000000..9c38c5e4050
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+#include "outp.h"
+#include "conn.h"
+
+static int
+nouveau_disp_hpd_check(struct nouveau_event *event, u32 types, int index)
+{
+ struct nouveau_disp *disp = event->priv;
+ struct nvkm_output *outp;
+ list_for_each_entry(outp, &disp->outp, head) {
+ if (outp->conn->index == index) {
+ if (outp->conn->hpd.event)
+ return 0;
+ break;
+ }
+ }
+ return -ENOSYS;
+}
+
+int
+_nouveau_disp_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_disp *disp = (void *)object;
+ struct nvkm_output *outp;
+ int ret;
+
+ list_for_each_entry(outp, &disp->outp, head) {
+ ret = nv_ofuncs(outp)->fini(nv_object(outp), suspend);
+ if (ret && suspend)
+ goto fail_outp;
+ }
+
+ return nouveau_engine_fini(&disp->base, suspend);
+
+fail_outp:
+ list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
+ nv_ofuncs(outp)->init(nv_object(outp));
+ }
+
+ return ret;
+}
+
+int
+_nouveau_disp_init(struct nouveau_object *object)
+{
+ struct nouveau_disp *disp = (void *)object;
+ struct nvkm_output *outp;
+ int ret;
+
+ ret = nouveau_engine_init(&disp->base);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(outp, &disp->outp, head) {
+ ret = nv_ofuncs(outp)->init(nv_object(outp));
+ if (ret)
+ goto fail_outp;
+ }
+
+ return ret;
+
+fail_outp:
+ list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
+ nv_ofuncs(outp)->fini(nv_object(outp), false);
+ }
+
+ return ret;
+}
+
+void
+_nouveau_disp_dtor(struct nouveau_object *object)
+{
+ struct nouveau_disp *disp = (void *)object;
+ struct nvkm_output *outp, *outt;
+
+ nouveau_event_destroy(&disp->vblank);
+
+ if (disp->outp.next) {
+ list_for_each_entry_safe(outp, outt, &disp->outp, head) {
+ nouveau_object_ref(NULL, (struct nouveau_object **)&outp);
+ }
+ }
+
+ nouveau_engine_destroy(&disp->base);
+}
+
+int
+nouveau_disp_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int heads,
+ const char *intname, const char *extname,
+ int length, void **pobject)
+{
+ struct nouveau_disp_impl *impl = (void *)oclass;
+ struct nouveau_bios *bios = nouveau_bios(parent);
+ struct nouveau_disp *disp;
+ struct nouveau_oclass **sclass;
+ struct nouveau_object *object;
+ struct dcb_output dcbE;
+ u8 hpd = 0, ver, hdr;
+ u32 data;
+ int ret, i;
+
+ ret = nouveau_engine_create_(parent, engine, oclass, true,
+ intname, extname, length, pobject);
+ disp = *pobject;
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&disp->outp);
+
+ /* create output objects for each display path in the vbios */
+ i = -1;
+ while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+ if (dcbE.type == DCB_OUTPUT_UNUSED)
+ continue;
+ if (dcbE.type == DCB_OUTPUT_EOL)
+ break;
+ data = dcbE.location << 4 | dcbE.type;
+
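+ /* prefer an implementation-specific output class whose handle
+  * matches this (location, type) pair; otherwise fall back to
+  * the generic nvkm_output class */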
+ oclass = nvkm_output_oclass;
+ sclass = impl->outp;
+ while (sclass && sclass[0]) {
+ if (sclass[0]->handle == data) {
+ oclass = sclass[0];
+ break;
+ }
+ sclass++;
+ }
+
+ nouveau_object_ctor(*pobject, *pobject, oclass,
+ &dcbE, i, &object);
+ hpd = max(hpd, (u8)(dcbE.connector + 1));
+ }
+
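+ /* hpd now holds the highest connector index seen plus one, which
+  * sizes the hotplug event; 3 appears to be the number of event
+  * types (plug/unplug/irq) */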
+ ret = nouveau_event_create(3, hpd, &disp->hpd);
+ if (ret)
+ return ret;
+
+ disp->hpd->priv = disp;
+ disp->hpd->check = nouveau_disp_hpd_check;
+
+ ret = nouveau_event_create(1, heads, &disp->vblank);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
new file mode 100644
index 00000000000..4ffbc70ecf5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/gpio.h>
+
+#include "conn.h"
+#include "outp.h"
+
+static void
+nvkm_connector_hpd_work(struct work_struct *w)
+{
+ struct nvkm_connector *conn = container_of(w, typeof(*conn), hpd.work);
+ struct nouveau_disp *disp = nouveau_disp(conn);
+ struct nouveau_gpio *gpio = nouveau_gpio(conn);
+ u32 send = NVKM_HPD_UNPLUG;
+ if (gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.event->index))
+ send = NVKM_HPD_PLUG;
+ nouveau_event_trigger(disp->hpd, send, conn->index);
+ nouveau_event_get(conn->hpd.event);
+}
+
+static int
+nvkm_connector_hpd(void *data, u32 type, int index)
+{
+ struct nvkm_connector *conn = data;
+ DBG("HPD: %d\n", type);
+ schedule_work(&conn->hpd.work);
+ return NVKM_EVENT_DROP;
+}
+
+int
+_nvkm_connector_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nvkm_connector *conn = (void *)object;
+ if (conn->hpd.event)
+ nouveau_event_put(conn->hpd.event);
+ return nouveau_object_fini(&conn->base, suspend);
+}
+
+int
+_nvkm_connector_init(struct nouveau_object *object)
+{
+ struct nvkm_connector *conn = (void *)object;
+ int ret = nouveau_object_init(&conn->base);
+ if (ret == 0) {
+ if (conn->hpd.event)
+ nouveau_event_get(conn->hpd.event);
+ }
+ return ret;
+}
+
+void
+_nvkm_connector_dtor(struct nouveau_object *object)
+{
+ struct nvkm_connector *conn = (void *)object;
+ nouveau_event_ref(NULL, &conn->hpd.event);
+ nouveau_object_destroy(&conn->base);
+}
+
+int
+nvkm_connector_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ struct nvbios_connE *info, int index,
+ int length, void **pobject)
+{
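+ /* hpd[] maps the ffs() bit index of the connector table's hpd
+  * field to a DCB GPIO function number, looked up further down */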
+ static const u8 hpd[] = { 0x07, 0x08, 0x51, 0x52, 0x5e, 0x5f, 0x60 };
+ struct nouveau_gpio *gpio = nouveau_gpio(parent);
+ struct nouveau_disp *disp = (void *)engine;
+ struct nvkm_connector *conn;
+ struct nvkm_output *outp;
+ struct dcb_gpio_func func;
+ int ret;
+
+ list_for_each_entry(outp, &disp->outp, head) {
+ if (outp->conn && outp->conn->index == index) {
+ atomic_inc(&nv_object(outp->conn)->refcount);
+ *pobject = outp->conn;
+ return 1;
+ }
+ }
+
+ ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject);
+ conn = *pobject;
+ if (ret)
+ return ret;
+
+ conn->info = *info;
+ conn->index = index;
+
+ DBG("type %02x loc %d hpd %02x dp %x di %x sr %x lcdid %x\n",
+ info->type, info->location, info->hpd, info->dp,
+ info->di, info->sr, info->lcdid);
+
+ if ((info->hpd = ffs(info->hpd))) {
+ if (--info->hpd >= ARRAY_SIZE(hpd)) {
+ ERR("hpd %02x unknown\n", info->hpd);
+ goto done;
+ }
+ info->hpd = hpd[info->hpd];
+
+ ret = gpio->find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func);
+ if (ret) {
+ ERR("func %02x lookup failed, %d\n", info->hpd, ret);
+ goto done;
+ }
+
+ ret = nouveau_event_new(gpio->events, NVKM_GPIO_TOGGLED,
+ func.line, nvkm_connector_hpd,
+ conn, &conn->hpd.event);
+ if (ret) {
+ ERR("func %02x failed, %d\n", info->hpd, ret);
+ } else {
+ DBG("func %02x (HPD)\n", info->hpd);
+ }
+ }
+
+done:
+ INIT_WORK(&conn->hpd.work, nvkm_connector_hpd_work);
+ return 0;
+}
+
+int
+_nvkm_connector_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *info, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct nvkm_connector *conn;
+ int ret;
+
+ ret = nvkm_connector_create(parent, engine, oclass, info, index, &conn);
+ *pobject = nv_object(conn);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass *
+nvkm_connector_oclass = &(struct nvkm_connector_impl) {
+ .base = {
+ .handle = 0,
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_connector_ctor,
+ .dtor = _nvkm_connector_dtor,
+ .init = _nvkm_connector_init,
+ .fini = _nvkm_connector_fini,
+ },
+ },
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.h b/drivers/gpu/drm/nouveau/core/engine/disp/conn.h
new file mode 100644
index 00000000000..035ebeacbb1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.h
@@ -0,0 +1,61 @@
+#ifndef __NVKM_DISP_CONN_H__
+#define __NVKM_DISP_CONN_H__
+
+#include "priv.h"
+
+struct nvkm_connector {
+ struct nouveau_object base;
+ struct list_head head;
+
+ struct nvbios_connE info;
+ int index;
+
+ struct {
+ struct nouveau_eventh *event;
+ struct work_struct work;
+ } hpd;
+};
+
+#define nvkm_connector_create(p,e,c,b,i,d) \
+ nvkm_connector_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
+#define nvkm_connector_destroy(d) ({ \
+ struct nvkm_connector *disp = (d); \
+ _nvkm_connector_dtor(nv_object(disp)); \
+})
+#define nvkm_connector_init(d) ({ \
+ struct nvkm_connector *disp = (d); \
+ _nvkm_connector_init(nv_object(disp)); \
+})
+#define nvkm_connector_fini(d,s) ({ \
+ struct nvkm_connector *disp = (d); \
+ _nvkm_connector_fini(nv_object(disp), (s)); \
+})
+
+int nvkm_connector_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, struct nvbios_connE *,
+ int, int, void **);
+
+int _nvkm_connector_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void _nvkm_connector_dtor(struct nouveau_object *);
+int _nvkm_connector_init(struct nouveau_object *);
+int _nvkm_connector_fini(struct nouveau_object *, bool);
+
+struct nvkm_connector_impl {
+ struct nouveau_oclass base;
+};
+
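+/* logging helpers: these expect a variable named 'conn' to be in
+ * scope at the call site */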
+#ifndef MSG
+#define MSG(l,f,a...) do { \
+ struct nvkm_connector *_conn = (void *)conn; \
+ nv_##l(nv_object(conn)->engine, "%02x:%02x%02x: "f, _conn->index, \
+ _conn->info.location, _conn->info.type, ##a); \
+} while(0)
+#define DBG(f,a...) MSG(debug, f, ##a)
+#define ERR(f,a...) MSG(error, f, ##a)
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
new file mode 100644
index 00000000000..a66b27c0fca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
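+ /* wait for any pending request on this OR to complete, latch the
+  * new hsync/vsync/data/state bits with bit 31 (which reads as a
+  * busy/trigger flag here) set, then wait for the update to land */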
+ const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) |
+ (data & NV50_DISP_DAC_PWR_VSYNC) |
+ (data & NV50_DISP_DAC_PWR_DATA) |
+ (data & NV50_DISP_DAC_PWR_STATE);
+ const u32 doff = (or * 0x800);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+ nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+ return 0;
+}
+
+int
+nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
+{
+ const u32 doff = (or * 0x800);
+
+ nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+
+ nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
+ mdelay(9);
+ udelay(500);
+ loadval = nv_mask(priv, 0x61a00c + doff, 0xffffffff, 0x00000000);
+
+ nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+
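+ /* bit 31 of the readback flags a completed sample; bits 29:27
+  * report which sense lines saw a load (presumably one bit per
+  * R/G/B channel) */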
+ nv_debug(priv, "DAC%d sense: 0x%08x\n", or, loadval);
+ if (!(loadval & 0x80000000))
+ return -ETIMEDOUT;
+
+ return (loadval & 0x38000000) >> 27;
+}
+
+int
+nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR);
+ u32 *data = args;
+ int ret;
+
+ if (size < sizeof(u32))
+ return -EINVAL;
+
+ switch (mthd & ~0x3f) {
+ case NV50_DISP_DAC_PWR:
+ ret = priv->dac.power(priv, or, data[0]);
+ break;
+ case NV50_DISP_DAC_LOAD:
+ ret = priv->dac.sense(priv, or, data[0]);
+ if (ret >= 0) {
+ data[0] = ret;
+ ret = 0;
+ }
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
new file mode 100644
index 00000000000..5a5b59b2113
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+#include <subdev/i2c.h>
+
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "dport.h"
+#include "outpdp.h"
+
+/******************************************************************************
+ * link training
+ *****************************************************************************/
+struct dp_state {
+ struct nvkm_output_dp *outp;
+ int link_nr;
+ u32 link_bw;
+ u8 stat[6];
+ u8 conf[4];
+ bool pc2;
+ u8 pc2stat;
+ u8 pc2conf[2];
+};
+
+static int
+dp_set_link_config(struct dp_state *dp)
+{
+ struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
+ struct nvkm_output_dp *outp = dp->outp;
+ struct nouveau_disp *disp = nouveau_disp(outp);
+ struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nvbios_init init = {
+ .subdev = nv_subdev(disp),
+ .bios = bios,
+ .offset = 0x0000,
+ .outp = &outp->base.info,
+ .crtc = -1,
+ .execute = 1,
+ };
+ u32 lnkcmp;
+ u8 sink[2];
+ int ret;
+
+ DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
+
+ /* set desired link configuration on the source */
+ if ((lnkcmp = dp->outp->info.lnkcmp)) {
+ if (outp->version < 0x30) {
+ while ((dp->link_bw / 10) < nv_ro16(bios, lnkcmp))
+ lnkcmp += 4;
+ init.offset = nv_ro16(bios, lnkcmp + 2);
+ } else {
+ while ((dp->link_bw / 27000) < nv_ro08(bios, lnkcmp))
+ lnkcmp += 3;
+ init.offset = nv_ro16(bios, lnkcmp + 1);
+ }
+
+ nvbios_exec(&init);
+ }
+
+ ret = impl->lnk_ctl(outp, dp->link_nr, dp->link_bw / 27000,
+ outp->dpcd[DPCD_RC02] &
+ DPCD_RC02_ENHANCED_FRAME_CAP);
+ if (ret) {
+ if (ret < 0)
+ ERR("lnk_ctl failed with %d\n", ret);
+ return ret;
+ }
+
+ impl->lnk_pwr(outp, dp->link_nr);
+
+ /* set desired link configuration on the sink */
+ sink[0] = dp->link_bw / 27000;
+ sink[1] = dp->link_nr;
+ if (outp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
+ sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
+
+ return nv_wraux(outp->base.edid, DPCD_LC00_LINK_BW_SET, sink, 2);
+}
+
+static void
+dp_set_training_pattern(struct dp_state *dp, u8 pattern)
+{
+ struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
+ struct nvkm_output_dp *outp = dp->outp;
+ u8 sink_tp;
+
+ DBG("training pattern %d\n", pattern);
+ impl->pattern(outp, pattern);
+
+ nv_rdaux(outp->base.edid, DPCD_LC02, &sink_tp, 1);
+ sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
+ sink_tp |= pattern;
+ nv_wraux(outp->base.edid, DPCD_LC02, &sink_tp, 1);
+}
+
+static int
+dp_link_train_commit(struct dp_state *dp, bool pc)
+{
+ struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
+ struct nvkm_output_dp *outp = dp->outp;
+ int ret, i;
+
+ for (i = 0; i < dp->link_nr; i++) {
+ u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
+ u8 lpc2 = (dp->pc2stat >> (i * 2)) & 0x3;
+ u8 lpre = (lane & 0x0c) >> 2;
+ u8 lvsw = (lane & 0x03) >> 0;
+ u8 hivs = 3 - lpre;
+ u8 hipe = 3;
+ u8 hipc = 3;
+
+ if (lpc2 >= hipc)
+ lpc2 = hipc | DPCD_LC0F_LANE0_MAX_POST_CURSOR2_REACHED;
+ if (lpre >= hipe) {
+ lpre = hipe | DPCD_LC03_MAX_SWING_REACHED; /* yes. */
+ lvsw = hivs = 3 - (lpre & 3);
+ } else
+ if (lvsw >= hivs) {
+ lvsw = hivs | DPCD_LC03_MAX_SWING_REACHED;
+ }
+
+ dp->conf[i] = (lpre << 3) | lvsw;
+ dp->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4);
+
+ DBG("config lane %d %02x %02x\n", i, dp->conf[i], lpc2);
+ impl->drv_ctl(outp, i, lvsw & 3, lpre & 3, lpc2 & 3);
+ }
+
+ ret = nv_wraux(outp->base.edid, DPCD_LC03(0), dp->conf, 4);
+ if (ret)
+ return ret;
+
+ if (pc) {
+ ret = nv_wraux(outp->base.edid, DPCD_LC0F, dp->pc2conf, 2);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dp_link_train_update(struct dp_state *dp, bool pc, u32 delay)
+{
+ struct nvkm_output_dp *outp = dp->outp;
+ int ret;
+
+ if (outp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL])
+ mdelay(outp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL] * 4);
+ else
+ udelay(delay);
+
+ ret = nv_rdaux(outp->base.edid, DPCD_LS02, dp->stat, 6);
+ if (ret)
+ return ret;
+
+ if (pc) {
+ ret = nv_rdaux(outp->base.edid, DPCD_LS0C, &dp->pc2stat, 1);
+ if (ret)
+ dp->pc2stat = 0x00;
+ DBG("status %6ph pc2 %02x\n", dp->stat, dp->pc2stat);
+ } else {
+ DBG("status %6ph\n", dp->stat);
+ }
+
+ return 0;
+}
+
+static int
+dp_link_train_cr(struct dp_state *dp)
+{
+ bool cr_done = false, abort = false;
+ int voltage = dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
+ int tries = 0, i;
+
+ dp_set_training_pattern(dp, 1);
+
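+ /* clock recovery: keep committing adjusted drive settings until
+  * every active lane reports CR_DONE, giving up after 5 tries at
+  * one voltage level or once max swing is hit without a lock */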
+ do {
+ if (dp_link_train_commit(dp, false) ||
+ dp_link_train_update(dp, false, 100))
+ break;
+
+ cr_done = true;
+ for (i = 0; i < dp->link_nr; i++) {
+ u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
+ if (!(lane & DPCD_LS02_LANE0_CR_DONE)) {
+ cr_done = false;
+ if (dp->conf[i] & DPCD_LC03_MAX_SWING_REACHED)
+ abort = true;
+ break;
+ }
+ }
+
+ if ((dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET) != voltage) {
+ voltage = dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
+ tries = 0;
+ }
+ } while (!cr_done && !abort && ++tries < 5);
+
+ return cr_done ? 0 : -1;
+}
+
+static int
+dp_link_train_eq(struct dp_state *dp)
+{
+ struct nvkm_output_dp *outp = dp->outp;
+ bool eq_done = false, cr_done = true;
+ int tries = 0, i;
+
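+ /* channel equalization: use TPS3 when the sink supports it, and
+  * succeed only once every lane is locked/equalized and interlane
+  * alignment is reported */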
+ if (outp->dpcd[2] & DPCD_RC02_TPS3_SUPPORTED)
+ dp_set_training_pattern(dp, 3);
+ else
+ dp_set_training_pattern(dp, 2);
+
+ do {
+ if ((tries &&
+ dp_link_train_commit(dp, dp->pc2)) ||
+ dp_link_train_update(dp, dp->pc2, 400))
+ break;
+
+ eq_done = !!(dp->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
+ for (i = 0; i < dp->link_nr && eq_done; i++) {
+ u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
+ if (!(lane & DPCD_LS02_LANE0_CR_DONE))
+ cr_done = false;
+ if (!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
+ !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
+ eq_done = false;
+ }
+ } while (!eq_done && cr_done && ++tries <= 5);
+
+ return eq_done ? 0 : -1;
+}
+
+static void
+dp_link_train_init(struct dp_state *dp, bool spread)
+{
+ struct nvkm_output_dp *outp = dp->outp;
+ struct nouveau_disp *disp = nouveau_disp(outp);
+ struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nvbios_init init = {
+ .subdev = nv_subdev(disp),
+ .bios = bios,
+ .outp = &outp->base.info,
+ .crtc = -1,
+ .execute = 1,
+ };
+
+ /* set desired spread */
+ if (spread)
+ init.offset = outp->info.script[2];
+ else
+ init.offset = outp->info.script[3];
+ nvbios_exec(&init);
+
+ /* pre-train script */
+ init.offset = outp->info.script[0];
+ nvbios_exec(&init);
+}
+
+static void
+dp_link_train_fini(struct dp_state *dp)
+{
+ struct nvkm_output_dp *outp = dp->outp;
+ struct nouveau_disp *disp = nouveau_disp(outp);
+ struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nvbios_init init = {
+ .subdev = nv_subdev(disp),
+ .bios = bios,
+ .outp = &outp->base.info,
+ .crtc = -1,
+ .execute = 1,
+ };
+
+ /* post-train script */
+ init.offset = outp->info.script[1];
+ nvbios_exec(&init);
+}
+
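+/* valid link configurations, fastest first; rate is the usable
+ * payload bandwidth in KB/s (bw * 27000 * nr, with bw in units
+ * of 270Mbit/s per lane) */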
+static const struct dp_rates {
+ u32 rate;
+ u8 bw;
+ u8 nr;
+} nouveau_dp_rates[] = {
+ { 2160000, 0x14, 4 },
+ { 1080000, 0x0a, 4 },
+ { 1080000, 0x14, 2 },
+ { 648000, 0x06, 4 },
+ { 540000, 0x0a, 2 },
+ { 540000, 0x14, 1 },
+ { 324000, 0x06, 2 },
+ { 270000, 0x0a, 1 },
+ { 162000, 0x06, 1 },
+ {}
+};
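+
+/* Each entry satisfies rate = bw * 27000 * nr: the total link symbol rate
+ * in kHz across all lanes (e.g. 0x14 * 27000 * 4 = 2160000 for four lanes
+ * at 5.4Gbps).  Entries are ordered fastest first so training can fall
+ * back down the table until a configuration sticks.
+ */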
+
+void
+nouveau_dp_train(struct work_struct *w)
+{
+ struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
+ struct nouveau_disp *disp = nouveau_disp(outp);
+ const struct dp_rates *cfg = nouveau_dp_rates;
+ struct dp_state _dp = {
+ .outp = outp,
+ }, *dp = &_dp;
+ u32 datarate = 0;
+ int ret;
+
+ /* bring capabilities within encoder limits */
+ if (nv_mclass(disp) < NVD0_DISP_CLASS)
+ outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED;
+ if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) {
+ outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT;
+ outp->dpcd[2] |= outp->base.info.dpconf.link_nr;
+ }
+ if (outp->dpcd[1] > outp->base.info.dpconf.link_bw)
+ outp->dpcd[1] = outp->base.info.dpconf.link_bw;
+ dp->pc2 = outp->dpcd[2] & DPCD_RC02_TPS3_SUPPORTED;
+
+ /* restrict link config to the lowest required data rate, if requested
+ * (datarate is always zero here at present, so no restriction applies)
+ */
+ if (datarate) {
+ datarate = (datarate / 8) * 10; /* 8B/10B coding overhead */
+ while (cfg[1].rate >= datarate)
+ cfg++;
+ }
+ cfg--;
+
+ /* disable link interrupt handling during link training */
+ nouveau_event_put(outp->irq);
+
+ /* enable down-spreading, if the sink supports it, and execute the
+ * pre-train script from the vbios
+ */
+ dp_link_train_init(dp, outp->dpcd[DPCD_RC03] & DPCD_RC03_MAX_DOWNSPREAD);
+
+ while (ret = -EIO, (++cfg)->rate) {
+ /* select next configuration supported by encoder and sink */
+ while (cfg->nr > (outp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT) ||
+ cfg->bw > (outp->dpcd[DPCD_RC01_MAX_LINK_RATE]))
+ cfg++;
+ dp->link_bw = cfg->bw * 27000;
+ dp->link_nr = cfg->nr;
+
+ /* program selected link configuration */
+ ret = dp_set_link_config(dp);
+ if (ret == 0) {
+ /* attempt to train the link at this configuration */
+ memset(dp->stat, 0x00, sizeof(dp->stat));
+ if (!dp_link_train_cr(dp) &&
+ !dp_link_train_eq(dp))
+ break;
+ } else {
+ /* dp_set_link_config() handled training, or
+ * we failed to communicate with the sink.
+ */
+ break;
+ }
+ }
+
+ /* finish link training and execute post-train script from vbios */
+ dp_set_training_pattern(dp, 0);
+ if (ret < 0)
+ ERR("link training failed\n");
+
+ dp_link_train_fini(dp);
+
+ /* signal completion and enable link interrupt handling */
+ DBG("training complete\n");
+ atomic_set(&outp->lt.done, 1);
+ wake_up(&outp->lt.wait);
+ nouveau_event_get(outp->irq);
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.h b/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
new file mode 100644
index 00000000000..5628d2d5ec7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
@@ -0,0 +1,75 @@
+#ifndef __NVKM_DISP_DPORT_H__
+#define __NVKM_DISP_DPORT_H__
+
+/* DPCD Receiver Capabilities */
+#define DPCD_RC00_DPCD_REV 0x00000
+#define DPCD_RC01_MAX_LINK_RATE 0x00001
+#define DPCD_RC02 0x00002
+#define DPCD_RC02_ENHANCED_FRAME_CAP 0x80
+#define DPCD_RC02_TPS3_SUPPORTED 0x40
+#define DPCD_RC02_MAX_LANE_COUNT 0x1f
+#define DPCD_RC03 0x00003
+#define DPCD_RC03_MAX_DOWNSPREAD 0x01
+#define DPCD_RC0E_AUX_RD_INTERVAL 0x0000e
+
+/* DPCD Link Configuration */
+#define DPCD_LC00_LINK_BW_SET 0x00100
+#define DPCD_LC01 0x00101
+#define DPCD_LC01_ENHANCED_FRAME_EN 0x80
+#define DPCD_LC01_LANE_COUNT_SET 0x1f
+#define DPCD_LC02 0x00102
+#define DPCD_LC02_TRAINING_PATTERN_SET 0x03
+#define DPCD_LC03(l) ((l) + 0x00103)
+#define DPCD_LC03_MAX_PRE_EMPHASIS_REACHED 0x20
+#define DPCD_LC03_PRE_EMPHASIS_SET 0x18
+#define DPCD_LC03_MAX_SWING_REACHED 0x04
+#define DPCD_LC03_VOLTAGE_SWING_SET 0x03
+#define DPCD_LC0F 0x0010f
+#define DPCD_LC0F_LANE1_MAX_POST_CURSOR2_REACHED 0x40
+#define DPCD_LC0F_LANE1_POST_CURSOR2_SET 0x30
+#define DPCD_LC0F_LANE0_MAX_POST_CURSOR2_REACHED 0x04
+#define DPCD_LC0F_LANE0_POST_CURSOR2_SET 0x03
+#define DPCD_LC10 0x00110
+#define DPCD_LC10_LANE3_MAX_POST_CURSOR2_REACHED 0x40
+#define DPCD_LC10_LANE3_POST_CURSOR2_SET 0x30
+#define DPCD_LC10_LANE2_MAX_POST_CURSOR2_REACHED 0x04
+#define DPCD_LC10_LANE2_POST_CURSOR2_SET 0x03
+
+/* DPCD Link/Sink Status */
+#define DPCD_LS02 0x00202
+#define DPCD_LS02_LANE1_SYMBOL_LOCKED 0x40
+#define DPCD_LS02_LANE1_CHANNEL_EQ_DONE 0x20
+#define DPCD_LS02_LANE1_CR_DONE 0x10
+#define DPCD_LS02_LANE0_SYMBOL_LOCKED 0x04
+#define DPCD_LS02_LANE0_CHANNEL_EQ_DONE 0x02
+#define DPCD_LS02_LANE0_CR_DONE 0x01
+#define DPCD_LS03 0x00203
+#define DPCD_LS03_LANE3_SYMBOL_LOCKED 0x40
+#define DPCD_LS03_LANE3_CHANNEL_EQ_DONE 0x20
+#define DPCD_LS03_LANE3_CR_DONE 0x10
+#define DPCD_LS03_LANE2_SYMBOL_LOCKED 0x04
+#define DPCD_LS03_LANE2_CHANNEL_EQ_DONE 0x02
+#define DPCD_LS03_LANE2_CR_DONE 0x01
+#define DPCD_LS04 0x00204
+#define DPCD_LS04_LINK_STATUS_UPDATED 0x80
+#define DPCD_LS04_DOWNSTREAM_PORT_STATUS_CHANGED 0x40
+#define DPCD_LS04_INTERLANE_ALIGN_DONE 0x01
+#define DPCD_LS06 0x00206
+#define DPCD_LS06_LANE1_PRE_EMPHASIS 0xc0
+#define DPCD_LS06_LANE1_VOLTAGE_SWING 0x30
+#define DPCD_LS06_LANE0_PRE_EMPHASIS 0x0c
+#define DPCD_LS06_LANE0_VOLTAGE_SWING 0x03
+#define DPCD_LS07 0x00207
+#define DPCD_LS07_LANE3_PRE_EMPHASIS 0xc0
+#define DPCD_LS07_LANE3_VOLTAGE_SWING 0x30
+#define DPCD_LS07_LANE2_PRE_EMPHASIS 0x0c
+#define DPCD_LS07_LANE2_VOLTAGE_SWING 0x03
+#define DPCD_LS0C 0x0020c
+#define DPCD_LS0C_LANE3_POST_CURSOR2 0xc0
+#define DPCD_LS0C_LANE2_POST_CURSOR2 0x30
+#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
+#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
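+
+/* Lane status is packed two lanes per byte: DPCD_LS02 carries lanes 0/1 and
+ * DPCD_LS03 lanes 2/3, so lane i's nibble is recovered by the training code
+ * as (stat[i >> 1] >> ((i & 1) * 4)) & 0xf.
+ */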
+
+void nouveau_dp_train(struct work_struct *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
new file mode 100644
index 00000000000..9fc7447fec9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static struct nouveau_oclass
+gm107_disp_sclass[] = {
+ { GM107_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+ { GM107_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+ { GM107_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+ { GM107_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+ { GM107_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_oclass
+gm107_disp_base_oclass[] = {
+ { GM107_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
+ {}
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static int
+gm107_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int heads = nv_rd32(parent, 0x022448);
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = gm107_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nvd0_disp_intr;
+ INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+ priv->sclass = gm107_disp_sclass;
+ priv->head.nr = heads;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nvd0_hda_eld;
+ priv->sor.hdmi = nvd0_hdmi_ctrl;
+ return 0;
+}
+
+struct nouveau_oclass *
+gm107_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x07),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gm107_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+ .base.outp = nvd0_disp_outp_sclass,
+ .mthd.core = &nve0_disp_mast_mthd_chan,
+ .mthd.base = &nvd0_disp_sync_mthd_chan,
+ .mthd.ovly = &nve0_disp_ovly_mthd_chan,
+ .mthd.prev = -0x020000,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
new file mode 100644
index 00000000000..a19e7d79b84
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
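+/* Write the ELD one byte per dword as (index << 8) | data and zero-pad the
+ * rest of the 0x60-byte buffer before flagging it valid.  The three mask
+ * values appear to distinguish "ELD valid", "codec enabled without an ELD"
+ * and "disabled"; nvd0_hda_eld below follows the same sequence at a
+ * different register stride.
+ */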
+int
+nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+ const u32 soff = (or * 0x800);
+ int i;
+
+ if (data && data[0]) {
+ for (i = 0; i < size; i++)
+ nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+ for (; i < 0x60; i++)
+ nv_wr32(priv, 0x61c440 + soff, (i << 8));
+ nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
+ } else
+ if (data) {
+ nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001);
+ } else {
+ nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
new file mode 100644
index 00000000000..717639386ce
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+int
+nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+ const u32 soff = (or * 0x030);
+ int i;
+
+ if (data && data[0]) {
+ for (i = 0; i < size; i++)
+ nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+ for (; i < 0x60; i++)
+ nv_wr32(priv, 0x10ec00 + soff, (i << 8));
+ nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
+ } else
+ if (data) {
+ nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001);
+ } else {
+ nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
new file mode 100644
index 00000000000..7fdade6e604
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+ const u32 hoff = (head * 0x800);
+
+ if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
+ nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+ return 0;
+ }
+
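+ /* The InfoFrame header words below follow the HDMI packet layout:
+ * 0x000d0282 is type 0x82 (AVI), version 0x02, length 0x0d, and
+ * 0x000a0184 is type 0x84 (Audio), version 0x01, length 0x0a.
+ */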
+ /* AVI InfoFrame */
+ nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x616528 + hoff, 0x000d0282);
+ nv_wr32(priv, 0x61652c + hoff, 0x0000006f);
+ nv_wr32(priv, 0x616530 + hoff, 0x00000000);
+ nv_wr32(priv, 0x616534 + hoff, 0x00000000);
+ nv_wr32(priv, 0x616538 + hoff, 0x00000000);
+ nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001);
+
+ /* Audio InfoFrame */
+ nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x616508 + hoff, 0x000a0184);
+ nv_wr32(priv, 0x61650c + hoff, 0x00000071);
+ nv_wr32(priv, 0x616510 + hoff, 0x00000000);
+ nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
+
+ nv_mask(priv, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+ nv_mask(priv, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+ nv_mask(priv, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+
+ /* ??? */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+ nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+ /* HDMI_CTRL */
+ nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
new file mode 100644
index 00000000000..db8c6fd4627
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+ const u32 soff = (or * 0x800);
+
+ if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
+ nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+ return 0;
+ }
+
+ /* AVI InfoFrame */
+ nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x61c528 + soff, 0x000d0282);
+ nv_wr32(priv, 0x61c52c + soff, 0x0000006f);
+ nv_wr32(priv, 0x61c530 + soff, 0x00000000);
+ nv_wr32(priv, 0x61c534 + soff, 0x00000000);
+ nv_wr32(priv, 0x61c538 + soff, 0x00000000);
+ nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001);
+
+ /* Audio InfoFrame */
+ nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x61c508 + soff, 0x000a0184);
+ nv_wr32(priv, 0x61c50c + soff, 0x00000071);
+ nv_wr32(priv, 0x61c510 + soff, 0x00000000);
+ nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
+
+ nv_mask(priv, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+ nv_mask(priv, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+ nv_mask(priv, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+
+ /* ??? */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+ nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+ /* HDMI_CTRL */
+ nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
new file mode 100644
index 00000000000..5151bb26183
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+ const u32 hoff = (head * 0x800);
+
+ if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
+ nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+ return 0;
+ }
+
+ /* AVI InfoFrame */
+ nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x61671c + hoff, 0x000d0282);
+ nv_wr32(priv, 0x616720 + hoff, 0x0000006f);
+ nv_wr32(priv, 0x616724 + hoff, 0x00000000);
+ nv_wr32(priv, 0x616728 + hoff, 0x00000000);
+ nv_wr32(priv, 0x61672c + hoff, 0x00000000);
+ nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001);
+
+ /* ??? InfoFrame? */
+ nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x6167ac + hoff, 0x00000010);
+ nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
+
+ /* HDMI_CTRL */
+ nv_mask(priv, 0x616798 + hoff, 0x401f007f, data);
+
+ /* NFI, audio doesn't work without it though.. */
+ nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
new file mode 100644
index 00000000000..a32666ed0c4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+#include <core/event.h>
+#include <core/class.h>
+
+struct nv04_disp_priv {
+ struct nouveau_disp base;
+};
+
+static int
+nv04_disp_scanoutpos(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nv04_disp_priv *priv = (void *)object->engine;
+ struct nv04_display_scanoutpos *args = data;
+ const int head = (mthd & NV04_DISP_MTHD_HEAD);
+ u32 line;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ args->vblanks = nv_rd32(priv, 0x680800 + (head * 0x2000)) & 0xffff;
+ args->vtotal = nv_rd32(priv, 0x680804 + (head * 0x2000)) & 0xffff;
+ args->vblanke = args->vtotal - 1;
+
+ args->hblanks = nv_rd32(priv, 0x680820 + (head * 0x2000)) & 0xffff;
+ args->htotal = nv_rd32(priv, 0x680824 + (head * 0x2000)) & 0xffff;
+ args->hblanke = args->htotal - 1;
+
+ /*
+ * If the output is VGA instead of digital, vtotal/htotal are invalid,
+ * so give up and trigger the timestamping fallback in the DRM core.
+ */
+ if (!args->vtotal || !args->htotal)
+ return -ENOTSUPP;
+
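+ /* sample system time either side of the register read so the
+ * caller can bound when the scanout position was captured
+ */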
+ args->time[0] = ktime_to_ns(ktime_get());
+ line = nv_rd32(priv, 0x600868 + (head * 0x2000));
+ args->time[1] = ktime_to_ns(ktime_get());
+ args->hline = (line & 0xffff0000) >> 16;
+ args->vline = (line & 0x0000ffff);
+ return 0;
+}
+
+#define HEAD_MTHD(n) (n), (n) + 0x01
+
+static struct nouveau_omthds
+nv04_disp_omthds[] = {
+ { HEAD_MTHD(NV04_DISP_SCANOUTPOS), nv04_disp_scanoutpos },
+ {}
+};
+
+static struct nouveau_oclass
+nv04_disp_sclass[] = {
+ { NV04_DISP_CLASS, &nouveau_object_ofuncs, nv04_disp_omthds },
+ {},
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static void
+nv04_disp_vblank_enable(struct nouveau_event *event, int type, int head)
+{
+ nv_wr32(event->priv, 0x600140 + (head * 0x2000), 0x00000001);
+}
+
+static void
+nv04_disp_vblank_disable(struct nouveau_event *event, int type, int head)
+{
+ nv_wr32(event->priv, 0x600140 + (head * 0x2000), 0x00000000);
+}
+
+static void
+nv04_disp_intr(struct nouveau_subdev *subdev)
+{
+ struct nv04_disp_priv *priv = (void *)subdev;
+ u32 crtc0 = nv_rd32(priv, 0x600100);
+ u32 crtc1 = nv_rd32(priv, 0x602100);
+ u32 pvideo;
+
+ if (crtc0 & 0x00000001) {
+ nouveau_event_trigger(priv->base.vblank, 1, 0);
+ nv_wr32(priv, 0x600100, 0x00000001);
+ }
+
+ if (crtc1 & 0x00000001) {
+ nouveau_event_trigger(priv->base.vblank, 1, 1);
+ nv_wr32(priv, 0x602100, 0x00000001);
+ }
+
+ if (nv_device(priv)->chipset >= 0x10 &&
+ nv_device(priv)->chipset <= 0x40) {
+ pvideo = nv_rd32(priv, 0x8100);
+ if (pvideo & ~0x11)
+ nv_info(priv, "PVIDEO intr: %08x\n", pvideo);
+ nv_wr32(priv, 0x8100, pvideo);
+ }
+}
+
+static int
+nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "DISPLAY",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nv04_disp_sclass;
+ nv_subdev(priv)->intr = nv04_disp_intr;
+ priv->base.vblank->priv = priv;
+ priv->base.vblank->enable = nv04_disp_vblank_enable;
+ priv->base.vblank->disable = nv04_disp_vblank_disable;
+ return 0;
+}
+
+struct nouveau_oclass *
+nv04_disp_oclass = &(struct nouveau_disp_impl) {
+ .base.handle = NV_ENGINE(DISP, 0x04),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
new file mode 100644
index 00000000000..2283c442a10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -0,0 +1,1726 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
+#include <core/enum.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/devinit.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO channel base class
+ ******************************************************************************/
+
+int
+nv50_disp_chan_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int chid,
+ int length, void **pobject)
+{
+ struct nv50_disp_base *base = (void *)parent;
+ struct nv50_disp_chan *chan;
+ int ret;
+
+ if (base->chan & (1 << chid))
+ return -EBUSY;
+ base->chan |= (1 << chid);
+
+ ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
+ (1ULL << NVDEV_ENGINE_DMAOBJ),
+ length, pobject);
+ chan = *pobject;
+ if (ret)
+ return ret;
+
+ chan->chid = chid;
+ return 0;
+}
+
+void
+nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
+{
+ struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
+ base->chan &= ~(1 << chan->chid);
+ nouveau_namedb_destroy(&chan->base);
+}
+
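+/* Each channel's user-visible registers occupy a 0x1000-byte window starting
+ * at 0x640000, indexed by channel id; rd32/wr32 below forward object
+ * accesses into that window.
+ */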
+u32
+nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_chan *chan = (void *)object;
+ return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr);
+}
+
+void
+nv50_disp_chan_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_chan *chan = (void *)object;
+ nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data);
+}
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
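+/* DMA objects bound to a channel are entered into the display ramht; the
+ * context value packs the channel id into the top nibble and again into the
+ * low bits, with the gpuobj node offset shifted into bits 10 and up.
+ */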
+static int
+nv50_disp_dmac_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 name)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ struct nv50_disp_chan *chan = (void *)parent;
+ u32 addr = nv_gpuobj(object)->node->offset;
+ u32 chid = chan->chid;
+ u32 data = (chid << 28) | (addr << 10) | chid;
+ return nouveau_ramht_insert(base->ramht, chid, name, data);
+}
+
+static void
+nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ nouveau_ramht_remove(base->ramht, cookie);
+}
+
+int
+nv50_disp_dmac_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 pushbuf, int chid,
+ int length, void **pobject)
+{
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ ret = nv50_disp_chan_create_(parent, engine, oclass, chid,
+ length, pobject);
+ dmac = *pobject;
+ if (ret)
+ return ret;
+
+ dmac->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
+ if (!dmac->pushdma)
+ return -ENOENT;
+
+ switch (nv_mclass(dmac->pushdma)) {
+ case 0x0002:
+ case 0x003d:
+ if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
+ return -EINVAL;
+
+ switch (dmac->pushdma->target) {
+ case NV_MEM_TARGET_VRAM:
+ dmac->push = 0x00000000 | dmac->pushdma->start >> 8;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+nv50_disp_dmac_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_dmac *dmac = (void *)object;
+ nouveau_object_ref(NULL, (struct nouveau_object **)&dmac->pushdma);
+ nv50_disp_chan_destroy(&dmac->base);
+}
+
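+/* Bring up a DMA channel: point the hardware at the pushbuffer, kick the
+ * channel with 0x13 and wait for the busy bit (0x80000000) to clear; the
+ * matching fini path idles the channel and tolerates a timeout except
+ * during suspend.
+ */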
+static int
+nv50_disp_dmac_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&dmac->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00010001 << chid);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
+ nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000);
+ nv_wr32(priv, 0x61020c + (chid * 0x0010), chid);
+ nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+ nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) {
+ nv_error(dmac, "init timeout, 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
+ nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+ nv_error(dmac, "fini timeout, 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+
+ return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
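+/* Dump the current EVO method state for debugging: two copies of each
+ * method are kept 'c' bytes apart (impl->mthd.prev), and entries whose
+ * copies differ are printed with "-> <new value>".
+ */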
+static void
+nv50_disp_mthd_list(struct nv50_disp_priv *priv, int debug, u32 base, int c,
+ const struct nv50_disp_mthd_list *list, int inst)
+{
+ struct nouveau_object *disp = nv_object(priv);
+ int i;
+
+ for (i = 0; list->data[i].mthd; i++) {
+ if (list->data[i].addr) {
+ u32 next = nv_rd32(priv, list->data[i].addr + base + 0);
+ u32 prev = nv_rd32(priv, list->data[i].addr + base + c);
+ u32 mthd = list->data[i].mthd + (list->mthd * inst);
+ const char *name = list->data[i].name;
+ char mods[16];
+
+ if (prev != next)
+ snprintf(mods, sizeof(mods), "-> 0x%08x", next);
+ else
+ snprintf(mods, sizeof(mods), "%13c", ' ');
+
+ nv_printk_(disp, debug, "\t0x%04x: 0x%08x %s%s%s\n",
+ mthd, prev, mods, name ? " // " : "",
+ name ? name : "");
+ }
+ }
+}
+
+void
+nv50_disp_mthd_chan(struct nv50_disp_priv *priv, int debug, int head,
+ const struct nv50_disp_mthd_chan *chan)
+{
+ struct nouveau_object *disp = nv_object(priv);
+ const struct nv50_disp_impl *impl = (void *)disp->oclass;
+ const struct nv50_disp_mthd_list *list;
+ int i, j;
+
+ if (debug > nv_subdev(priv)->debug)
+ return;
+
+ for (i = 0; (list = chan->data[i].mthd) != NULL; i++) {
+ u32 base = head * chan->addr;
+ for (j = 0; j < chan->data[i].nr; j++, base += list->addr) {
+ const char *cname = chan->name;
+ const char *sname = "";
+ char cname_[16], sname_[16];
+
+ if (chan->addr) {
+ snprintf(cname_, sizeof(cname_), "%s %d",
+ chan->name, head);
+ cname = cname_;
+ }
+
+ if (chan->data[i].nr > 1) {
+ snprintf(sname_, sizeof(sname_), " - %s %d",
+ chan->data[i].name, j);
+ sname = sname_;
+ }
+
+ nv_printk_(disp, debug, "%s%s:\n", cname, sname);
+ nv50_disp_mthd_list(priv, debug, base, impl->mthd.prev,
+ list, j);
+ }
+ }
+}
+
+const struct nv50_disp_mthd_list
+nv50_disp_mast_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x610bb8 },
+ { 0x0088, 0x610b9c },
+ { 0x008c, 0x000000 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_list
+nv50_disp_mast_mthd_dac = {
+ .mthd = 0x0080,
+ .addr = 0x000008,
+ .data = {
+ { 0x0400, 0x610b58 },
+ { 0x0404, 0x610bdc },
+ { 0x0420, 0x610828 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_mast_mthd_sor = {
+ .mthd = 0x0040,
+ .addr = 0x000008,
+ .data = {
+ { 0x0600, 0x610b70 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_mast_mthd_pior = {
+ .mthd = 0x0040,
+ .addr = 0x000008,
+ .data = {
+ { 0x0700, 0x610b80 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_list
+nv50_disp_mast_mthd_head = {
+ .mthd = 0x0400,
+ .addr = 0x000540,
+ .data = {
+ { 0x0800, 0x610ad8 },
+ { 0x0804, 0x610ad0 },
+ { 0x0808, 0x610a48 },
+ { 0x080c, 0x610a78 },
+ { 0x0810, 0x610ac0 },
+ { 0x0814, 0x610af8 },
+ { 0x0818, 0x610b00 },
+ { 0x081c, 0x610ae8 },
+ { 0x0820, 0x610af0 },
+ { 0x0824, 0x610b08 },
+ { 0x0828, 0x610b10 },
+ { 0x082c, 0x610a68 },
+ { 0x0830, 0x610a60 },
+ { 0x0834, 0x000000 },
+ { 0x0838, 0x610a40 },
+ { 0x0840, 0x610a24 },
+ { 0x0844, 0x610a2c },
+ { 0x0848, 0x610aa8 },
+ { 0x084c, 0x610ab0 },
+ { 0x0860, 0x610a84 },
+ { 0x0864, 0x610a90 },
+ { 0x0868, 0x610b18 },
+ { 0x086c, 0x610b20 },
+ { 0x0870, 0x610ac8 },
+ { 0x0874, 0x610a38 },
+ { 0x0880, 0x610a58 },
+ { 0x0884, 0x610a9c },
+ { 0x08a0, 0x610a70 },
+ { 0x08a4, 0x610a50 },
+ { 0x08a8, 0x610ae0 },
+ { 0x08c0, 0x610b28 },
+ { 0x08c4, 0x610b30 },
+ { 0x08c8, 0x610b40 },
+ { 0x08d4, 0x610b38 },
+ { 0x08d8, 0x610b48 },
+ { 0x08dc, 0x610b50 },
+ { 0x0900, 0x610a18 },
+ { 0x0904, 0x610ab8 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_chan
+nv50_disp_mast_mthd_chan = {
+ .name = "Core",
+ .addr = 0x000000,
+ .data = {
+ { "Global", 1, &nv50_disp_mast_mthd_base },
+ { "DAC", 3, &nv50_disp_mast_mthd_dac },
+ { "SOR", 2, &nv50_disp_mast_mthd_sor },
+ { "PIOR", 3, &nv50_disp_mast_mthd_pior },
+ { "HEAD", 2, &nv50_disp_mast_mthd_head },
+ {}
+ }
+};
+
+static int
+nv50_disp_mast_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_mast_class *args = data;
+ struct nv50_disp_dmac *mast;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 0, sizeof(*mast), (void **)&mast);
+ *pobject = nv_object(mast);
+ if (ret)
+ return ret;
+
+ nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach;
+ nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach;
+ return 0;
+}
+
+static int
+nv50_disp_mast_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+ int ret;
+
+ ret = nv50_disp_chan_init(&mast->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001, 0x00010001);
+
+ /* attempt to unstick channel from some unknown state */
+ if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
+ nv_mask(priv, 0x610200, 0x00800000, 0x00800000);
+ if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000)
+ nv_mask(priv, 0x610200, 0x00600000, 0x00600000);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610204, mast->push);
+ nv_wr32(priv, 0x610208, 0x00010000);
+ nv_wr32(priv, 0x61020c, 0x00000000);
+ nv_mask(priv, 0x610200, 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000, 0x00000000);
+ nv_wr32(priv, 0x610200, 0x01000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) {
+ nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610200, 0x00000010, 0x00000000);
+ nv_mask(priv, 0x610200, 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) {
+ nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
+
+ return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_mast_ofuncs = {
+ .ctor = nv50_disp_mast_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nv50_disp_mast_init,
+ .fini = nv50_disp_mast_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static const struct nv50_disp_mthd_list
+nv50_disp_sync_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x0008c4 },
+ { 0x0088, 0x0008d0 },
+ { 0x008c, 0x0008dc },
+ { 0x0090, 0x0008e4 },
+ { 0x0094, 0x610884 },
+ { 0x00a0, 0x6108a0 },
+ { 0x00a4, 0x610878 },
+ { 0x00c0, 0x61086c },
+ { 0x00e0, 0x610858 },
+ { 0x00e4, 0x610860 },
+ { 0x00e8, 0x6108ac },
+ { 0x00ec, 0x6108b4 },
+ { 0x0100, 0x610894 },
+ { 0x0110, 0x6108bc },
+ { 0x0114, 0x61088c },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_sync_mthd_image = {
+ .mthd = 0x0400,
+ .addr = 0x000000,
+ .data = {
+ { 0x0800, 0x6108f0 },
+ { 0x0804, 0x6108fc },
+ { 0x0808, 0x61090c },
+ { 0x080c, 0x610914 },
+ { 0x0810, 0x610904 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_chan
+nv50_disp_sync_mthd_chan = {
+ .name = "Base",
+ .addr = 0x000540,
+ .data = {
+ { "Global", 1, &nv50_disp_sync_mthd_base },
+ { "Image", 2, &nv50_disp_sync_mthd_image },
+ {}
+ }
+};
+
+static int
+nv50_disp_sync_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_sync_class *args = data;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*args) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 1 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_sync_ofuncs = {
+ .ctor = nv50_disp_sync_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nv50_disp_dmac_init,
+ .fini = nv50_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+const struct nv50_disp_mthd_list
+nv50_disp_ovly_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x0009a0 },
+ { 0x0088, 0x0009c0 },
+ { 0x008c, 0x0009c8 },
+ { 0x0090, 0x6109b4 },
+ { 0x0094, 0x610970 },
+ { 0x00a0, 0x610998 },
+ { 0x00a4, 0x610964 },
+ { 0x00c0, 0x610958 },
+ { 0x00e0, 0x6109a8 },
+ { 0x00e4, 0x6109d0 },
+ { 0x00e8, 0x6109d8 },
+ { 0x0100, 0x61094c },
+ { 0x0104, 0x610984 },
+ { 0x0108, 0x61098c },
+ { 0x0800, 0x6109f8 },
+ { 0x0808, 0x610a08 },
+ { 0x080c, 0x610a10 },
+ { 0x0810, 0x610a00 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_chan
+nv50_disp_ovly_mthd_chan = {
+ .name = "Overlay",
+ .addr = 0x000540,
+ .data = {
+ { "Global", 1, &nv50_disp_ovly_mthd_base },
+ {}
+ }
+};
+
+static int
+nv50_disp_ovly_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_ovly_class *args = data;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*args) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 3 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_ovly_ofuncs = {
+ .ctor = nv50_disp_ovly_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nv50_disp_dmac_init,
+ .fini = nv50_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
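+/* PIO channels (cursor and overlay-immediate, per the ctors below) have no
+ * pushbuffer; init simply pokes the channel control register and waits for
+ * the channel to report active.
+ */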
+static int
+nv50_disp_pioc_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int chid,
+ int length, void **pobject)
+{
+ return nv50_disp_chan_create_(parent, engine, oclass, chid,
+ length, pobject);
+}
+
+static void
+nv50_disp_pioc_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_pioc *pioc = (void *)object;
+ nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nv50_disp_pioc_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&pioc->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) {
+ nv_error(pioc, "timeout0: 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) {
+ nv_error(pioc, "timeout1: 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+
+ nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) {
+ nv_error(pioc, "timeout: 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_oimm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_oimm_class *args = data;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_oimm_ofuncs = {
+ .ctor = nv50_disp_oimm_ctor,
+ .dtor = nv50_disp_pioc_dtor,
+ .init = nv50_disp_pioc_init,
+ .fini = nv50_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_curs_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_curs_class *args = data;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_curs_ofuncs = {
+ .ctor = nv50_disp_curs_ctor,
+ .dtor = nv50_disp_pioc_dtor,
+ .init = nv50_disp_pioc_init,
+ .fini = nv50_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+int
+nv50_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv04_display_scanoutpos *args = data;
+ const int head = (mthd & NV50_DISP_MTHD_HEAD);
+ u32 blanke, blanks, total;
+
+ if (size < sizeof(*args) || head >= priv->head.nr)
+ return -EINVAL;
+ blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
+ blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
+ total = nv_rd32(priv, 0x610afc + (head * 0x540));
+
+ args->vblanke = (blanke & 0xffff0000) >> 16;
+ args->hblanke = (blanke & 0x0000ffff);
+ args->vblanks = (blanks & 0xffff0000) >> 16;
+ args->hblanks = (blanks & 0x0000ffff);
+ args->vtotal = ( total & 0xffff0000) >> 16;
+ args->htotal = ( total & 0x0000ffff);
+
+ args->time[0] = ktime_to_ns(ktime_get());
+ args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
+ args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
+ args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
+ return 0;
+}
+
+static void
+nv50_disp_base_vblank_enable(struct nouveau_event *event, int type, int head)
+{
+ nv_mask(event->priv, 0x61002c, (4 << head), (4 << head));
+}
+
+static void
+nv50_disp_base_vblank_disable(struct nouveau_event *event, int type, int head)
+{
+ nv_mask(event->priv, 0x61002c, (4 << head), 0);
+}
+
+static int
+nv50_disp_base_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_base *base;
+ int ret;
+
+ ret = nouveau_parent_create(parent, engine, oclass, 0,
+ priv->sclass, 0, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
+
+ priv->base.vblank->priv = priv;
+ priv->base.vblank->enable = nv50_disp_base_vblank_enable;
+ priv->base.vblank->disable = nv50_disp_base_vblank_disable;
+ return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
+ &base->ramht);
+}
+
+static void
+nv50_disp_base_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_base *base = (void *)object;
+ nouveau_ramht_ref(NULL, &base->ramht);
+ nouveau_parent_destroy(&base->base);
+}
+
+static int
+nv50_disp_base_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+ int ret, i;
+ u32 tmp;
+
+ ret = nouveau_parent_init(&base->base);
+ if (ret)
+ return ret;
+
+ /* The below segments of code copying values from one register to
+ * another appear to inform EVO of the display capabilities or
+ * something similar. NFI what the 0x614004 caps are for..
+ */
+ tmp = nv_rd32(priv, 0x614004);
+ nv_wr32(priv, 0x610184, tmp);
+
+ /* ... CRTC caps */
+ for (i = 0; i < priv->head.nr; i++) {
+ tmp = nv_rd32(priv, 0x616100 + (i * 0x800));
+ nv_wr32(priv, 0x610190 + (i * 0x10), tmp);
+ tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+ nv_wr32(priv, 0x610194 + (i * 0x10), tmp);
+ tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+ nv_wr32(priv, 0x610198 + (i * 0x10), tmp);
+ tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+ nv_wr32(priv, 0x61019c + (i * 0x10), tmp);
+ }
+
+ /* ... DAC caps */
+ for (i = 0; i < priv->dac.nr; i++) {
+ tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+ nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp);
+ }
+
+ /* ... SOR caps */
+ for (i = 0; i < priv->sor.nr; i++) {
+ tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+ nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
+ }
+
+ /* ... PIOR caps */
+ for (i = 0; i < priv->pior.nr; i++) {
+ tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
+ nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
+ }
+
+ /* steal display away from vbios, or something like that */
+ if (nv_rd32(priv, 0x610024) & 0x00000100) {
+ nv_wr32(priv, 0x610024, 0x00000100);
+ nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+ nv_error(priv, "timeout acquiring display\n");
+ return -EBUSY;
+ }
+ }
+
+ /* point at display engine memory area (hash table, objects) */
+ nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);
+
+ /* enable supervisor interrupts, disable everything else */
+ nv_wr32(priv, 0x61002c, 0x00000370);
+ nv_wr32(priv, 0x610028, 0x00000000);
+ return 0;
+}
+
+static int
+nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+
+ /* disable all interrupts */
+ nv_wr32(priv, 0x610024, 0x00000000);
+ nv_wr32(priv, 0x610020, 0x00000000);
+
+ return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_base_ofuncs = {
+ .ctor = nv50_disp_base_ctor,
+ .dtor = nv50_disp_base_dtor,
+ .init = nv50_disp_base_init,
+ .fini = nv50_disp_base_fini,
+};
+
+static struct nouveau_omthds
+nv50_disp_base_omthds[] = {
+ { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nv50_disp_base_oclass[] = {
+ { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds },
+ {}
+};
+
+static struct nouveau_oclass
+nv50_disp_sclass[] = {
+ { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * Display context, tracks instmem allocation and prevents more than one
+ * client using the display hardware at any time.
+ ******************************************************************************/
+
+static int
+nv50_disp_data_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nouveau_engctx *ectx;
+ int ret = -EBUSY;
+
+ /* no context needed for channel objects... */
+ if (nv_mclass(parent) != NV_DEVICE_CLASS) {
+ atomic_inc(&parent->refcount);
+ *pobject = parent;
+ return 1;
+ }
+
+ /* allocate display hardware to client */
+ mutex_lock(&nv_subdev(priv)->mutex);
+ if (list_empty(&nv_engine(priv)->contexts)) {
+ ret = nouveau_engctx_create(parent, engine, oclass, NULL,
+ 0x10000, 0x10000,
+ NVOBJ_FLAG_HEAP, &ectx);
+ *pobject = nv_object(ectx);
+ }
+ mutex_unlock(&nv_subdev(priv)->mutex);
+ return ret;
+}
+
+struct nouveau_oclass
+nv50_disp_cclass = {
+ .handle = NV_ENGCTX(DISP, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_disp_data_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
+ },
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static const struct nouveau_enum
+nv50_disp_intr_error_type[] = {
+ { 3, "ILLEGAL_MTHD" },
+ { 4, "INVALID_VALUE" },
+ { 5, "INVALID_STATE" },
+ { 7, "INVALID_HANDLE" },
+ {}
+};
+
+static const struct nouveau_enum
+nv50_disp_intr_error_code[] = {
+ { 0x00, "" },
+ {}
+};
+
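+/* Decode an EVO channel error: 0x610080 + chid * 8 packs the error code in
+ * bits 16-23, the error type in bits 12-14 and the failing method in bits
+ * 2-11, with the offending data latched in 0x610084; for a known method
+ * (0x0080) the relevant channel's method state is dumped too.
+ */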
+static void
+nv50_disp_intr_error(struct nv50_disp_priv *priv, int chid)
+{
+ struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
+ u32 data = nv_rd32(priv, 0x610084 + (chid * 0x08));
+ u32 addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
+ u32 code = (addr & 0x00ff0000) >> 16;
+ u32 type = (addr & 0x00007000) >> 12;
+ u32 mthd = (addr & 0x00000ffc);
+ const struct nouveau_enum *ec, *et;
+ char ecunk[6], etunk[6];
+
+ et = nouveau_enum_find(nv50_disp_intr_error_type, type);
+ if (!et)
+ snprintf(etunk, sizeof(etunk), "UNK%02X", type);
+
+ ec = nouveau_enum_find(nv50_disp_intr_error_code, code);
+ if (!ec)
+ snprintf(ecunk, sizeof(ecunk), "UNK%02X", code);
+
+ nv_error(priv, "%s [%s] chid %d mthd 0x%04x data 0x%08x\n",
+ et ? et->name : etunk, ec ? ec->name : ecunk,
+ chid, mthd, data);
+
+ if (chid == 0) {
+ switch (mthd) {
+ case 0x0080:
+ nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
+ impl->mthd.core);
+ break;
+ default:
+ break;
+ }
+ } else
+ if (chid <= 2) {
+ switch (mthd) {
+ case 0x0080:
+ nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
+ impl->mthd.base);
+ break;
+ default:
+ break;
+ }
+ } else
+ if (chid <= 4) {
+ switch (mthd) {
+ case 0x0080:
+ nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 3,
+ impl->mthd.ovly);
+ break;
+ default:
+ break;
+ }
+ }
+
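+	/* ack the error interrupt and (presumably) unstall the channel */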
+ nv_wr32(priv, 0x610020, 0x00010000 << chid);
+ nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
+}
+
+static struct nvkm_output *
+exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
+ u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_output *outp;
+ u16 mask, type;
+
+ if (or < 4) {
+ type = DCB_OUTPUT_ANALOG;
+ mask = 0;
+ } else
+ if (or < 8) {
+ switch (ctrl & 0x00000f00) {
+ case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+ case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+ case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+ case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+ case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+ case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+ default:
+ nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+ return NULL;
+ }
+ or -= 4;
+ } else {
+		or -= 8;
+ type = 0x0010;
+ mask = 0;
+ switch (ctrl & 0x00000f00) {
+ case 0x00000000: type |= priv->pior.type[or]; break;
+ default:
+ nv_error(priv, "unknown PIOR mc 0x%08x\n", ctrl);
+ return NULL;
+ }
+ }
+
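+	/* build a match mask for the dcb hash entries, presumably mirroring
+	 * the format nvbios_outp_match() keys on: or index in the low bits,
+	 * sor link(s) at bits 6-7, head at bits 8 and up
+	 */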
+ mask = 0x00c0 & (mask << 6);
+ mask |= 0x0001 << or;
+ mask |= 0x0100 << head;
+
+ list_for_each_entry(outp, &priv->base.outp, head) {
+ if ((outp->info.hasht & 0xff) == type &&
+ (outp->info.hashm & mask) == mask) {
+ *data = nvbios_outp_match(bios, outp->info.hasht,
+ outp->info.hashm,
+ ver, hdr, cnt, len, info);
+ if (!*data)
+ return NULL;
+ return outp;
+ }
+ }
+
+ return NULL;
+}
+
+static struct nvkm_output *
+exec_script(struct nv50_disp_priv *priv, int head, int id)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_output *outp;
+ struct nvbios_outp info;
+ u8 ver, hdr, cnt, len;
+ u32 data, ctrl = 0;
+ u32 reg;
+ int i;
+
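+	/* scan the armed control values of each output class to find the
+	 * or (if any) currently driving this head; i is left as the global
+	 * or index (dac 0-2, sor 4+, pior 8+) that exec_lookup() expects
+	 */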
+ /* DAC */
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++)
+ ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
+
+ /* SOR */
+ if (!(ctrl & (1 << head))) {
+ if (nv_device(priv)->chipset < 0x90 ||
+ nv_device(priv)->chipset == 0x92 ||
+ nv_device(priv)->chipset == 0xa0) {
+ reg = 0x610b74;
+ } else {
+ reg = 0x610798;
+ }
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++)
+ ctrl = nv_rd32(priv, reg + (i * 8));
+ i += 4;
+ }
+
+ /* PIOR */
+ if (!(ctrl & (1 << head))) {
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++)
+ ctrl = nv_rd32(priv, 0x610b84 + (i * 8));
+ i += 8;
+ }
+
+ if (!(ctrl & (1 << head)))
+ return NULL;
+ i--;
+
+	outp = exec_lookup(priv, head, i, ctrl,
+			   &data, &ver, &hdr, &cnt, &len, &info);
+ if (outp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[id],
+ .outp = &outp->info,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ }
+
+ return outp;
+}
+
+static struct nvkm_output *
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_output *outp;
+ struct nvbios_outp info1;
+ struct nvbios_ocfg info2;
+ u8 ver, hdr, cnt, len;
+ u32 data, ctrl = 0;
+ u32 reg;
+ int i;
+
+ /* DAC */
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++)
+ ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
+
+ /* SOR */
+ if (!(ctrl & (1 << head))) {
+ if (nv_device(priv)->chipset < 0x90 ||
+ nv_device(priv)->chipset == 0x92 ||
+ nv_device(priv)->chipset == 0xa0) {
+ reg = 0x610b70;
+ } else {
+ reg = 0x610794;
+ }
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++)
+ ctrl = nv_rd32(priv, reg + (i * 8));
+ i += 4;
+ }
+
+ /* PIOR */
+ if (!(ctrl & (1 << head))) {
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++)
+ ctrl = nv_rd32(priv, 0x610b80 + (i * 8));
+ i += 8;
+ }
+
+ if (!(ctrl & (1 << head)))
+ return NULL;
+ i--;
+
+	outp = exec_lookup(priv, head, i, ctrl,
+			   &data, &ver, &hdr, &cnt, &len, &info1);
+ if (!outp)
+ return NULL;
+
+ if (outp->info.location == 0) {
+ switch (outp->info.type) {
+ case DCB_OUTPUT_TMDS:
+ *conf = (ctrl & 0x00000f00) >> 8;
+ if (pclk >= 165000)
+ *conf |= 0x0100;
+ break;
+ case DCB_OUTPUT_LVDS:
+ *conf = priv->sor.lvdsconf;
+ break;
+ case DCB_OUTPUT_DP:
+ *conf = (ctrl & 0x00000f00) >> 8;
+ break;
+ case DCB_OUTPUT_ANALOG:
+ default:
+ *conf = 0x00ff;
+ break;
+ }
+ } else {
+ *conf = (ctrl & 0x00000f00) >> 8;
+		/* external (pior) encoders appear to want half the pixel clock */
+		pclk /= 2;
+ }
+
+	data = nvbios_ocfg_match(bios, data, *conf,
+				 &ver, &hdr, &cnt, &len, &info2);
+ if (data && id < 0xff) {
+ data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = data,
+ .outp = &outp->info,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ }
+ }
+
+ return outp;
+}
+
+static void
+nv50_disp_intr_unk10_0(struct nv50_disp_priv *priv, int head)
+{
+ exec_script(priv, head, 1);
+}
+
+static void
+nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
+{
+ struct nvkm_output *outp = exec_script(priv, head, 2);
+
+ /* the binary driver does this outside of the supervisor handling
+ * (after the third supervisor from a detach). we (currently?)
+ * allow both detach/attach to happen in the same set of
+ * supervisor interrupts, so it would make sense to execute this
+ * (full power down?) script after all the detach phases of the
+ * supervisor handling. like with training if needed from the
+ * second supervisor, nvidia doesn't do this, so who knows if it's
+ * entirely safe, but it does appear to work..
+ *
+ * without this script being run, on some configurations i've
+ * seen, switching from DP to TMDS on a DP connector may result
+ * in a blank screen (SOR_PWR off/on can restore it)
+ */
+ if (outp && outp->info.type == DCB_OUTPUT_DP) {
+ struct nvkm_output_dp *outpdp = (void *)outp;
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = nouveau_bios(priv),
+ .outp = &outp->info,
+ .crtc = head,
+ .offset = outpdp->info.script[4],
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ atomic_set(&outpdp->lt.done, 0);
+ }
+}
+
+static void
+nv50_disp_intr_unk20_1(struct nv50_disp_priv *priv, int head)
+{
+ struct nouveau_devinit *devinit = nouveau_devinit(priv);
+	u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+
+ if (pclk)
+ devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
+}
+
+static void
+nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
+ struct dcb_output *outp, u32 pclk)
+{
+ const int link = !(outp->sorconf.link & 1);
+ const int or = ffs(outp->or) - 1;
+ const u32 soff = ( or * 0x800);
+ const u32 loff = (link * 0x080) + soff;
+ const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
+ const u32 symbol = 100000;
+	u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
+ u32 clksor = nv_rd32(priv, 0x614300 + soff);
+ int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
+ int TU, VTUi, VTUf, VTUa;
+ u64 link_data_rate, link_ratio, unk;
+ u32 best_diff = 64 * symbol;
+ u32 link_nr, link_bw, bits, r;
+
+ /* calculate packed data rate for each lane */
+ if (dpctrl > 0x00030000) link_nr = 4;
+ else if (dpctrl > 0x00010000) link_nr = 2;
+ else link_nr = 1;
+
+ if (clksor & 0x000c0000)
+ link_bw = 270000;
+ else
+ link_bw = 162000;
+
+ if ((ctrl & 0xf0000) == 0x60000) bits = 30;
+ else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
+ else bits = 18;
+
+ link_data_rate = (pclk * bits / 8) / link_nr;
+
+ /* calculate ratio of packed data rate to link symbol rate */
+ link_ratio = link_data_rate * symbol;
+ r = do_div(link_ratio, link_bw);
+
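+	/* worked example with made-up numbers: a 108000kHz mode at 24bpp
+	 * over 4 lanes of 2.7Gbps gives link_data_rate = 108000 * 24 / 8
+	 * / 4 = 81000, so link_ratio = 81000 * 100000 / 270000 = 30000
+	 * (0.3 data symbols per link symbol).  at TU = 64 that gives
+	 * tu_valid = 1920000, ie. VTUi = 19 with a 20000 remainder, which
+	 * is matched exactly by VTUf = 5 (19 + 1/5 = 19.2 valid symbols
+	 * per TU), so the search below terminates with diff == 0
+	 */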
+ for (TU = 64; TU >= 32; TU--) {
+ /* calculate average number of valid symbols in each TU */
+ u32 tu_valid = link_ratio * TU;
+ u32 calc, diff;
+
+ /* find a hw representation for the fraction.. */
+ VTUi = tu_valid / symbol;
+ calc = VTUi * symbol;
+ diff = tu_valid - calc;
+ if (diff) {
+ if (diff >= (symbol / 2)) {
+ VTUf = symbol / (symbol - diff);
+ if (symbol - (VTUf * diff))
+ VTUf++;
+
+ if (VTUf <= 15) {
+ VTUa = 1;
+ calc += symbol - (symbol / VTUf);
+ } else {
+ VTUa = 0;
+ VTUf = 1;
+ calc += symbol;
+ }
+ } else {
+ VTUa = 0;
+ VTUf = min((int)(symbol / diff), 15);
+ calc += symbol / VTUf;
+ }
+
+ diff = calc - tu_valid;
+ } else {
+ /* no remainder, but the hw doesn't like the fractional
+ * part to be zero. decrement the integer part and
+ * have the fraction add a whole symbol back
+ */
+ VTUa = 0;
+ VTUf = 1;
+ VTUi--;
+ }
+
+ if (diff < best_diff) {
+ best_diff = diff;
+ bestTU = TU;
+ bestVTUa = VTUa;
+ bestVTUf = VTUf;
+ bestVTUi = VTUi;
+ if (diff == 0)
+ break;
+ }
+ }
+
+ if (!bestTU) {
+ nv_error(priv, "unable to find suitable dp config\n");
+ return;
+ }
+
+ /* XXX close to vbios numbers, but not right */
+ unk = (symbol - link_ratio) * bestTU;
+ unk *= link_ratio;
+ r = do_div(unk, symbol);
+ r = do_div(unk, symbol);
+ unk += 6;
+
+ nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
+ nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
+ bestVTUf << 16 |
+ bestVTUi << 8 | unk);
+}
+
+static void
+nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
+{
+ struct nvkm_output *outp;
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ u32 hval, hreg = 0x614200 + (head * 0x800);
+ u32 oval, oreg;
+ u32 mask, conf;
+
+ outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
+ if (!outp)
+ return;
+
+ /* we allow both encoder attach and detach operations to occur
+ * within a single supervisor (ie. modeset) sequence. the
+ * encoder detach scripts quite often switch off power to the
+ * lanes, which requires the link to be re-trained.
+ *
+ * this is not generally an issue as the sink "must" (heh)
+ * signal an irq when it's lost sync so the driver can
+ * re-train.
+ *
+ * however, on some boards, if one does not configure at least
+ * the gpu side of the link *before* attaching, then various
+ * things can go horribly wrong (PDISP disappearing from mmio,
+ * third supervisor never happens, etc).
+ *
+ * the solution is simply to retrain here, if necessary. last
+ * i checked, the binary driver userspace does not appear to
+ * trigger this situation (it forces an UPDATE between steps).
+ */
+ if (outp->info.type == DCB_OUTPUT_DP) {
+ u32 soff = (ffs(outp->info.or) - 1) * 0x08;
+ u32 ctrl, datarate;
+
+ if (outp->info.location == 0) {
+ ctrl = nv_rd32(priv, 0x610794 + soff);
+ soff = 1;
+ } else {
+ ctrl = nv_rd32(priv, 0x610b80 + soff);
+ soff = 2;
+ }
+
+ switch ((ctrl & 0x000f0000) >> 16) {
+ case 6: datarate = pclk * 30; break;
+ case 5: datarate = pclk * 24; break;
+ case 2:
+ default:
+ datarate = pclk * 18;
+ break;
+ }
+
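+		/* soff is repurposed above as a divisor on the data rate:
+		 * 1 for an internal sor, 2 for an external (pior) encoder,
+		 * presumably matching the pclk/2 applied for location != 0
+		 * in exec_clkcmp()
+		 */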
+ if (nvkm_output_dp_train(outp, datarate / soff, true))
+ ERR("link not trained before attach\n");
+ }
+
+ exec_clkcmp(priv, head, 0, pclk, &conf);
+
+ if (!outp->info.location && outp->info.type == DCB_OUTPUT_ANALOG) {
+ oreg = 0x614280 + (ffs(outp->info.or) - 1) * 0x800;
+ oval = 0x00000000;
+ hval = 0x00000000;
+ mask = 0xffffffff;
+ } else
+ if (!outp->info.location) {
+ if (outp->info.type == DCB_OUTPUT_DP)
+ nv50_disp_intr_unk20_2_dp(priv, &outp->info, pclk);
+ oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
+ oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+ hval = 0x00000000;
+ mask = 0x00000707;
+ } else {
+ oreg = 0x614380 + (ffs(outp->info.or) - 1) * 0x800;
+ oval = 0x00000001;
+ hval = 0x00000001;
+ mask = 0x00000707;
+ }
+
+ nv_mask(priv, hreg, 0x0000000f, hval);
+ nv_mask(priv, oreg, mask, oval);
+}
+
+/* If programming a TMDS output on a SOR that can also be configured for
+ * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
+ *
+ * It looks like the VBIOS TMDS scripts make an attempt at this, however,
+ * the VBIOS scripts on at least one board I have only switch it off on
+ * link 0, causing a blank display if the output has previously been
+ * programmed for DisplayPort.
+ */
+static void
+nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const int link = !(outp->sorconf.link & 1);
+ const int or = ffs(outp->or) - 1;
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u16 mask = (outp->sorconf.link << 6) | outp->or;
+ u8 ver, hdr;
+
+ if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
+ nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
+}
+
+static void
+nv50_disp_intr_unk40_0(struct nv50_disp_priv *priv, int head)
+{
+ struct nvkm_output *outp;
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ u32 conf;
+
+ outp = exec_clkcmp(priv, head, 1, pclk, &conf);
+ if (!outp)
+ return;
+
+ if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
+ nv50_disp_intr_unk40_0_tmds(priv, &outp->info);
+}
+
+void
+nv50_disp_intr_supervisor(struct work_struct *work)
+{
+ struct nv50_disp_priv *priv =
+ container_of(work, struct nv50_disp_priv, supervisor);
+ struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
+ u32 super = nv_rd32(priv, 0x610030);
+ int head;
+
+ nv_debug(priv, "supervisor 0x%08x 0x%08x\n", priv->super, super);
+
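+	/* bits 4/5/6 of the supervisor status appear to correspond to the
+	 * first/second/third supervisor stages of a modeset mentioned in
+	 * the comments above, handled by the unk10/unk20/unk40 paths below
+	 */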
+ if (priv->super & 0x00000010) {
+ nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000020 << head)))
+ continue;
+ if (!(super & (0x00000080 << head)))
+ continue;
+ nv50_disp_intr_unk10_0(priv, head);
+ }
+ } else
+ if (priv->super & 0x00000020) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000080 << head)))
+ continue;
+ nv50_disp_intr_unk20_0(priv, head);
+ }
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000200 << head)))
+ continue;
+ nv50_disp_intr_unk20_1(priv, head);
+ }
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000080 << head)))
+ continue;
+ nv50_disp_intr_unk20_2(priv, head);
+ }
+ } else
+ if (priv->super & 0x00000040) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000080 << head)))
+ continue;
+ nv50_disp_intr_unk40_0(priv, head);
+ }
+ }
+
+ nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+void
+nv50_disp_intr(struct nouveau_subdev *subdev)
+{
+ struct nv50_disp_priv *priv = (void *)subdev;
+ u32 intr0 = nv_rd32(priv, 0x610020);
+ u32 intr1 = nv_rd32(priv, 0x610024);
+
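+	/* bits 16:20 of intr0 are the per-channel (chid 0-4) error
+	 * interrupts, serviced by nv50_disp_intr_error()
+	 */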
+ while (intr0 & 0x001f0000) {
+		u32 chid = __ffs(intr0 & 0x001f0000) - 16;
+
+ nv50_disp_intr_error(priv, chid);
+ intr0 &= ~(0x00010000 << chid);
+ }
+
+ if (intr1 & 0x00000004) {
+ nouveau_event_trigger(priv->base.vblank, 1, 0);
+ nv_wr32(priv, 0x610024, 0x00000004);
+ intr1 &= ~0x00000004;
+ }
+
+ if (intr1 & 0x00000008) {
+ nouveau_event_trigger(priv->base.vblank, 1, 1);
+ nv_wr32(priv, 0x610024, 0x00000008);
+ intr1 &= ~0x00000008;
+ }
+
+ if (intr1 & 0x00000070) {
+ priv->super = (intr1 & 0x00000070);
+ schedule_work(&priv->supervisor);
+ nv_wr32(priv, 0x610024, priv->super);
+ intr1 &= ~0x00000070;
+ }
+}
+
+static int
+nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nv50_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
+ priv->sclass = nv50_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 2;
+ priv->pior.nr = 3;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->pior.power = nv50_pior_power;
+ return 0;
+}
+
+struct nouveau_oclass *
+nv50_disp_outp_sclass[] = {
+ &nv50_pior_dp_impl.base.base,
+ NULL
+};
+
+struct nouveau_oclass *
+nv50_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x50),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+ .base.outp = nv50_disp_outp_sclass,
+ .mthd.core = &nv50_disp_mast_mthd_chan,
+ .mthd.base = &nv50_disp_sync_mthd_chan,
+ .mthd.ovly = &nv50_disp_ovly_mthd_chan,
+ .mthd.prev = 0x000004,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
new file mode 100644
index 00000000000..1a886472b6f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -0,0 +1,212 @@
+#ifndef __NV50_DISP_H__
+#define __NV50_DISP_H__
+
+#include <core/parent.h>
+#include <core/namedb.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+#include <core/event.h>
+
+#include <engine/dmaobj.h>
+
+#include "dport.h"
+#include "priv.h"
+#include "outp.h"
+#include "outpdp.h"
+
+struct nv50_disp_impl {
+ struct nouveau_disp_impl base;
+ struct {
+ const struct nv50_disp_mthd_chan *core;
+ const struct nv50_disp_mthd_chan *base;
+ const struct nv50_disp_mthd_chan *ovly;
+ int prev;
+ } mthd;
+};
+
+struct nv50_disp_priv {
+ struct nouveau_disp base;
+ struct nouveau_oclass *sclass;
+
+ struct work_struct supervisor;
+ u32 super;
+
+ struct {
+ int nr;
+ } head;
+ struct {
+ int nr;
+ int (*power)(struct nv50_disp_priv *, int dac, u32 data);
+ int (*sense)(struct nv50_disp_priv *, int dac, u32 load);
+ } dac;
+ struct {
+ int nr;
+ int (*power)(struct nv50_disp_priv *, int sor, u32 data);
+ int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
+ int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
+ u32 lvdsconf;
+ } sor;
+ struct {
+ int nr;
+ int (*power)(struct nv50_disp_priv *, int ext, u32 data);
+ u8 type[3];
+ } pior;
+};
+
+#define HEAD_MTHD(n) (n), (n) + 0x03
+
+int nv50_disp_base_scanoutpos(struct nouveau_object *, u32, void *, u32);
+
+#define DAC_MTHD(n) (n), (n) + 0x03
+
+int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_dac_power(struct nv50_disp_priv *, int, u32);
+int nv50_dac_sense(struct nv50_disp_priv *, int, u32);
+
+#define SOR_MTHD(n) (n), (n) + 0x3f
+
+int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+
+int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+
+int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_sor_power(struct nv50_disp_priv *, int, u32);
+
+int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16,
+ u32, struct dcb_output *);
+int nv94_sor_dp_train_fini(struct nv50_disp_priv *, int, int, int, u16, u16,
+ u32, struct dcb_output *);
+int nv94_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nv94_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nv94_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+
+int nvd0_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+
+#define PIOR_MTHD(n) (n), (n) + 0x03
+
+int nv50_pior_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_pior_power(struct nv50_disp_priv *, int, u32);
+
+struct nv50_disp_base {
+ struct nouveau_parent base;
+ struct nouveau_ramht *ramht;
+ u32 chan;
+};
+
+struct nv50_disp_chan {
+ struct nouveau_namedb base;
+ int chid;
+};
+
+int nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, int, void **);
+void nv50_disp_chan_destroy(struct nv50_disp_chan *);
+u32 nv50_disp_chan_rd32(struct nouveau_object *, u64);
+void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
+
+#define nv50_disp_chan_init(a) \
+ nouveau_namedb_init(&(a)->base)
+#define nv50_disp_chan_fini(a,b) \
+ nouveau_namedb_fini(&(a)->base, (b))
+
+int nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, u32, int, int, void **);
+void nv50_disp_dmac_dtor(struct nouveau_object *);
+
+struct nv50_disp_dmac {
+ struct nv50_disp_chan base;
+ struct nouveau_dmaobj *pushdma;
+ u32 push;
+};
+
+struct nv50_disp_pioc {
+ struct nv50_disp_chan base;
+};
+
+struct nv50_disp_mthd_list {
+ u32 mthd;
+ u32 addr;
+ struct {
+ u32 mthd;
+ u32 addr;
+ const char *name;
+ } data[];
+};
+
+struct nv50_disp_mthd_chan {
+ const char *name;
+ u32 addr;
+ struct {
+ const char *name;
+ int nr;
+ const struct nv50_disp_mthd_list *mthd;
+ } data[];
+};
+
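+/* these tables drive the method-state dumps done by nv50_disp_mthd_chan()
+ * when a channel errors: each data[] entry appears to describe one block
+ * of evo methods, replicated nr times (eg. once per dac/sor/head), with
+ * the mthd/addr pairs mapping method offsets to the mmio locations that
+ * shadow them
+ */
+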
+extern struct nouveau_ofuncs nv50_disp_mast_ofuncs;
+extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_base;
+extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_sor;
+extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_pior;
+extern struct nouveau_ofuncs nv50_disp_sync_ofuncs;
+extern const struct nv50_disp_mthd_list nv50_disp_sync_mthd_image;
+extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs;
+extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base;
+extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
+extern struct nouveau_oclass nv50_disp_cclass;
+void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
+ const struct nv50_disp_mthd_chan *);
+void nv50_disp_intr_supervisor(struct work_struct *);
+void nv50_disp_intr(struct nouveau_subdev *);
+
+extern const struct nv50_disp_mthd_chan nv84_disp_mast_mthd_chan;
+extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_dac;
+extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_head;
+extern const struct nv50_disp_mthd_chan nv84_disp_sync_mthd_chan;
+extern const struct nv50_disp_mthd_chan nv84_disp_ovly_mthd_chan;
+extern struct nouveau_omthds nv84_disp_base_omthds[];
+
+extern const struct nv50_disp_mthd_chan nv94_disp_mast_mthd_chan;
+
+extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
+extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_base;
+extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_dac;
+extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_sor;
+extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_pior;
+extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
+extern const struct nv50_disp_mthd_chan nvd0_disp_sync_mthd_chan;
+extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
+extern struct nouveau_omthds nvd0_disp_base_omthds[];
+extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
+extern struct nouveau_oclass nvd0_disp_cclass;
+void nvd0_disp_intr_supervisor(struct work_struct *);
+void nvd0_disp_intr(struct nouveau_subdev *);
+
+extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan;
+extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan;
+
+extern struct nvkm_output_dp_impl nv50_pior_dp_impl;
+extern struct nouveau_oclass *nv50_disp_outp_sclass[];
+
+extern struct nvkm_output_dp_impl nv94_sor_dp_impl;
+int nv94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
+extern struct nouveau_oclass *nv94_disp_outp_sclass[];
+
+extern struct nvkm_output_dp_impl nvd0_sor_dp_impl;
+extern struct nouveau_oclass *nvd0_disp_outp_sclass[];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
new file mode 100644
index 00000000000..1cc62e43468
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+const struct nv50_disp_mthd_list
+nv84_disp_mast_mthd_dac = {
+ .mthd = 0x0080,
+ .addr = 0x000008,
+ .data = {
+ { 0x0400, 0x610b58 },
+ { 0x0404, 0x610bdc },
+ { 0x0420, 0x610bc4 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nv84_disp_mast_mthd_head = {
+ .mthd = 0x0400,
+ .addr = 0x000540,
+ .data = {
+ { 0x0800, 0x610ad8 },
+ { 0x0804, 0x610ad0 },
+ { 0x0808, 0x610a48 },
+ { 0x080c, 0x610a78 },
+ { 0x0810, 0x610ac0 },
+ { 0x0814, 0x610af8 },
+ { 0x0818, 0x610b00 },
+ { 0x081c, 0x610ae8 },
+ { 0x0820, 0x610af0 },
+ { 0x0824, 0x610b08 },
+ { 0x0828, 0x610b10 },
+ { 0x082c, 0x610a68 },
+ { 0x0830, 0x610a60 },
+ { 0x0834, 0x000000 },
+ { 0x0838, 0x610a40 },
+ { 0x0840, 0x610a24 },
+ { 0x0844, 0x610a2c },
+ { 0x0848, 0x610aa8 },
+ { 0x084c, 0x610ab0 },
+ { 0x085c, 0x610c5c },
+ { 0x0860, 0x610a84 },
+ { 0x0864, 0x610a90 },
+ { 0x0868, 0x610b18 },
+ { 0x086c, 0x610b20 },
+ { 0x0870, 0x610ac8 },
+ { 0x0874, 0x610a38 },
+ { 0x0878, 0x610c50 },
+ { 0x0880, 0x610a58 },
+ { 0x0884, 0x610a9c },
+ { 0x089c, 0x610c68 },
+ { 0x08a0, 0x610a70 },
+ { 0x08a4, 0x610a50 },
+ { 0x08a8, 0x610ae0 },
+ { 0x08c0, 0x610b28 },
+ { 0x08c4, 0x610b30 },
+ { 0x08c8, 0x610b40 },
+ { 0x08d4, 0x610b38 },
+ { 0x08d8, 0x610b48 },
+ { 0x08dc, 0x610b50 },
+ { 0x0900, 0x610a18 },
+ { 0x0904, 0x610ab8 },
+ { 0x0910, 0x610c70 },
+ { 0x0914, 0x610c78 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_chan
+nv84_disp_mast_mthd_chan = {
+ .name = "Core",
+ .addr = 0x000000,
+ .data = {
+ { "Global", 1, &nv50_disp_mast_mthd_base },
+ { "DAC", 3, &nv84_disp_mast_mthd_dac },
+ { "SOR", 2, &nv50_disp_mast_mthd_sor },
+ { "PIOR", 3, &nv50_disp_mast_mthd_pior },
+ { "HEAD", 2, &nv84_disp_mast_mthd_head },
+ {}
+ }
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static const struct nv50_disp_mthd_list
+nv84_disp_sync_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x0008c4 },
+ { 0x0088, 0x0008d0 },
+ { 0x008c, 0x0008dc },
+ { 0x0090, 0x0008e4 },
+ { 0x0094, 0x610884 },
+ { 0x00a0, 0x6108a0 },
+ { 0x00a4, 0x610878 },
+ { 0x00c0, 0x61086c },
+ { 0x00c4, 0x610800 },
+ { 0x00c8, 0x61080c },
+ { 0x00cc, 0x610818 },
+ { 0x00e0, 0x610858 },
+ { 0x00e4, 0x610860 },
+ { 0x00e8, 0x6108ac },
+ { 0x00ec, 0x6108b4 },
+ { 0x00fc, 0x610824 },
+ { 0x0100, 0x610894 },
+ { 0x0104, 0x61082c },
+ { 0x0110, 0x6108bc },
+ { 0x0114, 0x61088c },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_chan
+nv84_disp_sync_mthd_chan = {
+ .name = "Base",
+ .addr = 0x000540,
+ .data = {
+ { "Global", 1, &nv84_disp_sync_mthd_base },
+ { "Image", 2, &nv50_disp_sync_mthd_image },
+ {}
+ }
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static const struct nv50_disp_mthd_list
+nv84_disp_ovly_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x6109a0 },
+ { 0x0088, 0x6109c0 },
+ { 0x008c, 0x6109c8 },
+ { 0x0090, 0x6109b4 },
+ { 0x0094, 0x610970 },
+ { 0x00a0, 0x610998 },
+ { 0x00a4, 0x610964 },
+ { 0x00c0, 0x610958 },
+ { 0x00e0, 0x6109a8 },
+ { 0x00e4, 0x6109d0 },
+ { 0x00e8, 0x6109d8 },
+ { 0x0100, 0x61094c },
+ { 0x0104, 0x610984 },
+ { 0x0108, 0x61098c },
+ { 0x0800, 0x6109f8 },
+ { 0x0808, 0x610a08 },
+ { 0x080c, 0x610a10 },
+ { 0x0810, 0x610a00 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_chan
+nv84_disp_ovly_mthd_chan = {
+ .name = "Overlay",
+ .addr = 0x000540,
+ .data = {
+ { "Global", 1, &nv84_disp_ovly_mthd_base },
+ {}
+ }
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_disp_sclass[] = {
+ { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+struct nouveau_omthds
+nv84_disp_base_omthds[] = {
+ { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nv84_disp_base_oclass[] = {
+ { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+ {}
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static int
+nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nv84_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
+ priv->sclass = nv84_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 2;
+ priv->pior.nr = 3;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hdmi = nv84_hdmi_ctrl;
+ priv->pior.power = nv50_pior_power;
+ return 0;
+}
+
+struct nouveau_oclass *
+nv84_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x82),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+ .base.outp = nv50_disp_outp_sclass,
+ .mthd.core = &nv84_disp_mast_mthd_chan,
+ .mthd.base = &nv84_disp_sync_mthd_chan,
+ .mthd.ovly = &nv84_disp_ovly_mthd_chan,
+ .mthd.prev = 0x000004,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
new file mode 100644
index 00000000000..4f718a9f5ae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+const struct nv50_disp_mthd_list
+nv94_disp_mast_mthd_sor = {
+ .mthd = 0x0040,
+ .addr = 0x000008,
+ .data = {
+ { 0x0600, 0x610794 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_chan
+nv94_disp_mast_mthd_chan = {
+ .name = "Core",
+ .addr = 0x000000,
+ .data = {
+ { "Global", 1, &nv50_disp_mast_mthd_base },
+ { "DAC", 3, &nv84_disp_mast_mthd_dac },
+ { "SOR", 4, &nv94_disp_mast_mthd_sor },
+ { "PIOR", 3, &nv50_disp_mast_mthd_pior },
+ { "HEAD", 2, &nv84_disp_mast_mthd_head },
+ {}
+ }
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv94_disp_sclass[] = {
+ { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_omthds
+nv94_disp_base_omthds[] = {
+ { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nv94_disp_base_oclass[] = {
+ { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds },
+ {}
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static int
+nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nv94_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
+ priv->sclass = nv94_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->pior.nr = 3;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hdmi = nv84_hdmi_ctrl;
+ priv->pior.power = nv50_pior_power;
+ return 0;
+}
+
+struct nouveau_oclass *
+nv94_disp_outp_sclass[] = {
+ &nv50_pior_dp_impl.base.base,
+ &nv94_sor_dp_impl.base.base,
+ NULL
+};
+
+struct nouveau_oclass *
+nv94_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x88),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+ .base.outp = nv94_disp_outp_sclass,
+ .mthd.core = &nv94_disp_mast_mthd_chan,
+ .mthd.base = &nv84_disp_sync_mthd_chan,
+ .mthd.ovly = &nv84_disp_ovly_mthd_chan,
+ .mthd.prev = 0x000004,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
new file mode 100644
index 00000000000..6237a9a36f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static const struct nv50_disp_mthd_list
+nva0_disp_ovly_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x6109a0 },
+ { 0x0088, 0x6109c0 },
+ { 0x008c, 0x6109c8 },
+ { 0x0090, 0x6109b4 },
+ { 0x0094, 0x610970 },
+ { 0x00a0, 0x610998 },
+ { 0x00a4, 0x610964 },
+ { 0x00b0, 0x610c98 },
+ { 0x00b4, 0x610ca4 },
+ { 0x00b8, 0x610cac },
+ { 0x00c0, 0x610958 },
+ { 0x00e0, 0x6109a8 },
+ { 0x00e4, 0x6109d0 },
+ { 0x00e8, 0x6109d8 },
+ { 0x0100, 0x61094c },
+ { 0x0104, 0x610984 },
+ { 0x0108, 0x61098c },
+ { 0x0800, 0x6109f8 },
+ { 0x0808, 0x610a08 },
+ { 0x080c, 0x610a10 },
+ { 0x0810, 0x610a00 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_chan
+nva0_disp_ovly_mthd_chan = {
+ .name = "Overlay",
+ .addr = 0x000540,
+ .data = {
+ { "Global", 1, &nva0_disp_ovly_mthd_base },
+ {}
+ }
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nva0_disp_sclass[] = {
+ { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_oclass
+nva0_disp_base_oclass[] = {
+ { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+ {}
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static int
+nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nva0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
+ priv->sclass = nva0_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 2;
+ priv->pior.nr = 3;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hdmi = nv84_hdmi_ctrl;
+ priv->pior.power = nv50_pior_power;
+ return 0;
+}
+
+struct nouveau_oclass *
+nva0_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x83),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva0_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+ .base.outp = nv50_disp_outp_sclass,
+ .mthd.core = &nv84_disp_mast_mthd_chan,
+ .mthd.base = &nv84_disp_sync_mthd_chan,
+ .mthd.ovly = &nva0_disp_ovly_mthd_chan,
+ .mthd.prev = 0x000004,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
new file mode 100644
index 00000000000..019124d4782
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nva3_disp_sclass[] = {
+ { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_omthds
+nva3_disp_base_omthds[] = {
+ { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nva3_disp_base_oclass[] = {
+ { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds },
+ {}
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static int
+nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nva3_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
+ priv->sclass = nva3_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->pior.nr = 3;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nva3_hda_eld;
+ priv->sor.hdmi = nva3_hdmi_ctrl;
+ priv->pior.power = nv50_pior_power;
+ return 0;
+}
+
+struct nouveau_oclass *
+nva3_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x85),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva3_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+ .base.outp = nv94_disp_outp_sclass,
+ .mthd.core = &nv94_disp_mast_mthd_chan,
+ .mthd.base = &nv84_disp_sync_mthd_chan,
+ .mthd.ovly = &nv84_disp_ovly_mthd_chan,
+ .mthd.prev = 0x000004,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
new file mode 100644
index 00000000000..fa30d8196f3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -0,0 +1,1404 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
+
+#include <engine/disp.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/devinit.h>
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
+static int
+nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 name)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ struct nv50_disp_chan *chan = (void *)parent;
+ u32 addr = nv_gpuobj(object)->node->offset;
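+	/* pack the handle data: chid in bits 31:27, the object's instmem
+	 * offset shifted up by 9, and what looks like a valid bit in bit 0
+	 */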
+ u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
+ return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
+}
+
+static void
+nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ nouveau_ramht_remove(base->ramht, cookie);
+}
+
+static int
+nvd0_disp_dmac_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&dmac->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
+ nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
+ nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
+ nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+ nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
+ nv_error(dmac, "init: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
+ nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+ nv_error(dmac, "fini: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+ return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+const struct nv50_disp_mthd_list
+nvd0_disp_mast_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x660080 },
+ { 0x0084, 0x660084 },
+ { 0x0088, 0x660088 },
+ { 0x008c, 0x000000 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nvd0_disp_mast_mthd_dac = {
+ .mthd = 0x0020,
+ .addr = 0x000020,
+ .data = {
+ { 0x0180, 0x660180 },
+ { 0x0184, 0x660184 },
+ { 0x0188, 0x660188 },
+ { 0x0190, 0x660190 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nvd0_disp_mast_mthd_sor = {
+ .mthd = 0x0020,
+ .addr = 0x000020,
+ .data = {
+ { 0x0200, 0x660200 },
+ { 0x0204, 0x660204 },
+ { 0x0208, 0x660208 },
+ { 0x0210, 0x660210 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nvd0_disp_mast_mthd_pior = {
+ .mthd = 0x0020,
+ .addr = 0x000020,
+ .data = {
+ { 0x0300, 0x660300 },
+ { 0x0304, 0x660304 },
+ { 0x0308, 0x660308 },
+ { 0x0310, 0x660310 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_list
+nvd0_disp_mast_mthd_head = {
+ .mthd = 0x0300,
+ .addr = 0x000300,
+ .data = {
+ { 0x0400, 0x660400 },
+ { 0x0404, 0x660404 },
+ { 0x0408, 0x660408 },
+ { 0x040c, 0x66040c },
+ { 0x0410, 0x660410 },
+ { 0x0414, 0x660414 },
+ { 0x0418, 0x660418 },
+ { 0x041c, 0x66041c },
+ { 0x0420, 0x660420 },
+ { 0x0424, 0x660424 },
+ { 0x0428, 0x660428 },
+ { 0x042c, 0x66042c },
+ { 0x0430, 0x660430 },
+ { 0x0434, 0x660434 },
+ { 0x0438, 0x660438 },
+ { 0x0440, 0x660440 },
+ { 0x0444, 0x660444 },
+ { 0x0448, 0x660448 },
+ { 0x044c, 0x66044c },
+ { 0x0450, 0x660450 },
+ { 0x0454, 0x660454 },
+ { 0x0458, 0x660458 },
+ { 0x045c, 0x66045c },
+ { 0x0460, 0x660460 },
+ { 0x0468, 0x660468 },
+ { 0x046c, 0x66046c },
+ { 0x0470, 0x660470 },
+ { 0x0474, 0x660474 },
+ { 0x0480, 0x660480 },
+ { 0x0484, 0x660484 },
+ { 0x048c, 0x66048c },
+ { 0x0490, 0x660490 },
+ { 0x0494, 0x660494 },
+ { 0x0498, 0x660498 },
+ { 0x04b0, 0x6604b0 },
+ { 0x04b8, 0x6604b8 },
+ { 0x04bc, 0x6604bc },
+ { 0x04c0, 0x6604c0 },
+ { 0x04c4, 0x6604c4 },
+ { 0x04c8, 0x6604c8 },
+ { 0x04d0, 0x6604d0 },
+ { 0x04d4, 0x6604d4 },
+ { 0x04e0, 0x6604e0 },
+ { 0x04e4, 0x6604e4 },
+ { 0x04e8, 0x6604e8 },
+ { 0x04ec, 0x6604ec },
+ { 0x04f0, 0x6604f0 },
+ { 0x04f4, 0x6604f4 },
+ { 0x04f8, 0x6604f8 },
+ { 0x04fc, 0x6604fc },
+ { 0x0500, 0x660500 },
+ { 0x0504, 0x660504 },
+ { 0x0508, 0x660508 },
+ { 0x050c, 0x66050c },
+ { 0x0510, 0x660510 },
+ { 0x0514, 0x660514 },
+ { 0x0518, 0x660518 },
+ { 0x051c, 0x66051c },
+ { 0x052c, 0x66052c },
+ { 0x0530, 0x660530 },
+ { 0x054c, 0x66054c },
+ { 0x0550, 0x660550 },
+ { 0x0554, 0x660554 },
+ { 0x0558, 0x660558 },
+ { 0x055c, 0x66055c },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_chan
+nvd0_disp_mast_mthd_chan = {
+ .name = "Core",
+ .addr = 0x000000,
+ .data = {
+ { "Global", 1, &nvd0_disp_mast_mthd_base },
+ { "DAC", 3, &nvd0_disp_mast_mthd_dac },
+ { "SOR", 8, &nvd0_disp_mast_mthd_sor },
+ { "PIOR", 4, &nvd0_disp_mast_mthd_pior },
+ { "HEAD", 4, &nvd0_disp_mast_mthd_head },
+ {}
+ }
+};
+
+static int
+nvd0_disp_mast_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_mast_class *args = data;
+ struct nv50_disp_dmac *mast;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 0, sizeof(*mast), (void **)&mast);
+ *pobject = nv_object(mast);
+ if (ret)
+ return ret;
+
+ nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach;
+ nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach;
+ return 0;
+}
+
+static int
+nvd0_disp_mast_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+ int ret;
+
+ ret = nv50_disp_chan_init(&mast->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001, 0x00000001);
+ nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610494, mast->push);
+ nv_wr32(priv, 0x610498, 0x00010000);
+ nv_wr32(priv, 0x61049c, 0x00000001);
+ nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000, 0x00000000);
+ nv_wr32(priv, 0x610490, 0x01000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
+ nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
+ nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
+ nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
+
+ return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nvd0_disp_mast_ofuncs = {
+ .ctor = nvd0_disp_mast_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nvd0_disp_mast_init,
+ .fini = nvd0_disp_mast_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static const struct nv50_disp_mthd_list
+nvd0_disp_sync_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x661080 },
+ { 0x0084, 0x661084 },
+ { 0x0088, 0x661088 },
+ { 0x008c, 0x66108c },
+ { 0x0090, 0x661090 },
+ { 0x0094, 0x661094 },
+ { 0x00a0, 0x6610a0 },
+ { 0x00a4, 0x6610a4 },
+ { 0x00c0, 0x6610c0 },
+ { 0x00c4, 0x6610c4 },
+ { 0x00c8, 0x6610c8 },
+ { 0x00cc, 0x6610cc },
+ { 0x00e0, 0x6610e0 },
+ { 0x00e4, 0x6610e4 },
+ { 0x00e8, 0x6610e8 },
+ { 0x00ec, 0x6610ec },
+ { 0x00fc, 0x6610fc },
+ { 0x0100, 0x661100 },
+ { 0x0104, 0x661104 },
+ { 0x0108, 0x661108 },
+ { 0x010c, 0x66110c },
+ { 0x0110, 0x661110 },
+ { 0x0114, 0x661114 },
+ { 0x0118, 0x661118 },
+ { 0x011c, 0x66111c },
+ { 0x0130, 0x661130 },
+ { 0x0134, 0x661134 },
+ { 0x0138, 0x661138 },
+ { 0x013c, 0x66113c },
+ { 0x0140, 0x661140 },
+ { 0x0144, 0x661144 },
+ { 0x0148, 0x661148 },
+ { 0x014c, 0x66114c },
+ { 0x0150, 0x661150 },
+ { 0x0154, 0x661154 },
+ { 0x0158, 0x661158 },
+ { 0x015c, 0x66115c },
+ { 0x0160, 0x661160 },
+ { 0x0164, 0x661164 },
+ { 0x0168, 0x661168 },
+ { 0x016c, 0x66116c },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_list
+nvd0_disp_sync_mthd_image = {
+ .mthd = 0x0400,
+ .addr = 0x000400,
+ .data = {
+ { 0x0400, 0x661400 },
+ { 0x0404, 0x661404 },
+ { 0x0408, 0x661408 },
+ { 0x040c, 0x66140c },
+ { 0x0410, 0x661410 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_chan
+nvd0_disp_sync_mthd_chan = {
+ .name = "Base",
+ .addr = 0x001000,
+ .data = {
+ { "Global", 1, &nvd0_disp_sync_mthd_base },
+ { "Image", 2, &nvd0_disp_sync_mthd_image },
+ {}
+ }
+};
+
+static int
+nvd0_disp_sync_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_sync_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 1 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_sync_ofuncs = {
+ .ctor = nvd0_disp_sync_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nvd0_disp_dmac_init,
+ .fini = nvd0_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static const struct nv50_disp_mthd_list
+nvd0_disp_ovly_mthd_base = {
+ .mthd = 0x0000,
+ .data = {
+ { 0x0080, 0x665080 },
+ { 0x0084, 0x665084 },
+ { 0x0088, 0x665088 },
+ { 0x008c, 0x66508c },
+ { 0x0090, 0x665090 },
+ { 0x0094, 0x665094 },
+ { 0x00a0, 0x6650a0 },
+ { 0x00a4, 0x6650a4 },
+ { 0x00b0, 0x6650b0 },
+ { 0x00b4, 0x6650b4 },
+ { 0x00b8, 0x6650b8 },
+ { 0x00c0, 0x6650c0 },
+ { 0x00e0, 0x6650e0 },
+ { 0x00e4, 0x6650e4 },
+ { 0x00e8, 0x6650e8 },
+ { 0x0100, 0x665100 },
+ { 0x0104, 0x665104 },
+ { 0x0108, 0x665108 },
+ { 0x010c, 0x66510c },
+ { 0x0110, 0x665110 },
+ { 0x0118, 0x665118 },
+ { 0x011c, 0x66511c },
+ { 0x0120, 0x665120 },
+ { 0x0124, 0x665124 },
+ { 0x0130, 0x665130 },
+ { 0x0134, 0x665134 },
+ { 0x0138, 0x665138 },
+ { 0x013c, 0x66513c },
+ { 0x0140, 0x665140 },
+ { 0x0144, 0x665144 },
+ { 0x0148, 0x665148 },
+ { 0x014c, 0x66514c },
+ { 0x0150, 0x665150 },
+ { 0x0154, 0x665154 },
+ { 0x0158, 0x665158 },
+ { 0x015c, 0x66515c },
+ { 0x0160, 0x665160 },
+ { 0x0164, 0x665164 },
+ { 0x0168, 0x665168 },
+ { 0x016c, 0x66516c },
+ { 0x0400, 0x665400 },
+ { 0x0408, 0x665408 },
+ { 0x040c, 0x66540c },
+ { 0x0410, 0x665410 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_chan
+nvd0_disp_ovly_mthd_chan = {
+ .name = "Overlay",
+ .addr = 0x001000,
+ .data = {
+ { "Global", 1, &nvd0_disp_ovly_mthd_base },
+ {}
+ }
+};
+
+static int
+nvd0_disp_ovly_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_ovly_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 5 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_ovly_ofuncs = {
+ .ctor = nvd0_disp_ovly_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nvd0_disp_dmac_init,
+ .fini = nvd0_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
+static int
+nvd0_disp_pioc_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int chid,
+ int length, void **pobject)
+{
+ return nv50_disp_chan_create_(parent, engine, oclass, chid,
+ length, pobject);
+}
+
+static void
+nvd0_disp_pioc_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_pioc *pioc = (void *)object;
+ nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nvd0_disp_pioc_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&pioc->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+ /* activate channel */
+ nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
+ nv_error(pioc, "init: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+
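+	/* deactivate channel */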
+ nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
+ nv_error(pioc, "timeout: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+ return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_oimm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_oimm_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_oimm_ofuncs = {
+ .ctor = nvd0_disp_oimm_ctor,
+ .dtor = nvd0_disp_pioc_dtor,
+ .init = nvd0_disp_pioc_init,
+ .fini = nvd0_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_curs_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_curs_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_curs_ofuncs = {
+ .ctor = nvd0_disp_curs_ctor,
+ .dtor = nvd0_disp_pioc_dtor,
+ .init = nvd0_disp_pioc_init,
+ .fini = nvd0_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static int
+nvd0_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv04_display_scanoutpos *args = data;
+ const int head = (mthd & NV50_DISP_MTHD_HEAD);
+ u32 blanke, blanks, total;
+
+ if (size < sizeof(*args) || head >= priv->head.nr)
+ return -EINVAL;
+
+ total = nv_rd32(priv, 0x640414 + (head * 0x300));
+ blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
+ blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
+
+ args->vblanke = (blanke & 0xffff0000) >> 16;
+ args->hblanke = (blanke & 0x0000ffff);
+ args->vblanks = (blanks & 0xffff0000) >> 16;
+ args->hblanks = (blanks & 0x0000ffff);
+ args->vtotal = ( total & 0xffff0000) >> 16;
+ args->htotal = ( total & 0x0000ffff);
+
+ args->time[0] = ktime_to_ns(ktime_get());
+ args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
+ args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
+ args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
+ return 0;
+}
+
+static void
+nvd0_disp_base_vblank_enable(struct nouveau_event *event, int type, int head)
+{
+ nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
+}
+
+static void
+nvd0_disp_base_vblank_disable(struct nouveau_event *event, int type, int head)
+{
+ nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
+}
+
+static int
+nvd0_disp_base_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_base *base;
+ int ret;
+
+ ret = nouveau_parent_create(parent, engine, oclass, 0,
+ priv->sclass, 0, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
+
+ priv->base.vblank->priv = priv;
+ priv->base.vblank->enable = nvd0_disp_base_vblank_enable;
+ priv->base.vblank->disable = nvd0_disp_base_vblank_disable;
+
+ return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
+ &base->ramht);
+}
+
+static void
+nvd0_disp_base_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_base *base = (void *)object;
+ nouveau_ramht_ref(NULL, &base->ramht);
+ nouveau_parent_destroy(&base->base);
+}
+
+static int
+nvd0_disp_base_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+ int ret, i;
+ u32 tmp;
+
+ ret = nouveau_parent_init(&base->base);
+ if (ret)
+ return ret;
+
+ /* The below segments of code copying values from one register to
+ * another appear to inform EVO of the display capabilities or
+ * something similar.
+ */
+
+ /* ... CRTC caps */
+ for (i = 0; i < priv->head.nr; i++) {
+ tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+ nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
+ tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+ nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
+ tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+ nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
+ }
+
+ /* ... DAC caps */
+ for (i = 0; i < priv->dac.nr; i++) {
+ tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+ nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
+ }
+
+ /* ... SOR caps */
+ for (i = 0; i < priv->sor.nr; i++) {
+ tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+ nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
+ }
+
+ /* steal display away from vbios, or something like that */
+ if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
+ nv_wr32(priv, 0x6100ac, 0x00000100);
+ nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+ nv_error(priv, "timeout acquiring display\n");
+ return -EBUSY;
+ }
+ }
+
+ /* point at display engine memory area (hash table, objects) */
+ nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
+
+ /* enable supervisor interrupts, disable everything else */
+ nv_wr32(priv, 0x610090, 0x00000000);
+ nv_wr32(priv, 0x6100a0, 0x00000000);
+ nv_wr32(priv, 0x6100b0, 0x00000307);
+
+ /* disable underflow reporting, preventing an intermittent issue
+ * on some nve4 boards where the production vbios left this
+ * setting enabled by default.
+ *
+ * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
+ */
+ for (i = 0; i < priv->head.nr; i++)
+ nv_mask(priv, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
+
+ return 0;
+}
+
+static int
+nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+
+ /* disable all interrupts */
+ nv_wr32(priv, 0x6100b0, 0x00000000);
+
+ return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nvd0_disp_base_ofuncs = {
+ .ctor = nvd0_disp_base_ctor,
+ .dtor = nvd0_disp_base_dtor,
+ .init = nvd0_disp_base_init,
+ .fini = nvd0_disp_base_fini,
+};
+
+struct nouveau_omthds
+nvd0_disp_base_omthds[] = {
+ { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nvd0_disp_base_scanoutpos },
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nvd0_disp_base_oclass[] = {
+ { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
+ {}
+};
+
+static struct nouveau_oclass
+nvd0_disp_sclass[] = {
+ { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+ { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+ { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+ { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+ { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static struct nvkm_output *
+exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
+ u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_output *outp;
+ u16 mask, type;
+
+ if (or < 4) {
+ type = DCB_OUTPUT_ANALOG;
+ mask = 0;
+ } else {
+ or -= 4;
+ switch (ctrl & 0x00000f00) {
+ case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+ case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+ case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+ case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+ case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+ case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+ default:
+ nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+			return NULL;
+ }
+ }
+
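+	/* build a DCB hash mask from the routing decoded above: bits 0-3
+	 * appear to select the OR, bits 6-7 the SOR link(s) in use, and
+	 * bits 8-11 the head */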
+ mask = 0x00c0 & (mask << 6);
+ mask |= 0x0001 << or;
+ mask |= 0x0100 << head;
+
+ list_for_each_entry(outp, &priv->base.outp, head) {
+ if ((outp->info.hasht & 0xff) == type &&
+ (outp->info.hashm & mask) == mask) {
+ *data = nvbios_outp_match(bios, outp->info.hasht,
+ outp->info.hashm,
+ ver, hdr, cnt, len, info);
+ if (!*data)
+ return NULL;
+ return outp;
+ }
+ }
+
+ return NULL;
+}
+
+static struct nvkm_output *
+exec_script(struct nv50_disp_priv *priv, int head, int id)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_output *outp;
+ struct nvbios_outp info;
+ u8 ver, hdr, cnt, len;
+ u32 data, ctrl = 0;
+ int or;
+
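+	/* scan the output control methods to find which OR is currently
+	 * driving this head */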
+ for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
+ ctrl = nv_rd32(priv, 0x640180 + (or * 0x20));
+ if (ctrl & (1 << head))
+ break;
+ }
+
+ if (or == 8)
+ return NULL;
+
+ outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
+ if (outp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[id],
+ .outp = &outp->info,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ }
+
+ return outp;
+}
+
+static struct nvkm_output *
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_output *outp;
+ struct nvbios_outp info1;
+ struct nvbios_ocfg info2;
+ u8 ver, hdr, cnt, len;
+ u32 data, ctrl = 0;
+ int or;
+
+ for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
+ ctrl = nv_rd32(priv, 0x660180 + (or * 0x20));
+ if (ctrl & (1 << head))
+ break;
+ }
+
+ if (or == 8)
+ return NULL;
+
+ outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
+ if (!outp)
+ return NULL;
+
+ switch (outp->info.type) {
+ case DCB_OUTPUT_TMDS:
+ *conf = (ctrl & 0x00000f00) >> 8;
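+		/* pixel clocks above the 165 MHz single-link TMDS limit
+		 * presumably flag dual-link operation here */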
+ if (pclk >= 165000)
+ *conf |= 0x0100;
+ break;
+ case DCB_OUTPUT_LVDS:
+ *conf = priv->sor.lvdsconf;
+ break;
+ case DCB_OUTPUT_DP:
+ *conf = (ctrl & 0x00000f00) >> 8;
+ break;
+ case DCB_OUTPUT_ANALOG:
+ default:
+ *conf = 0x00ff;
+ break;
+ }
+
+ data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+ if (data && id < 0xff) {
+ data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = data,
+ .outp = &outp->info,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ }
+ }
+
+ return outp;
+}
+
+static void
+nvd0_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
+{
+ exec_script(priv, head, 1);
+}
+
+static void
+nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
+{
+ struct nvkm_output *outp = exec_script(priv, head, 2);
+
+ /* see note in nv50_disp_intr_unk20_0() */
+ if (outp && outp->info.type == DCB_OUTPUT_DP) {
+ struct nvkm_output_dp *outpdp = (void *)outp;
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = nouveau_bios(priv),
+ .outp = &outp->info,
+ .crtc = head,
+ .offset = outpdp->info.script[4],
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ atomic_set(&outpdp->lt.done, 0);
+ }
+}
+
+static void
+nvd0_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
+{
+ struct nouveau_devinit *devinit = nouveau_devinit(priv);
+ u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ if (pclk)
+ devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
+ nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
+}
+
+static void
+nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
+ struct dcb_output *outp)
+{
+ const int or = ffs(outp->or) - 1;
+ const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
+ const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
+ const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
+ const u32 hoff = (head * 0x800);
+ const u32 soff = ( or * 0x800);
+ const u32 loff = (link * 0x080) + soff;
+ const u32 symbol = 100000;
+ const u32 TU = 64;
+ u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
+ u32 clksor = nv_rd32(priv, 0x612300 + soff);
+ u32 datarate, link_nr, link_bw, bits;
+ u64 ratio, value;
+
+	if      ((conf & 0x3c0) == 0x180) bits = 30;
+	else if ((conf & 0x3c0) == 0x140) bits = 24;
+	else                              bits = 18;
+	datarate = (pclk * bits) / 8;
+
+	if      (dpctrl > 0x00030000) link_nr = 4;
+	else if (dpctrl > 0x00010000) link_nr = 2;
+	else                          link_nr = 1;
+
+ link_bw = (clksor & 0x007c0000) >> 18;
+ link_bw *= 27000;
+
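+	/* 'ratio' is the fraction of total link bandwidth the mode consumes,
+	 * scaled by 'symbol' (1e5) to keep precision in integer math; the
+	 * 'value' derived below seems to be a fill parameter per 64-symbol
+	 * transfer unit (TU) for 0x616610, exact semantics undocumented */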
+ ratio = datarate;
+ ratio *= symbol;
+ do_div(ratio, link_nr * link_bw);
+
+ value = (symbol - ratio) * TU;
+ value *= ratio;
+ do_div(value, symbol);
+ do_div(value, symbol);
+
+ value += 5;
+ value |= 0x08000000;
+
+ nv_wr32(priv, 0x616610 + hoff, value);
+}
+
+static void
+nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
+{
+ struct nvkm_output *outp;
+ u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ u32 conf, addr, data;
+
+ outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
+ if (!outp)
+ return;
+
+ /* see note in nv50_disp_intr_unk20_2() */
+ if (outp->info.type == DCB_OUTPUT_DP) {
+ u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
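+		/* scale the pixel clock by bits-per-pixel (the 0x3c0 field
+		 * presumably encodes colour depth) to get the data rate the
+		 * link must be trained to carry */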
+ switch ((sync & 0x000003c0) >> 6) {
+ case 6: pclk = pclk * 30; break;
+ case 5: pclk = pclk * 24; break;
+ case 2:
+ default:
+ pclk = pclk * 18;
+ break;
+ }
+
+ if (nvkm_output_dp_train(outp, pclk, true))
+ ERR("link not trained before attach\n");
+ }
+
+ exec_clkcmp(priv, head, 0, pclk, &conf);
+
+ if (outp->info.type == DCB_OUTPUT_ANALOG) {
+ addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
+ data = 0x00000000;
+ } else {
+ if (outp->info.type == DCB_OUTPUT_DP)
+ nvd0_disp_intr_unk2_2_tu(priv, head, &outp->info);
+ addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
+ data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+ }
+
+ nv_mask(priv, addr, 0x00000707, data);
+}
+
+static void
+nvd0_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
+{
+ u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ u32 conf;
+
+ exec_clkcmp(priv, head, 1, pclk, &conf);
+}
+
+void
+nvd0_disp_intr_supervisor(struct work_struct *work)
+{
+ struct nv50_disp_priv *priv =
+ container_of(work, struct nv50_disp_priv, supervisor);
+ struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
+ u32 mask[4];
+ int head;
+
+ nv_debug(priv, "supervisor %d\n", ffs(priv->super));
+ for (head = 0; head < priv->head.nr; head++) {
+ mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
+ nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
+ }
+
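+	/* the supervisor appears to fire in three stages around a modeset:
+	 * stage 1 before heads are detached, stage 2 while they are blanked
+	 * (output scripts, VPLLs, link configuration), stage 3 once scanout
+	 * resumes; the unkN_M naming mirrors the equally unknown nv50
+	 * handlers */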
+ if (priv->super & 0x00000001) {
+ nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nv_debug(priv, "supervisor 1.0 - head %d\n", head);
+ nvd0_disp_intr_unk1_0(priv, head);
+ }
+ } else
+ if (priv->super & 0x00000002) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nv_debug(priv, "supervisor 2.0 - head %d\n", head);
+ nvd0_disp_intr_unk2_0(priv, head);
+ }
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00010000))
+ continue;
+ nv_debug(priv, "supervisor 2.1 - head %d\n", head);
+ nvd0_disp_intr_unk2_1(priv, head);
+ }
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nv_debug(priv, "supervisor 2.2 - head %d\n", head);
+ nvd0_disp_intr_unk2_2(priv, head);
+ }
+ } else
+ if (priv->super & 0x00000004) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nv_debug(priv, "supervisor 3.0 - head %d\n", head);
+ nvd0_disp_intr_unk4_0(priv, head);
+ }
+ }
+
+ for (head = 0; head < priv->head.nr; head++)
+ nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000);
+ nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_disp_intr_error(struct nv50_disp_priv *priv, int chid)
+{
+ const struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
+ u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
+ u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
+ u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
+
+ nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
+ "0x%08x 0x%08x\n",
+ chid, (mthd & 0x0000ffc), data, mthd, unkn);
+
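+	/* channel index maps onto channel type: 0 core, 1-4 base (sync),
+	 * 5-8 overlay; for errors on method 0x0080 (update), dump the
+	 * channel's method state to aid debugging */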
+ if (chid == 0) {
+ switch (mthd & 0xffc) {
+ case 0x0080:
+ nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
+ impl->mthd.core);
+ break;
+ default:
+ break;
+ }
+ } else
+ if (chid <= 4) {
+ switch (mthd & 0xffc) {
+ case 0x0080:
+ nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
+ impl->mthd.base);
+ break;
+ default:
+ break;
+ }
+ } else
+ if (chid <= 8) {
+ switch (mthd & 0xffc) {
+ case 0x0080:
+ nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 5,
+ impl->mthd.ovly);
+ break;
+ default:
+ break;
+ }
+ }
+
+ nv_wr32(priv, 0x61009c, (1 << chid));
+ nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
+}
+
+void
+nvd0_disp_intr(struct nouveau_subdev *subdev)
+{
+ struct nv50_disp_priv *priv = (void *)subdev;
+ u32 intr = nv_rd32(priv, 0x610088);
+ int i;
+
+ if (intr & 0x00000001) {
+ u32 stat = nv_rd32(priv, 0x61008c);
+ nv_wr32(priv, 0x61008c, stat);
+ intr &= ~0x00000001;
+ }
+
+ if (intr & 0x00000002) {
+ u32 stat = nv_rd32(priv, 0x61009c);
+ int chid = ffs(stat) - 1;
+ if (chid >= 0)
+ nvd0_disp_intr_error(priv, chid);
+ intr &= ~0x00000002;
+ }
+
+ if (intr & 0x00100000) {
+ u32 stat = nv_rd32(priv, 0x6100ac);
+ if (stat & 0x00000007) {
+ priv->super = (stat & 0x00000007);
+ schedule_work(&priv->supervisor);
+ nv_wr32(priv, 0x6100ac, priv->super);
+ stat &= ~0x00000007;
+ }
+
+ if (stat) {
+ nv_info(priv, "unknown intr24 0x%08x\n", stat);
+ nv_wr32(priv, 0x6100ac, stat);
+ }
+
+ intr &= ~0x00100000;
+ }
+
+ for (i = 0; i < priv->head.nr; i++) {
+ u32 mask = 0x01000000 << i;
+ if (mask & intr) {
+ u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
+ if (stat & 0x00000001)
+ nouveau_event_trigger(priv->base.vblank, 1, i);
+ nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
+ nv_rd32(priv, 0x6100c0 + (i * 0x800));
+ }
+ }
+}
+
+static int
+nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int heads = nv_rd32(parent, 0x022448);
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nvd0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nvd0_disp_intr;
+ INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+ priv->sclass = nvd0_disp_sclass;
+ priv->head.nr = heads;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nvd0_hda_eld;
+ priv->sor.hdmi = nvd0_hdmi_ctrl;
+ return 0;
+}
+
+struct nouveau_oclass *
+nvd0_disp_outp_sclass[] = {
+ &nvd0_sor_dp_impl.base.base,
+ NULL
+};
+
+struct nouveau_oclass *
+nvd0_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x90),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+ .base.outp = nvd0_disp_outp_sclass,
+ .mthd.core = &nvd0_disp_mast_mthd_chan,
+ .mthd.base = &nvd0_disp_sync_mthd_chan,
+ .mthd.ovly = &nvd0_disp_ovly_mthd_chan,
+ .mthd.prev = -0x020000,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
new file mode 100644
index 00000000000..11328e3f5df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static const struct nv50_disp_mthd_list
+nve0_disp_mast_mthd_head = {
+ .mthd = 0x0300,
+ .addr = 0x000300,
+ .data = {
+ { 0x0400, 0x660400 },
+ { 0x0404, 0x660404 },
+ { 0x0408, 0x660408 },
+ { 0x040c, 0x66040c },
+ { 0x0410, 0x660410 },
+ { 0x0414, 0x660414 },
+ { 0x0418, 0x660418 },
+ { 0x041c, 0x66041c },
+ { 0x0420, 0x660420 },
+ { 0x0424, 0x660424 },
+ { 0x0428, 0x660428 },
+ { 0x042c, 0x66042c },
+ { 0x0430, 0x660430 },
+ { 0x0434, 0x660434 },
+ { 0x0438, 0x660438 },
+ { 0x0440, 0x660440 },
+ { 0x0444, 0x660444 },
+ { 0x0448, 0x660448 },
+ { 0x044c, 0x66044c },
+ { 0x0450, 0x660450 },
+ { 0x0454, 0x660454 },
+ { 0x0458, 0x660458 },
+ { 0x045c, 0x66045c },
+ { 0x0460, 0x660460 },
+ { 0x0468, 0x660468 },
+ { 0x046c, 0x66046c },
+ { 0x0470, 0x660470 },
+ { 0x0474, 0x660474 },
+ { 0x047c, 0x66047c },
+ { 0x0480, 0x660480 },
+ { 0x0484, 0x660484 },
+ { 0x0488, 0x660488 },
+ { 0x048c, 0x66048c },
+ { 0x0490, 0x660490 },
+ { 0x0494, 0x660494 },
+ { 0x0498, 0x660498 },
+ { 0x04a0, 0x6604a0 },
+ { 0x04b0, 0x6604b0 },
+ { 0x04b8, 0x6604b8 },
+ { 0x04bc, 0x6604bc },
+ { 0x04c0, 0x6604c0 },
+ { 0x04c4, 0x6604c4 },
+ { 0x04c8, 0x6604c8 },
+ { 0x04d0, 0x6604d0 },
+ { 0x04d4, 0x6604d4 },
+ { 0x04e0, 0x6604e0 },
+ { 0x04e4, 0x6604e4 },
+ { 0x04e8, 0x6604e8 },
+ { 0x04ec, 0x6604ec },
+ { 0x04f0, 0x6604f0 },
+ { 0x04f4, 0x6604f4 },
+ { 0x04f8, 0x6604f8 },
+ { 0x04fc, 0x6604fc },
+ { 0x0500, 0x660500 },
+ { 0x0504, 0x660504 },
+ { 0x0508, 0x660508 },
+ { 0x050c, 0x66050c },
+ { 0x0510, 0x660510 },
+ { 0x0514, 0x660514 },
+ { 0x0518, 0x660518 },
+ { 0x051c, 0x66051c },
+ { 0x0520, 0x660520 },
+ { 0x0524, 0x660524 },
+ { 0x052c, 0x66052c },
+ { 0x0530, 0x660530 },
+ { 0x054c, 0x66054c },
+ { 0x0550, 0x660550 },
+ { 0x0554, 0x660554 },
+ { 0x0558, 0x660558 },
+ { 0x055c, 0x66055c },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_chan
+nve0_disp_mast_mthd_chan = {
+ .name = "Core",
+ .addr = 0x000000,
+ .data = {
+ { "Global", 1, &nvd0_disp_mast_mthd_base },
+ { "DAC", 3, &nvd0_disp_mast_mthd_dac },
+ { "SOR", 8, &nvd0_disp_mast_mthd_sor },
+ { "PIOR", 4, &nvd0_disp_mast_mthd_pior },
+ { "HEAD", 4, &nve0_disp_mast_mthd_head },
+ {}
+ }
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static const struct nv50_disp_mthd_list
+nve0_disp_ovly_mthd_base = {
+ .mthd = 0x0000,
+ .data = {
+ { 0x0080, 0x665080 },
+ { 0x0084, 0x665084 },
+ { 0x0088, 0x665088 },
+ { 0x008c, 0x66508c },
+ { 0x0090, 0x665090 },
+ { 0x0094, 0x665094 },
+ { 0x00a0, 0x6650a0 },
+ { 0x00a4, 0x6650a4 },
+ { 0x00b0, 0x6650b0 },
+ { 0x00b4, 0x6650b4 },
+ { 0x00b8, 0x6650b8 },
+ { 0x00c0, 0x6650c0 },
+ { 0x00c4, 0x6650c4 },
+ { 0x00e0, 0x6650e0 },
+ { 0x00e4, 0x6650e4 },
+ { 0x00e8, 0x6650e8 },
+ { 0x0100, 0x665100 },
+ { 0x0104, 0x665104 },
+ { 0x0108, 0x665108 },
+ { 0x010c, 0x66510c },
+ { 0x0110, 0x665110 },
+ { 0x0118, 0x665118 },
+ { 0x011c, 0x66511c },
+ { 0x0120, 0x665120 },
+ { 0x0124, 0x665124 },
+ { 0x0130, 0x665130 },
+ { 0x0134, 0x665134 },
+ { 0x0138, 0x665138 },
+ { 0x013c, 0x66513c },
+ { 0x0140, 0x665140 },
+ { 0x0144, 0x665144 },
+ { 0x0148, 0x665148 },
+ { 0x014c, 0x66514c },
+ { 0x0150, 0x665150 },
+ { 0x0154, 0x665154 },
+ { 0x0158, 0x665158 },
+ { 0x015c, 0x66515c },
+ { 0x0160, 0x665160 },
+ { 0x0164, 0x665164 },
+ { 0x0168, 0x665168 },
+ { 0x016c, 0x66516c },
+ { 0x0400, 0x665400 },
+ { 0x0404, 0x665404 },
+ { 0x0408, 0x665408 },
+ { 0x040c, 0x66540c },
+ { 0x0410, 0x665410 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_chan
+nve0_disp_ovly_mthd_chan = {
+ .name = "Overlay",
+ .addr = 0x001000,
+ .data = {
+ { "Global", 1, &nve0_disp_ovly_mthd_base },
+ {}
+ }
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_disp_sclass[] = {
+ { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+ { NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+ { NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+ { NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+ { NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_oclass
+nve0_disp_base_oclass[] = {
+ { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
+ {}
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static int
+nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int heads = nv_rd32(parent, 0x022448);
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nve0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nvd0_disp_intr;
+ INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+ priv->sclass = nve0_disp_sclass;
+ priv->head.nr = heads;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nvd0_hda_eld;
+ priv->sor.hdmi = nvd0_hdmi_ctrl;
+ return 0;
+}
+
+struct nouveau_oclass *
+nve0_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x91),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+ .base.outp = nvd0_disp_outp_sclass,
+ .mthd.core = &nve0_disp_mast_mthd_chan,
+ .mthd.base = &nvd0_disp_sync_mthd_chan,
+ .mthd.ovly = &nve0_disp_ovly_mthd_chan,
+ .mthd.prev = -0x020000,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
new file mode 100644
index 00000000000..104388081d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
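+/* nvf0 reuses the nvd0 channel implementations wholesale; only the class
+ * IDs differ, and the core/overlay method lists are shared with nve0 */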
+static struct nouveau_oclass
+nvf0_disp_sclass[] = {
+ { NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+ { NVF0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+ { NVF0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+ { NVF0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+ { NVF0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_oclass
+nvf0_disp_base_oclass[] = {
+ { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
+ {}
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static int
+nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int heads = nv_rd32(parent, 0x022448);
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nvf0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nvd0_disp_intr;
+ INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+ priv->sclass = nvf0_disp_sclass;
+ priv->head.nr = heads;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nvd0_hda_eld;
+ priv->sor.hdmi = nvd0_hdmi_ctrl;
+ return 0;
+}
+
+struct nouveau_oclass *
+nvf0_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x92),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvf0_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+ .base.outp = nvd0_disp_outp_sclass,
+ .mthd.core = &nve0_disp_mast_mthd_chan,
+ .mthd.base = &nvd0_disp_sync_mthd_chan,
+ .mthd.ovly = &nve0_disp_ovly_mthd_chan,
+ .mthd.prev = -0x020000,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
new file mode 100644
index 00000000000..ad9ba7ccec7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/i2c.h>
+#include <subdev/bios.h>
+#include <subdev/bios/conn.h>
+
+#include "outp.h"
+
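+/* connector state is not handled by the generic object hierarchy here,
+ * so output init/fini cascade to the connector object by hand */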
+int
+_nvkm_output_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nvkm_output *outp = (void *)object;
+ nv_ofuncs(outp->conn)->fini(nv_object(outp->conn), suspend);
+ return nouveau_object_fini(&outp->base, suspend);
+}
+
+int
+_nvkm_output_init(struct nouveau_object *object)
+{
+ struct nvkm_output *outp = (void *)object;
+ int ret = nouveau_object_init(&outp->base);
+ if (ret == 0)
+ nv_ofuncs(outp->conn)->init(nv_object(outp->conn));
+	return ret;
+}
+
+void
+_nvkm_output_dtor(struct nouveau_object *object)
+{
+ struct nvkm_output *outp = (void *)object;
+ list_del(&outp->head);
+ nouveau_object_ref(NULL, (void *)&outp->conn);
+ nouveau_object_destroy(&outp->base);
+}
+
+int
+nvkm_output_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ struct dcb_output *dcbE, int index,
+ int length, void **pobject)
+{
+ struct nouveau_bios *bios = nouveau_bios(engine);
+ struct nouveau_i2c *i2c = nouveau_i2c(parent);
+ struct nouveau_disp *disp = (void *)engine;
+ struct nvbios_connE connE;
+ struct nvkm_output *outp;
+ u8 ver, hdr;
+ u32 data;
+ int ret;
+
+ ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject);
+ outp = *pobject;
+ if (ret)
+ return ret;
+
+ outp->info = *dcbE;
+ outp->index = index;
+
+ DBG("type %02x loc %d or %d link %d con %x edid %x bus %d head %x\n",
+ dcbE->type, dcbE->location, dcbE->or, dcbE->type >= 2 ?
+ dcbE->sorconf.link : 0, dcbE->connector, dcbE->i2c_index,
+ dcbE->bus, dcbE->heads);
+
+ outp->port = i2c->find(i2c, outp->info.i2c_index);
+ outp->edid = outp->port;
+
+ data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr, &connE);
+ if (!data) {
+ DBG("vbios connector data not found\n");
+ memset(&connE, 0x00, sizeof(connE));
+ connE.type = DCB_CONNECTOR_NONE;
+ }
+
+ ret = nouveau_object_ctor(parent, engine, nvkm_connector_oclass,
+ &connE, outp->info.connector,
+ (struct nouveau_object **)&outp->conn);
+ if (ret < 0) {
+ ERR("error %d creating connector, disabling\n", ret);
+ return ret;
+ }
+
+ list_add_tail(&outp->head, &disp->outp);
+ return 0;
+}
+
+int
+_nvkm_output_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *dcbE, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct nvkm_output *outp;
+ int ret;
+
+ ret = nvkm_output_create(parent, engine, oclass, dcbE, index, &outp);
+ *pobject = nv_object(outp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass *
+nvkm_output_oclass = &(struct nvkm_output_impl) {
+ .base = {
+ .handle = 0,
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_output_ctor,
+ .dtor = _nvkm_output_dtor,
+ .init = _nvkm_output_init,
+ .fini = _nvkm_output_fini,
+ },
+ },
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.h b/drivers/gpu/drm/nouveau/core/engine/disp/outp.h
new file mode 100644